Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/autobuilder-worker-prereq-tests | 7
-rwxr-xr-x  scripts/bitbake-prserv-tool | 3
-rwxr-xr-x  scripts/bitbake-whatchanged | 14
-rwxr-xr-x  scripts/buildall-qemu | 120
-rwxr-xr-x  scripts/buildhistory-collect-srcrevs | 19
-rwxr-xr-x  scripts/buildhistory-diff | 17
-rwxr-xr-x  scripts/buildstats-diff | 24
-rwxr-xr-x  scripts/combo-layer | 16
-rwxr-xr-x  scripts/combo-layer-hook-default.sh | 3
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix-plot.sh | 15
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix.sh | 15
-rwxr-xr-x  scripts/contrib/bb-perf/buildstats-plot.sh | 93
-rwxr-xr-x  scripts/contrib/bb-perf/buildstats.sh | 154
-rwxr-xr-x  scripts/contrib/bbvars.py | 14
-rwxr-xr-x  scripts/contrib/build-perf-test-wrapper.sh | 25
-rwxr-xr-x  scripts/contrib/convert-overrides.py | 144
-rwxr-xr-x  scripts/contrib/convert-spdx-licenses.py | 145
-rwxr-xr-x  scripts/contrib/convert-srcuri.py | 77
-rwxr-xr-x  scripts/contrib/convert-variable-renames.py | 116
-rwxr-xr-x  scripts/contrib/ddimage | 92
-rwxr-xr-x  scripts/contrib/devtool-stress.py | 13
-rwxr-xr-x  scripts/contrib/dialog-power-control | 2
-rwxr-xr-x  scripts/contrib/documentation-audit.sh | 7
-rwxr-xr-x  scripts/contrib/graph-tool | 113
-rwxr-xr-x  scripts/contrib/image-manifest | 523
-rwxr-xr-x  scripts/contrib/list-packageconfig-flags.py | 19
-rwxr-xr-x  scripts/contrib/oe-build-perf-report-email.py | 164
-rwxr-xr-x  scripts/contrib/patchreview.py | 3
-rwxr-xr-x  scripts/contrib/patchtest.sh | 18
-rwxr-xr-x  scripts/contrib/serdevtry | 3
-rwxr-xr-x  scripts/contrib/test_build_time.sh | 18
-rwxr-xr-x  scripts/contrib/test_build_time_worker.sh | 4
-rwxr-xr-x  scripts/contrib/uncovered | 15
-rwxr-xr-x  scripts/contrib/verify-homepage.py | 4
-rwxr-xr-x  scripts/cp-noerror | 2
-rwxr-xr-x  scripts/create-pull-request | 37
l---------  scripts/cross-intercept/ar | 1
-rwxr-xr-x  scripts/crosstap | 21
-rwxr-xr-x  scripts/devtool | 14
-rwxr-xr-x  scripts/gen-lockedsig-cache | 59
-rwxr-xr-x  scripts/gen-site-config | 12
-rwxr-xr-x  scripts/install-buildtools | 345
-rw-r--r--  scripts/lib/argparse_oe.py | 4
-rw-r--r--  scripts/lib/build_perf/__init__.py | 9
-rw-r--r--  scripts/lib/build_perf/html.py | 9
-rw-r--r--  scripts/lib/build_perf/report.py | 12
-rw-r--r--  scripts/lib/buildstats.py | 17
-rw-r--r--  scripts/lib/checklayer/__init__.py | 99
-rw-r--r--  scripts/lib/checklayer/case.py | 4
-rw-r--r--  scripts/lib/checklayer/cases/bsp.py | 6
-rw-r--r--  scripts/lib/checklayer/cases/common.py | 34
-rw-r--r--  scripts/lib/checklayer/cases/distro.py | 4
-rw-r--r--  scripts/lib/checklayer/context.py | 4
-rw-r--r--  scripts/lib/devtool/__init__.py | 22
-rw-r--r--  scripts/lib/devtool/build.py | 25
-rw-r--r--  scripts/lib/devtool/build_image.py | 14
-rw-r--r--  scripts/lib/devtool/build_sdk.py | 12
-rw-r--r--  scripts/lib/devtool/deploy.py | 52
-rw-r--r--  scripts/lib/devtool/export.py | 12
-rw-r--r--  scripts/lib/devtool/import.py | 12
-rw-r--r--  scripts/lib/devtool/menuconfig.py | 79
-rw-r--r--  scripts/lib/devtool/package.py | 12
-rw-r--r--  scripts/lib/devtool/runqemu.py | 12
-rw-r--r--  scripts/lib/devtool/sdk.py | 14
-rw-r--r--  scripts/lib/devtool/search.py | 17
-rw-r--r--  scripts/lib/devtool/standard.py | 326
-rw-r--r--  scripts/lib/devtool/upgrade.py | 126
-rw-r--r--  scripts/lib/devtool/utilcmds.py | 12
-rw-r--r--  scripts/lib/recipetool/append.py | 24
-rw-r--r--  scripts/lib/recipetool/create.py | 297
-rw-r--r--  scripts/lib/recipetool/create_buildsys.py | 32
-rw-r--r--  scripts/lib/recipetool/create_buildsys_python.py | 105
-rw-r--r--  scripts/lib/recipetool/create_kernel.py | 12
-rw-r--r--  scripts/lib/recipetool/create_kmod.py | 14
-rw-r--r--  scripts/lib/recipetool/create_npm.py | 571
-rw-r--r--  scripts/lib/recipetool/edit.py | 14
-rw-r--r--  scripts/lib/recipetool/licenses.csv | 37
-rw-r--r--  scripts/lib/recipetool/newappend.py | 12
-rw-r--r--  scripts/lib/recipetool/setvar.py | 12
-rw-r--r--  scripts/lib/resulttool/__init__.py | 0
-rw-r--r--  scripts/lib/resulttool/log.py | 104
-rwxr-xr-x  scripts/lib/resulttool/manualexecution.py | 235
-rw-r--r--  scripts/lib/resulttool/merge.py | 46
-rw-r--r--  scripts/lib/resulttool/regression.py | 186
-rw-r--r--  scripts/lib/resulttool/report.py | 312
-rw-r--r--  scripts/lib/resulttool/resultutils.py | 224
-rw-r--r--  scripts/lib/resulttool/store.py | 104
-rw-r--r--  scripts/lib/resulttool/template/test_report_full_text.txt | 79
-rw-r--r--  scripts/lib/scriptpath.py | 12
-rw-r--r--  scripts/lib/scriptutils.py | 90
-rw-r--r--  scripts/lib/wic/__init__.py | 14
-rw-r--r--  scripts/lib/wic/canned-wks/qemuriscv.wks | 3
-rw-r--r--  scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 2
-rw-r--r--  scripts/lib/wic/engine.py | 151
-rw-r--r--  scripts/lib/wic/filemap.py | 93
-rw-r--r--  scripts/lib/wic/help.py | 143
-rw-r--r--  scripts/lib/wic/ksparser.py | 107
-rw-r--r--  scripts/lib/wic/misc.py | 41
-rw-r--r--  scripts/lib/wic/partition.py | 175
-rw-r--r--  scripts/lib/wic/pluginbase.py | 33
-rw-r--r--  scripts/lib/wic/plugins/imager/direct.py | 166
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-biosplusefi.py | 213
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-efi.py | 218
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-partition.py | 17
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-pcbios.py | 32
-rw-r--r--  scripts/lib/wic/plugins/source/empty.py | 32
-rw-r--r--  scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 69
-rw-r--r--  scripts/lib/wic/plugins/source/rawcopy.py | 55
-rw-r--r--  scripts/lib/wic/plugins/source/rootfs.py | 181
-rwxr-xr-x  scripts/lnr | 21
-rw-r--r--  scripts/multilib_header_wrapper.h | 39
-rwxr-xr-x  scripts/native-intercept/ar | 32
-rwxr-xr-x  scripts/native-intercept/chgrp | 5
-rwxr-xr-x  scripts/native-intercept/chown | 3
-rwxr-xr-x  scripts/nativesdk-intercept/chgrp | 27
-rwxr-xr-x  scripts/nativesdk-intercept/chown | 27
-rwxr-xr-x  scripts/oe-build-perf-report | 132
-rwxr-xr-x  scripts/oe-build-perf-test | 13
-rwxr-xr-x  scripts/oe-buildenv-internal | 41
-rwxr-xr-x  scripts/oe-check-sstate | 16
-rwxr-xr-x  scripts/oe-debuginfod | 26
-rwxr-xr-x  scripts/oe-depends-dot | 12
-rwxr-xr-x  scripts/oe-find-native-sysroot | 12
-rwxr-xr-x  scripts/oe-git-archive | 176
-rwxr-xr-x  scripts/oe-git-proxy | 24
-rwxr-xr-x  scripts/oe-gnome-terminal-phonehome | 2
-rwxr-xr-x  scripts/oe-pkgdata-browser | 255
-rw-r--r--  scripts/oe-pkgdata-browser.glade | 337
-rwxr-xr-x  scripts/oe-pkgdata-util | 80
-rwxr-xr-x  scripts/oe-publish-sdk | 26
-rwxr-xr-x  scripts/oe-pylint | 13
-rwxr-xr-x  scripts/oe-run-native | 18
-rwxr-xr-x  scripts/oe-selftest | 14
-rwxr-xr-x  scripts/oe-setup-builddir | 25
-rwxr-xr-x  scripts/oe-test | 4
-rwxr-xr-x  scripts/oe-time-dd-test.sh | 102
-rwxr-xr-x  scripts/oe-trim-schemas | 3
-rwxr-xr-x  scripts/oepydevshell-internal.py | 5
-rwxr-xr-x  scripts/opkg-query-helper.py | 15
-rw-r--r--  scripts/postinst-intercepts/delay_to_first_boot | 4
-rwxr-xr-x  scripts/postinst-intercepts/postinst_intercept | 2
-rw-r--r--  scripts/postinst-intercepts/update_desktop_database | 8
-rw-r--r--  scripts/postinst-intercepts/update_font_cache | 7
-rw-r--r--  scripts/postinst-intercepts/update_gio_module_cache | 3
-rw-r--r--  scripts/postinst-intercepts/update_gtk_icon_cache (renamed from scripts/postinst-intercepts/update_icon_cache) | 6
-rw-r--r--  scripts/postinst-intercepts/update_gtk_immodules_cache | 3
-rw-r--r--  scripts/postinst-intercepts/update_mime_database | 9
-rw-r--r--  scripts/postinst-intercepts/update_pixbuf_cache | 3
-rw-r--r--  scripts/postinst-intercepts/update_udev_hwdb | 20
-rwxr-xr-x  scripts/pybootchartgui/pybootchartgui.py | 2
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/draw.py | 1388
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/gui.py | 208
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/parsing.py | 18
-rwxr-xr-x  scripts/pythondeps | 2
-rwxr-xr-x  scripts/recipetool | 12
-rwxr-xr-x  scripts/relocate_sdk.py | 13
-rwxr-xr-x  scripts/resulttool | 78
-rwxr-xr-x  scripts/rpm2cpio.sh | 4
-rwxr-xr-x  scripts/runqemu | 744
-rwxr-xr-x  scripts/runqemu-addptable2image | 14
-rwxr-xr-x  scripts/runqemu-export-rootfs | 12
-rwxr-xr-x  scripts/runqemu-extract-sdk | 14
-rwxr-xr-x  scripts/runqemu-gen-tapdevs | 12
-rwxr-xr-x  scripts/runqemu-ifdown | 13
-rwxr-xr-x  scripts/runqemu-ifup | 12
-rwxr-xr-x  scripts/send-error-report | 14
-rwxr-xr-x  scripts/send-pull-request | 15
-rwxr-xr-x  scripts/sstate-cache-management.sh | 53
-rwxr-xr-x  scripts/sstate-diff-machines.sh | 8
-rwxr-xr-x  scripts/sstate-sysroot-cruft.sh | 16
-rwxr-xr-x  scripts/sysroot-relativelinks.py | 4
-rwxr-xr-x  scripts/task-time | 3
-rwxr-xr-x  scripts/test-reexec | 15
-rwxr-xr-x  scripts/test-remote-image | 14
-rwxr-xr-x  scripts/tiny/dirsize.py | 16
-rwxr-xr-x  scripts/tiny/ksize.py | 28
-rwxr-xr-x  scripts/tiny/ksum.py | 60
-rwxr-xr-x  scripts/verify-bashisms | 15
-rwxr-xr-x  scripts/wic | 77
-rwxr-xr-x  scripts/yocto-check-layer | 68
-rwxr-xr-x  scripts/yocto-check-layer-wrapper | 8
181 files changed, 8309 insertions, 3947 deletions
diff --git a/scripts/autobuilder-worker-prereq-tests b/scripts/autobuilder-worker-prereq-tests
index 358dd2beee..82e9a77bd5 100755
--- a/scripts/autobuilder-worker-prereq-tests
+++ b/scripts/autobuilder-worker-prereq-tests
@@ -3,7 +3,7 @@
# Script which can be run on new autobuilder workers to check all needed configuration is present.
# Designed to be run in a repo where bitbake/oe-core are already present.
#
-
+# SPDX-License-Identifier: GPL-2.0-only
#
# Todo
# Add testtools/subunit import test
@@ -35,6 +35,11 @@ if [ "$?" != "0" ]; then
echo "Please set git config --global user.email"
exit 1
fi
+python3 -c "import jinja2"
+if [ "$?" != "0" ]; then
+ echo "Please ensure jinja2 is available"
+ exit 1
+fi
bitbake -p
if [ "$?" != "0" ]; then
echo "Bitbake parsing failed"
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
index fa31b52584..e55d98c72e 100755
--- a/scripts/bitbake-prserv-tool
+++ b/scripts/bitbake-prserv-tool
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
help ()
{
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
index 0207777e63..6f4b268119 100755
--- a/scripts/bitbake-whatchanged
+++ b/scripts/bitbake-whatchanged
@@ -4,18 +4,8 @@
# Copyright (c) 2013 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
@@ -227,7 +217,7 @@ print what will be done between the current and last builds, for example:
# Edit the recipes
$ bitbake-whatchanged core-image-sato
-The changes will be printed"
+The changes will be printed.
Note:
The amount of tasks is not accurate when the task is "do_build" since
diff --git a/scripts/buildall-qemu b/scripts/buildall-qemu
new file mode 100755
index 0000000000..ca9aafadf7
--- /dev/null
+++ b/scripts/buildall-qemu
@@ -0,0 +1,120 @@
+#!/bin/sh
+# Copyright (c) 2020 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# buildall-qemu: a tool for automating build testing of recipes
+# TODO: Add support for selecting which qemu architectures to build
+# TODO: Add support for queueing up multiple recipe builds
+# TODO: Add more logging options (e.g. local.conf info, bitbake env info)
+
+usage ()
+{
+ base=$(basename "$0")
+ echo "Usage: $base [options] [recipename/target]"
+ echo "Executes a build of a given target for selected LIBCs. With no options, default to both libc and musl."
+ echo "Options:"
+ echo "-l, --libc Specify one of \"glibc\" or \"musl\""
+}
+
+
+buildall ()
+{
+ # Get path to oe-core directory. Since oe-init-build-env prepends $PATH with
+ # the path to the scripts directory, get it from there
+ SCRIPTS_PATH="$(echo "$PATH" | cut -d ":" -f 1)"
+ OE_CORE_PATH=$(echo "$SCRIPTS_PATH" | sed 's|\(.*\)/.*|\1|')
+
+ # Get target list and host machine information
+ TARGET_LIST=$(find "$OE_CORE_PATH"/meta/conf/machine -maxdepth 1 -type f | grep qemu | sed 's|.*/||' | sed -e 's/\.conf//')
+
+ # Set LIBC value to use for the builds based on options provided by the user
+ if [ -n "$2" ]
+ then
+ LIBC_LIST="$2"
+ echo "$LIBC_LIST"
+ else
+ LIBC_LIST="glibc musl"
+ echo "$LIBC_LIST"
+ fi
+
+ START_TIME=$(date "+%Y-%m-%d_%H:%M:%S")
+ LOG_FILE="$1-buildall.log"
+ OS_INFO=$(grep "PRETTY_NAME=" /etc/os-release | awk -F "=" '{print $2}' | sed -e 's/^"//' -e 's/"$//')
+
+ # Append an existing log file for this build with .old if one exists
+ if [ -f "${LOG_FILE}" ]
+ then
+ mv "${LOG_FILE}" "${LOG_FILE}.old"
+ else
+ touch "${LOG_FILE}"
+ fi
+
+ # Fill the log file with build and host info
+ echo "BUILDALL-QEMU LOG FOR $1" >> "${LOG_FILE}"
+ echo "START TIME: ${START_TIME}" >> "${LOG_FILE}"
+ echo "HOSTNAME: $(uname -n)" >> "${LOG_FILE}"
+ echo "HOST OS: ${OS_INFO}" >> "${LOG_FILE}"
+ echo "HOST KERNEL: $(uname -r)" >> "${LOG_FILE}"
+ echo "===============" >> "${LOG_FILE}"
+ echo "BUILD RESULTS:" >> "${LOG_FILE}"
+
+ # start the builds for each MACHINE and TCLIBC
+ for j in ${LIBC_LIST}
+ do
+ echo "[$j]" >> "${LOG_FILE}"
+ for i in ${TARGET_LIST}
+ do
+ echo "$i" "$j"; \
+ TCLIBC=$j MACHINE=$i bitbake "$1" && echo "PASS: $i" >> "${LOG_FILE}" || echo "FAIL: $i" >> "${LOG_FILE}"
+ done
+ done
+
+ # Get pass/fail totals and add them to the end of the log
+ PASSED=$(grep "PASS:" "${LOG_FILE}" | wc -l)
+ FAILED=$(grep "FAIL:" "${LOG_FILE}" | wc -l)
+
+ echo "===============" >> "${LOG_FILE}"
+ echo "PASSED: ${PASSED}" >> "${LOG_FILE}"
+ echo "FAILED: ${FAILED}" >> "${LOG_FILE}"
+}
+
+
+# fail entire script if any command fails
+set -e
+
+# print usage and exit if not enough args given
+[ $# -eq 0 ] && usage && exit 1
+
+# handle arguments
+RECIPE=
+while [ $# -gt 0 ]
+do
+ arg=$1
+ case $arg in
+ -l|--libc)
+ if [ "$2" = "glibc" ] || [ "$2" = "musl" ]
+ then
+ LIBC_LIST="$2"
+ else
+ echo "Unrecognized libc option."
+ usage && exit 1
+ fi
+ shift
+ shift
+ ;;
+ *)
+ RECIPE="$1"
+ shift
+ ;;
+ esac
+done
+
+set -- "$RECIPE"
+
+# run buildall for the given recipe and LIBC
+if [ -n "$1" ]
+then
+ buildall "$1" "$LIBC_LIST"
+fi
+
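The core of buildall-qemu is one bitbake run per (TCLIBC, MACHINE) pair, with a PASS/FAIL line logged per machine. A minimal Python sketch of that loop (the target and machine subset are hypothetical):

    import os
    import subprocess

    for libc in ("glibc", "musl"):
        for machine in ("qemux86-64", "qemuarm64"):  # hypothetical subset
            env = dict(os.environ, TCLIBC=libc, MACHINE=machine)
            rc = subprocess.run(["bitbake", "core-image-minimal"], env=env).returncode
            print(("PASS: " if rc == 0 else "FAIL: ") + machine)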
diff --git a/scripts/buildhistory-collect-srcrevs b/scripts/buildhistory-collect-srcrevs
index d375b045d8..c937e49c2a 100755
--- a/scripts/buildhistory-collect-srcrevs
+++ b/scripts/buildhistory-collect-srcrevs
@@ -5,18 +5,8 @@
# Copyright 2013 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import os
@@ -63,12 +53,13 @@ def main():
sys.exit(1)
if options.forcevariable:
- forcevariable = '_forcevariable'
+ forcevariable = ':forcevariable'
else:
forcevariable = ''
all_srcrevs = collections.defaultdict(list)
for root, dirs, files in os.walk(options.buildhistory_dir):
+ dirs.sort()
if '.git' in dirs:
dirs.remove('.git')
for fn in files:
@@ -108,9 +99,9 @@ def main():
print('# %s' % curdir)
for pn, name, srcrev in srcrevs:
if name:
- print('SRCREV_%s_pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
+ print('SRCREV_%s:pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
else:
- print('SRCREV_pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
+ print('SRCREV:pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
if __name__ == "__main__":
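The underscore-to-colon changes above track the new colon-based BitBake override syntax, under which SRCREV_pn-foo becomes SRCREV:pn-foo. A sketch of the updated formatting logic (the function name is hypothetical):

    def format_srcrev(pn, srcrev, name=None, forcevariable=False):
        # Emit the colon-based override form used by the patched script.
        suffix = ":forcevariable" if forcevariable else ""
        if name:
            return 'SRCREV_%s:pn-%s%s = "%s"' % (name, pn, suffix, srcrev)
        return 'SRCREV:pn-%s%s = "%s"' % (pn, suffix, srcrev)

    print(format_srcrev("busybox", "abc123"))  # SRCREV:pn-busybox = "abc123"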
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
index 70805b0678..a6e785aa23 100755
--- a/scripts/buildhistory-diff
+++ b/scripts/buildhistory-diff
@@ -4,11 +4,13 @@
#
# Copyright (C) 2013 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
import os
import argparse
-from distutils.version import LooseVersion
# Ensure PythonGit is installed (buildhistory_analysis needs it)
try:
@@ -25,10 +27,12 @@ def get_args_parser():
%(prog)s [options] [from-revision [to-revision]]
(if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")
+ default_dir = os.path.join(os.environ.get('BUILDDIR', '.'), 'buildhistory')
+
parser.add_argument('-p', '--buildhistory-dir',
action='store',
dest='buildhistory_dir',
- default='buildhistory/',
+ default=default_dir,
help="Specify path to buildhistory directory (defaults to buildhistory/ under cwd)")
parser.add_argument('-v', '--report-version',
action='store_true',
@@ -68,20 +72,11 @@ def main():
parser = get_args_parser()
args = parser.parse_args()
- if LooseVersion(git.__version__) < '0.3.1':
- sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
- sys.exit(1)
-
if len(args.revisions) > 2:
sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
parser.print_help()
sys.exit(1)
- if not os.path.exists(args.buildhistory_dir):
- if args.buildhistory_dir == 'buildhistory/':
- cwd = os.getcwd()
- if os.path.basename(cwd) == 'buildhistory':
- args.buildhistory_dir = cwd
if not os.path.exists(args.buildhistory_dir):
sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % args.buildhistory_dir)
diff --git a/scripts/buildstats-diff b/scripts/buildstats-diff
index a128dd324f..2f6498ab67 100755
--- a/scripts/buildstats-diff
+++ b/scripts/buildstats-diff
@@ -4,15 +4,9 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import glob
import logging
@@ -120,7 +114,7 @@ def print_ver_diff(bs1, bs2):
print(fmt_str.format(name, field1, field2, maxlen=maxlen))
-def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',)):
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',), only_tasks=[]):
"""Diff task execution times"""
def val_to_str(val, human_readable=False):
"""Convert raw value to printable string"""
@@ -157,8 +151,9 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
"""Get cumulative sum of all tasks"""
total = 0.0
for recipe_data in buildstats.values():
- for bs_task in recipe_data.tasks.values():
- total += getattr(bs_task, val_type)
+ for name, bs_task in recipe_data.tasks.items():
+ if not only_tasks or name in only_tasks:
+ total += getattr(bs_task, val_type)
return total
if min_val:
@@ -169,7 +164,7 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
# Prepare the data
- tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff)
+ tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff, only_tasks)
# Sort our list
for field in reversed(sort_by):
@@ -254,6 +249,8 @@ Script for comparing buildstats of two separate builds."""
parser.add_argument('--multi', action='store_true',
help="Read all buildstats from the given paths and "
"average over them")
+ parser.add_argument('--only-task', dest='only_tasks', metavar='TASK', action='append', default=[],
+ help="Only include TASK in report. May be specified multiple times")
parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
@@ -272,7 +269,6 @@ Script for comparing buildstats of two separate builds."""
return args
-
def main(argv=None):
"""Script entry point"""
args = parse_args(argv)
@@ -296,7 +292,7 @@ def main(argv=None):
print_ver_diff(bs1, bs2)
else:
print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
- args.min_absdiff, sort_by)
+ args.min_absdiff, sort_by, args.only_tasks)
except ScriptError as err:
log.error(str(err))
return 1
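With the new --only-task option, the cumulative totals and the per-task diff are restricted to the named tasks; the option may be repeated to build up a list. Condensed, the filter works like this (names follow the hunk):

    def cumulative(buildstats, val_type, only_tasks=()):
        # Sum one attribute across all recipes, keeping only selected tasks.
        total = 0.0
        for recipe_data in buildstats.values():
            for name, bs_task in recipe_data.tasks.items():
                if not only_tasks or name in only_tasks:
                    total += getattr(bs_task, val_type)
        return total

A typical invocation would be "buildstats-diff --only-task do_compile --only-task do_configure OLD NEW".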
diff --git a/scripts/combo-layer b/scripts/combo-layer
index dc40e72404..045de65642 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -7,18 +7,8 @@
# Paul Eggleton <paul.eggleton@intel.com>
# Richard Purdie <richard.purdie@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import fnmatch
import os, sys
@@ -90,7 +80,7 @@ class Configuration(object):
logger.debug("Loading config file %s" % self.conffile)
self.parser = configparser.ConfigParser()
with open(self.conffile) as f:
- self.parser.readfp(f)
+ self.parser.read_file(f)
# initialize default values
self.commit_msg_template = "Automatic commit to update last_revision"
@@ -518,7 +508,7 @@ def check_patch(patchfile):
f.close()
if of:
of.close()
- os.rename(patchfile + '.tmp', patchfile)
+ bb.utils.rename(patchfile + '.tmp', patchfile)
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
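Two small modernizations in combo-layer: ConfigParser.readfp(), deprecated since Python 3.2 and removed in 3.12, is replaced by read_file(), and os.rename() becomes bb.utils.rename(), which also copes with renames across filesystem boundaries. The configparser change in isolation (a sketch; the section contents are illustrative):

    import configparser
    import io

    parser = configparser.ConfigParser()
    # read_file() is the supported replacement for the removed readfp().
    parser.read_file(io.StringIO("[component]\nlocal_repo_dir = /tmp/repo\n"))
    print(parser["component"]["local_repo_dir"])  # /tmp/repo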
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh
index 1e3a3b9bc8..11547a9826 100755
--- a/scripts/combo-layer-hook-default.sh
+++ b/scripts/combo-layer-hook-default.sh
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Hook to add source component/revision info to commit message
# Parameter:
# $1 patch-file
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
index 136a25570d..e7bd129e9e 100755
--- a/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh
index 106456584d..b1fff0f344 100755
--- a/scripts/contrib/bb-perf/bb-matrix.sh
+++ b/scripts/contrib/bb-perf/bb-matrix.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script runs BB_CMD (typically building core-image-sato) for all
diff --git a/scripts/contrib/bb-perf/buildstats-plot.sh b/scripts/contrib/bb-perf/buildstats-plot.sh
index 7e8ae0410e..45c27d0b97 100755
--- a/scripts/contrib/bb-perf/buildstats-plot.sh
+++ b/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -1,21 +1,8 @@
#!/usr/bin/env bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
#
@@ -52,7 +39,10 @@ set -o errexit
BS_DIR="tmp/buildstats"
N=10
+RECIPE=""
+TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="utime"
+ACCUMULATE=""
SUM=""
OUTDATA_FILE="$PWD/buildstats-plot.out"
@@ -64,11 +54,15 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
(default: "$BS_DIR")
-n N Top N recipes to display. Ignored if -S is present
(default: "$N")
+ -r recipe The recipe mask to be searched
+ -t tasks The tasks to be computed
+ (default: "$TASKS")
-s stats The stats to be matched. If more than one stat, units
should be the same because the data is plotted as a histogram.
(see buildstats.sh -h for all options) or any other defined
(build)stat separated by colons, i.e. stime:utime
(default: "$STATS")
+ -a Accumulate all stats values for found recipes
-S Sum values for a particular stat for found recipes
-o Output data file.
(default: "$OUTDATA_FILE")
@@ -77,32 +71,41 @@ EOM
}
# Parse and validate arguments
-while getopts "b:n:s:o:Sh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- n)
- N="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- S)
- SUM="y"
- ;;
- o)
- OUTDATA_FILE="$OPTARG"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:n:r:t:s:o:aSh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ n)
+ N="$OPTARG"
+ ;;
+ r)
+ RECIPE="-r $OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="-a"
+ ;;
+ S)
+ SUM="y"
+ ;;
+ o)
+ OUTDATA_FILE="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Get number of stats
@@ -114,10 +117,10 @@ CD=$(dirname $0)
# Parse buildstats recipes to produce a single table
OUTBUILDSTATS="$PWD/buildstats.log"
-$CD/buildstats.sh -H -s "$STATS" -H > $OUTBUILDSTATS
+$CD/buildstats.sh -b "$BS_DIR" -s "$STATS" -t "$TASKS" $RECIPE $ACCUMULATE -H > $OUTBUILDSTATS
# Get headers
-HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e 's/\(.*\)/"\1"/' -e '1s/ /\\\\\\\\ /g' -e 's/_/\\\\\\\\_/g' -e '1s/:/" "/gp')
echo -e "set boxwidth 0.9 relative"
echo -e "set style data histograms"
@@ -126,7 +129,7 @@ echo -e "set xtics rotate by 45 right"
# Get output data
if [ -z "$SUM" ]; then
- cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+ cat $OUTBUILDSTATS | sed -e '1d' -e 's/_/\\\\_/g' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
# include task at recipe column
sed -i -e "1i\
${HEADERS}" $OUTDATA_FILE
@@ -138,8 +141,8 @@ else
declare -a sumargs
j=0
for i in `seq $nstats`; do
- sumargs[j]=sum; j=$(( $j + 1 ))
- sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
+ sumargs[j]=sum; j=$(( $j + 1 ))
+ sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
done
# Do the processing with datamash
diff --git a/scripts/contrib/bb-perf/buildstats.sh b/scripts/contrib/bb-perf/buildstats.sh
index 8d7e2488f0..e45cfc146d 100755
--- a/scripts/contrib/bb-perf/buildstats.sh
+++ b/scripts/contrib/bb-perf/buildstats.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# Given 'buildstats' data (generated by bitbake when setting
@@ -49,8 +36,10 @@ Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rus
Child rusage ru_nivcsw"
BS_DIR="tmp/buildstats"
+RECIPE=""
TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="$TIME"
+ACCUMULATE=""
HEADER="" # No header by default
function usage {
@@ -59,6 +48,7 @@ cat <<EOM
Usage: $CMD [-b buildstats_dir] [-t do_task]
-b buildstats The path where the folder resides
(default: "$BS_DIR")
+ -r recipe The recipe to be computed
-t tasks The tasks to be computed
(default: "$TASKS")
-s stats The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
@@ -69,87 +59,109 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
IO=$IO
RUSAGE=$RUSAGE
CHILD_RUSAGE=$CHILD_RUSAGE
+ -a Accumulate all stats values for found recipes
-h Display this help message
EOM
}
# Parse and validate arguments
-while getopts "b:t:s:Hh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- t)
- TASKS="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- H)
- HEADER="y"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:r:t:s:aHh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ r)
+ RECIPE="$OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="y"
+ ;;
+ H)
+ HEADER="y"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Ensure the buildstats folder exists
if [ ! -d "$BS_DIR" ]; then
- echo "ERROR: $BS_DIR does not exist"
- usage
- exit 1
+ echo "ERROR: $BS_DIR does not exist"
+ usage
+ exit 1
fi
stats=""
IFS=":"
for stat in ${STATS}; do
- case $stat in
- TIME)
- stats="${stats}:${TIME}"
- ;;
- IO)
- stats="${stats}:${IO}"
- ;;
- RUSAGE)
- stats="${stats}:${RUSAGE}"
- ;;
- CHILD_RUSAGE)
- stats="${stats}:${CHILD_RUSAGE}"
- ;;
- *)
- stats="${STATS}"
- esac
+ case $stat in
+ TIME)
+ stats="${stats}:${TIME}"
+ ;;
+ IO)
+ stats="${stats}:${IO}"
+ ;;
+ RUSAGE)
+ stats="${stats}:${RUSAGE}"
+ ;;
+ CHILD_RUSAGE)
+ stats="${stats}:${CHILD_RUSAGE}"
+ ;;
+ *)
+ stats="${STATS}"
+ ;;
+ esac
done
# remove possible colon at the beginning
stats="$(echo "$stats" | sed -e 's/^://1')"
# Provide a header if required by the user
-[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
+if [ -n "$HEADER" ] ; then
+ if [ -n "$ACCUMULATE" ]; then
+ echo "task:recipe:accumulated(${stats//:/;})"
+ else
+ echo "task:recipe:$stats"
+ fi
+fi
for task in ${TASKS}; do
task="do_${task}"
- for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+ for file in $(find ${BS_DIR} -type f -path *${RECIPE}*/${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
recipe="$(basename $(dirname $file))"
- times=""
- for stat in ${stats}; do
- [ -z "$stat" ] && { echo "empty stats"; }
- time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
- # in case the stat is not present, set the value as NA
- [ -z "$time" ] && { time="NA"; }
- # Append it to times
- if [ -z "$times" ]; then
- times="${time}"
- else
- times="${times} ${time}"
- fi
- done
+ times=""
+ for stat in ${stats}; do
+ [ -z "$stat" ] && { echo "empty stats"; }
+ time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+ # in case the stat is not present, set the value as NA
+ [ -z "$time" ] && { time="NA"; }
+ # Append it to times
+ if [ -z "$times" ]; then
+ times="${time}"
+ else
+ times="${times} ${time}"
+ fi
+ done
+ if [ -n "$ACCUMULATE" ]; then
+ IFS=' '; valuesarray=(${times}); IFS=':'
+ times=0
+ for value in "${valuesarray[@]}"; do
+ [ "$value" == "NA" ] && { echo "ERROR: stat is not present."; usage; exit 1; }
+ times=$(( $times + $value ))
+ done
+ fi
echo "${task} ${recipe} ${times}"
done
done
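The new -a mode collapses the per-stat values into a single sum per recipe, and treats a missing stat ("NA") as a hard error rather than silently skewing the total. The same accumulation in Python (values are illustrative):

    values = ["123", "45"]  # one entry per requested stat
    if "NA" in values:
        raise SystemExit("ERROR: stat is not present.")
    print(sum(int(v) for v in values))  # 168, mirroring the shell arithmetic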
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index 286b5a9405..090133600b 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -1,18 +1,6 @@
#!/usr/bin/env python3
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh
index 7cbb5d794f..0a85e6e708 100755
--- a/scripts/contrib/build-perf-test-wrapper.sh
+++ b/scripts/contrib/build-perf-test-wrapper.sh
@@ -4,15 +4,7 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
+# SPDX-License-Identifier: GPL-2.0-only
#
# This script is a simple wrapper around the actual build performance tester
# script. This script initializes the build environment, runs
@@ -95,21 +87,10 @@ if [ $# -ne 0 ]; then
exit 1
fi
-if [ -n "$email_to" ]; then
- if ! [ -x "$(command -v phantomjs)" ]; then
- echo "ERROR: Sending email needs phantomjs."
- exit 1
- fi
- if ! [ -x "$(command -v optipng)" ]; then
- echo "ERROR: Sending email needs optipng."
- exit 1
- fi
-fi
-
# Open a file descriptor for flock and acquire lock
LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
if ! exec 3> "$LOCK_FILE"; then
- echo "ERROR: Unable to open lock file"
+ echo "ERROR: Unable to open loemack file"
exit 1
fi
if ! flock -n 3; then
@@ -234,7 +215,7 @@ if [ -n "$results_repo" ]; then
if [ -n "$email_to" ]; then
echo "Emailing test report"
os_name=`get_os_release_var PRETTY_NAME`
- "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
fi
# Upload report files, unless we're on detached head
diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py
new file mode 100755
index 0000000000..4d41a4c475
--- /dev/null
+++ b/scripts/contrib/convert-overrides.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python3
+#
+# Conversion script to add new override syntax to existing bitbake metadata
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# To use this script on a new layer you need to list the overrides the
+# layer is known to use in the list below.
+#
+# Known constraint: Matching is 'loose' and in particular will find variable
+# and function names with "_append" and "_remove" in them. Those need to be
+# filtered out manually or in the skip list below.
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+# List of strings to treat as overrides
+vars = ["append", "prepend", "remove"]
+vars = vars + ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
+vars = vars + ["genericx86", "edgerouter", "beaglebone-yocto"]
+vars = vars + ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
+vars = vars + ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
+vars = vars + ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
+vars = vars + ["tune-", "pn-", "forcevariable"]
+vars = vars + ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
+vars = vars + ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
+vars = vars + ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
+vars = vars + ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
+vars = vars + ["linux-gnueabi", "eabi"]
+vars = vars + ["virtclass-multilib", "virtclass-mcextend"]
+
+# List of strings to treat as overrides but only with whitespace following or another override (more restricted matching).
+# Handles issues with arc matching arch.
+shortvars = ["arc", "mips", "mipsel", "sh4"]
+
+# Variables which take packagenames as an override
+packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY",
+ "pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE",
+ "PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst",
+ "pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA",
+ "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"]
+
+# Expressions to skip if encountered, these are not overrides
+skips = ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
+skips = skips + ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
+skips = skips + ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
+skips = skips + ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
+skips = skips + ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
+skips = skips + ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
+skips = skips + ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
+skips = skips + ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
+skips = skips + ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
+
+imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"]
+packagevars = packagevars + imagevars
+
+vars_re = {}
+for exp in vars:
+ vars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
+
+shortvars_re = {}
+for exp in shortvars:
+ shortvars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + '([\(\'"\s:])'), r"\1:" + exp + r"\3")
+
+package_re = {}
+for exp in packagevars:
+ package_re[exp] = (re.compile('(^|[#\'"\s\-\+]+)' + exp + '_' + '([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
+
+# Other substitutions to make
+subs = {
+ 'r = re.compile("([^:]+):\s*(.*)")' : 'r = re.compile("(^.+?):\s+(.*)")',
+ "val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))",
+ "f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))",
+ "d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))",
+ 'ret.append(v + "_" + p)' : 'ret.append(v + ":" + p)',
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ skip = False
+ for s in skips:
+ if s in line:
+ skip = True
+ if "ptest_append" in line or "ptest_remove" in line or "ptest_prepend" in line:
+ skip = False
+ for sub in subs:
+ if sub in line:
+ line = line.replace(sub, subs[sub])
+ skip = True
+ if not skip:
+ for pvar in packagevars:
+ line = package_re[pvar][0].sub(package_re[pvar][1], line)
+ for var in vars:
+ line = vars_re[var][0].sub(vars_re[var][1], line)
+ for shortvar in shortvars:
+ line = shortvars_re[shortvar][0].sub(shortvars_re[shortvar][1], line)
+ if "pkg_postinst:ontarget" in line:
+ line = line.replace("pkg_postinst:ontarget", "pkg_postinst_ontarget")
+ new_file.write(line)
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.9.3"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-spdx-licenses.py b/scripts/contrib/convert-spdx-licenses.py
new file mode 100755
index 0000000000..4e194dee3f
--- /dev/null
+++ b/scripts/contrib/convert-spdx-licenses.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+#
+# Conversion script to change LICENSE entries to SPDX identifiers
+#
+# Copyright (C) 2021-2022 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+license_map = {
+"AGPL-3" : "AGPL-3.0-only",
+"AGPL-3+" : "AGPL-3.0-or-later",
+"AGPLv3" : "AGPL-3.0-only",
+"AGPLv3+" : "AGPL-3.0-or-later",
+"AGPLv3.0" : "AGPL-3.0-only",
+"AGPLv3.0+" : "AGPL-3.0-or-later",
+"AGPL-3.0" : "AGPL-3.0-only",
+"AGPL-3.0+" : "AGPL-3.0-or-later",
+"BSD-0-Clause" : "0BSD",
+"GPL-1" : "GPL-1.0-only",
+"GPL-1+" : "GPL-1.0-or-later",
+"GPLv1" : "GPL-1.0-only",
+"GPLv1+" : "GPL-1.0-or-later",
+"GPLv1.0" : "GPL-1.0-only",
+"GPLv1.0+" : "GPL-1.0-or-later",
+"GPL-1.0" : "GPL-1.0-only",
+"GPL-1.0+" : "GPL-1.0-or-later",
+"GPL-2" : "GPL-2.0-only",
+"GPL-2+" : "GPL-2.0-or-later",
+"GPLv2" : "GPL-2.0-only",
+"GPLv2+" : "GPL-2.0-or-later",
+"GPLv2.0" : "GPL-2.0-only",
+"GPLv2.0+" : "GPL-2.0-or-later",
+"GPL-2.0" : "GPL-2.0-only",
+"GPL-2.0+" : "GPL-2.0-or-later",
+"GPL-3" : "GPL-3.0-only",
+"GPL-3+" : "GPL-3.0-or-later",
+"GPLv3" : "GPL-3.0-only",
+"GPLv3+" : "GPL-3.0-or-later",
+"GPLv3.0" : "GPL-3.0-only",
+"GPLv3.0+" : "GPL-3.0-or-later",
+"GPL-3.0" : "GPL-3.0-only",
+"GPL-3.0+" : "GPL-3.0-or-later",
+"LGPLv2" : "LGPL-2.0-only",
+"LGPLv2+" : "LGPL-2.0-or-later",
+"LGPLv2.0" : "LGPL-2.0-only",
+"LGPLv2.0+" : "LGPL-2.0-or-later",
+"LGPL-2.0" : "LGPL-2.0-only",
+"LGPL-2.0+" : "LGPL-2.0-or-later",
+"LGPL2.1" : "LGPL-2.1-only",
+"LGPL2.1+" : "LGPL-2.1-or-later",
+"LGPLv2.1" : "LGPL-2.1-only",
+"LGPLv2.1+" : "LGPL-2.1-or-later",
+"LGPL-2.1" : "LGPL-2.1-only",
+"LGPL-2.1+" : "LGPL-2.1-or-later",
+"LGPLv3" : "LGPL-3.0-only",
+"LGPLv3+" : "LGPL-3.0-or-later",
+"LGPL-3.0" : "LGPL-3.0-only",
+"LGPL-3.0+" : "LGPL-3.0-or-later",
+"MPL-1" : "MPL-1.0",
+"MPLv1" : "MPL-1.0",
+"MPLv1.1" : "MPL-1.1",
+"MPLv2" : "MPL-2.0",
+"MIT-X" : "MIT",
+"MIT-style" : "MIT",
+"openssl" : "OpenSSL",
+"PSF" : "PSF-2.0",
+"PSFv2" : "PSF-2.0",
+"Python-2" : "Python-2.0",
+"Apachev2" : "Apache-2.0",
+"Apache-2" : "Apache-2.0",
+"Artisticv1" : "Artistic-1.0",
+"Artistic-1" : "Artistic-1.0",
+"AFL-2" : "AFL-2.0",
+"AFL-1" : "AFL-1.2",
+"AFLv2" : "AFL-2.0",
+"AFLv1" : "AFL-1.2",
+"CDDLv1" : "CDDL-1.0",
+"CDDL-1" : "CDDL-1.0",
+"EPLv1.0" : "EPL-1.0",
+"FreeType" : "FTL",
+"Nauman" : "Naumen",
+"tcl" : "TCL",
+"vim" : "Vim",
+"SGIv1" : "SGI-1",
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if not line.startswith("LICENSE"):
+ new_file.write(line)
+ continue
+ orig = line
+ for license in sorted(license_map, key=len, reverse=True):
+ for ending in ['"', "'", " ", ")"]:
+ line = line.replace(license + ending, license_map[license] + ending)
+ if orig != line:
+ modified = True
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.01"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-srcuri.py b/scripts/contrib/convert-srcuri.py
new file mode 100755
index 0000000000..587392334f
--- /dev/null
+++ b/scripts/contrib/convert-srcuri.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+#
+# Conversion script to update SRC_URI to add branch to git urls
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+def processfile(fn):
+ def matchline(line):
+ if "MIRROR" in line or ".*" in line or "GNOME_GIT" in line:
+ return False
+ return True
+ print("processing file '%s'" % fn)
+ try:
+ if "distro_alias.inc" in fn or "linux-yocto-custom.bb" in fn:
+ return
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if ("git://" in line or "gitsm://" in line) and "branch=" not in line and matchline(line):
+ if line.endswith('"\n'):
+ line = line.replace('"\n', ';branch=master"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';branch=master \\\\', line)
+ modified = True
+ if ("git://" in line or "gitsm://" in line) and "github.com" in line and "protocol=https" not in line and matchline(line):
+ if "protocol=git" in line:
+ line = line.replace('protocol=git', 'protocol=https')
+ elif line.endswith('"\n'):
+ line = line.replace('"\n', ';protocol=https"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';protocol=https \\\\', line)
+ modified = True
+ new_file.write(line)
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-variable-renames.py b/scripts/contrib/convert-variable-renames.py
new file mode 100755
index 0000000000..856c001e11
--- /dev/null
+++ b/scripts/contrib/convert-variable-renames.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+#
+# Conversion script to rename variables to versions with improved terminology.
+# Also highlights potentially problematic language and removed variables.
+#
+# Copyright (C) 2021 Richard Purdie
+# Copyright (C) 2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+renames = {
+"BB_ENV_WHITELIST" : "BB_ENV_PASSTHROUGH",
+"BB_ENV_EXTRAWHITE" : "BB_ENV_PASSTHROUGH_ADDITIONS",
+"BB_HASHCONFIG_WHITELIST" : "BB_HASHCONFIG_IGNORE_VARS",
+"BB_SETSCENE_ENFORCE_WHITELIST" : "BB_SETSCENE_ENFORCE_IGNORE_TASKS",
+"BB_HASHBASE_WHITELIST" : "BB_BASEHASH_IGNORE_VARS",
+"BB_HASHTASK_WHITELIST" : "BB_TASKHASH_IGNORE_TASKS",
+"CVE_CHECK_PN_WHITELIST" : "CVE_CHECK_SKIP_RECIPE",
+"CVE_CHECK_WHITELIST" : "CVE_CHECK_IGNORE",
+"MULTI_PROVIDER_WHITELIST" : "BB_MULTI_PROVIDER_ALLOWED",
+"PNBLACKLIST" : "SKIP_RECIPE",
+"SDK_LOCAL_CONF_BLACKLIST" : "ESDK_LOCALCONF_REMOVE",
+"SDK_LOCAL_CONF_WHITELIST" : "ESDK_LOCALCONF_ALLOW",
+"SDK_INHERIT_BLACKLIST" : "ESDK_CLASS_INHERIT_DISABLE",
+"SSTATE_DUPWHITELIST" : "SSTATE_ALLOW_OVERLAP_FILES",
+"SYSROOT_DIRS_BLACKLIST" : "SYSROOT_DIRS_IGNORE",
+"UNKNOWN_CONFIGURE_WHITELIST" : "UNKNOWN_CONFIGURE_OPT_IGNORE",
+"ICECC_USER_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_SYSTEM_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_USER_PACKAGE_WL" : "ICECC_RECIPE_ENABLE",
+"ICECC_USER_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"ICECC_SYSTEM_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"LICENSE_FLAGS_WHITELIST" : "LICENSE_FLAGS_ACCEPTED",
+}
+
+removed_list = [
+"BB_STAMP_WHITELIST",
+"BB_STAMP_POLICY",
+"INHERIT_BLACKLIST",
+"TUNEABI_WHITELIST",
+]
+
+context_check_list = [
+"blacklist",
+"whitelist",
+"abort",
+]
+
+def processfile(fn):
+
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ lineno = 0
+ for line in old_file:
+ lineno += 1
+ if not line or "BB_RENAMED_VARIABLE" in line:
+ continue
+ # Do the renames
+ for old_name, new_name in renames.items():
+ if old_name in line:
+ line = line.replace(old_name, new_name)
+ modified = True
+ # Find removed names
+ for removed_name in removed_list:
+ if removed_name in line:
+ print("%s needs further work at line %s because %s has been deprecated" % (fn, lineno, removed_name))
+ for check_word in context_check_list:
+ if re.search(check_word, line, re.IGNORECASE):
+ print("%s needs further work at line %s since it contains %s"% (fn, lineno, check_word))
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ print("*** Modified file '%s'" % (fn))
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "ChangeLog" in fn or "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage
index ab929957a5..7f2ad112a6 100755
--- a/scripts/contrib/ddimage
+++ b/scripts/contrib/ddimage
@@ -1,8 +1,7 @@
#!/bin/sh
-
-# Default to avoiding the first two disks on typical Linux and Mac OS installs
-# Better safe than sorry :-)
-BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# 1MB blocksize
BLOCKSIZE=1048576
@@ -29,7 +28,6 @@ image_details() {
}
device_details() {
- DEV=$1
BLOCK_SIZE=512
echo "Device details"
@@ -42,11 +40,17 @@ device_details() {
fi
# Default / Linux information collection
- echo " device: $DEVICE"
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+ DEV=`basename $ACTUAL_DEVICE`
+ if [ "$ACTUAL_DEVICE" != "$DEVICE" ] ; then
+ echo " device: $DEVICE -> $ACTUAL_DEVICE"
+ else
+ echo " device: $DEVICE"
+ fi
if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
else
- echo " vendor: UNKOWN"
+ echo " vendor: UNKNOWN"
fi
if [ -f "/sys/class/block/$DEV/device/model" ]; then
echo " model: $(cat /sys/class/block/$DEV/device/model)"
@@ -61,6 +65,49 @@ device_details() {
echo ""
}
+check_mount_device() {
+ if cat /proc/self/mounts | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1$" ; then
+ return 0
+ fi
+ return 1
+}
+
+is_mounted() {
+ if [ "$(uname)" = "Darwin" ]; then
+ if df | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1(s[0-9]+)?$" ; then
+ return 0
+ fi
+ else
+ if check_mount_device $1 ; then
+ return 0
+ fi
+ DEV=`basename $1`
+ if [ -d /sys/class/block/$DEV/ ] ; then
+ PARENT_BLKDEV=`basename $(readlink -f "/sys/class/block/$DEV/..")`
+ if [ "$PARENT_BLKDEV" != "block" ] ; then
+ if check_mount_device $PARENT_BLKDEV ; then
+ return 0
+ fi
+ fi
+ for CHILD_BLKDEV in `find /sys/class/block/$DEV/ -mindepth 1 -maxdepth 1 -name "$DEV*" -type d`
+ do
+ if check_mount_device /dev/`basename $CHILD_BLKDEV` ; then
+ return 0
+ fi
+ done
+ fi
+ fi
+ return 1
+}
+
+is_inuse() {
+ HOLDERS_DIR="/sys/class/block/`basename $1`/holders"
+ if [ -d $HOLDERS_DIR ] && [ `ls -A $HOLDERS_DIR` ] ; then
+ return 0
+ fi
+ return 1
+}
+
if [ $# -ne 2 ]; then
usage
exit 1
@@ -75,22 +122,37 @@ if [ ! -e "$IMAGE" ]; then
exit 1
fi
+if [ ! -e "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist"
+ usage
+ exit 1
+fi
-for i in ${BLACKLIST_DEVICES}; do
- if [ "$i" = "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE is blacklisted"
- exit 1
- fi
-done
+if [ "$(uname)" = "Darwin" ]; then
+ # readlink doesn't support -f on MacOS, just assume it isn't a symlink
+ ACTUAL_DEVICE=$DEVICE
+else
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+fi
+if is_mounted $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently mounted - check if this is the right device, and unmount it first if so"
+ device_details
+ exit 1
+fi
+if is_inuse $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently in use (possibly part of LVM) - check if this is the right device!"
+ device_details
+ exit 1
+fi
if [ ! -w "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE does not exist or is not writable"
+ echo "ERROR: Device $DEVICE is not writable - possibly use sudo?"
usage
exit 1
fi
image_details $IMAGE
-device_details $(basename $DEVICE)
+device_details
printf "Write $IMAGE to $DEVICE [y/N]? "
read RESPONSE
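
The static BLACKLIST_DEVICES list is gone; the script now asks the kernel directly whether the target is safe to write. The two interfaces it reads can be exercised from Python as well; a minimal sketch of the same checks (function names are illustrative, not part of the script):

    import os

    def is_inuse(dev):
        # A device with entries under /sys/class/block/<name>/holders is
        # claimed by another layer (LVM, dm-crypt, md); writing to it
        # directly would corrupt that stack.
        holders = os.path.join('/sys/class/block', os.path.basename(dev), 'holders')
        return os.path.isdir(holders) and bool(os.listdir(holders))

    def is_mounted(dev):
        # /proc/self/mounts lists "<source> <target> <fstype> ..." per line;
        # compare the source field against the resolved device path.
        real = os.path.realpath(dev)
        with open('/proc/self/mounts') as f:
            return any(line.split()[0] == real for line in f)
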
diff --git a/scripts/contrib/devtool-stress.py b/scripts/contrib/devtool-stress.py
index d555c51a65..81046ecf49 100755
--- a/scripts/contrib/devtool-stress.py
+++ b/scripts/contrib/devtool-stress.py
@@ -6,18 +6,7 @@
#
# Copyright 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control
index 7550ea53be..ad6070c369 100755
--- a/scripts/contrib/dialog-power-control
+++ b/scripts/contrib/dialog-power-control
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Simple script to show a manual power prompt for when you want to use
# automated hardware testing with testimage.bbclass but you don't have a
# web-enabled power strip or similar to do the power on/off/cycle.
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
index 2144aac936..36f7f3287c 100755
--- a/scripts/contrib/documentation-audit.sh
+++ b/scripts/contrib/documentation-audit.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Perform an audit of which packages provide documentation and which
# are missing -doc packages.
#
@@ -7,7 +9,6 @@
# this script after source'ing the build environment script, so you're
# running it from build/ directory.
#
-# Maintainer: Scott Garman <scott.a.garman@intel.com>
REPORT_DOC_SIMPLE="documentation_exists.txt"
REPORT_DOC_DETAIL="documentation_exists_detail.txt"
@@ -25,8 +26,8 @@ if [ -z "$BITBAKE" ]; then
fi
echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
-echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
-echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+echo "REMINDER: you need to set LICENSE_FLAGS_ACCEPTED appropriately in local.conf or "
+echo " you'll get false positives. For example, LICENSE_FLAGS_ACCEPTED = \"commercial\""
for pkg in `bitbake -s | awk '{ print \$1 }'`; do
if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
diff --git a/scripts/contrib/graph-tool b/scripts/contrib/graph-tool
index 1df5b8c345..26488930e0 100755
--- a/scripts/contrib/graph-tool
+++ b/scripts/contrib/graph-tool
@@ -7,21 +7,17 @@
#
# Copyright 2013 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
+import os
+import argparse
+
+scripts_lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib'))
+sys.path.insert(0, scripts_lib_path)
+import argparse_oe
+
def get_path_networkx(dotfile, fromnode, tonode):
try:
@@ -45,47 +41,78 @@ def get_path_networkx(dotfile, fromnode, tonode):
return networkx.all_simple_paths(graph, source=fromnode, target=tonode)
-def find_paths(args, usage):
- if len(args) < 3:
- usage()
- sys.exit(1)
-
- fromnode = args[1]
- tonode = args[2]
-
+def find_paths(args):
path = None
- for path in get_path_networkx(args[0], fromnode, tonode):
+ for path in get_path_networkx(args.dotfile, args.fromnode, args.tonode):
print(" -> ".join(map(str, path)))
if not path:
- print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
- sys.exit(1)
+ print("ERROR: no path from %s to %s in graph" % (args.fromnode, args.tonode))
+ return 1
+
+
+def filter_graph(args):
+ import fnmatch
+
+ exclude_tasks = []
+ if args.exclude_tasks:
+ for task in args.exclude_tasks.split(','):
+ if not task.startswith('do_'):
+ task = 'do_%s' % task
+ exclude_tasks.append(task)
+
+ def checkref(strval):
+ strval = strval.strip().strip('"')
+ target, taskname = strval.rsplit('.', 1)
+ if exclude_tasks:
+ for extask in exclude_tasks:
+ if fnmatch.fnmatch(taskname, extask):
+ return False
+ if strval in args.ref or target in args.ref:
+ return True
+ return False
+
+ with open(args.infile, 'r') as f:
+ for line in f:
+ line = line.rstrip()
+ if line.startswith(('digraph', '}')):
+ print(line)
+ elif '->' in line:
+ linesplit = line.split('->')
+ if checkref(linesplit[0]) and checkref(linesplit[1]):
+ print(line)
+ elif (not args.no_nodes) and checkref(line.split()[0]):
+ print(line)
+
def main():
- import optparse
- parser = optparse.OptionParser(
- usage = '''%prog [options] <command> <arguments>
+ parser = argparse_oe.ArgumentParser(description='Small utility for working with .dot graph files')
-Available commands:
- find-paths <dotfile> <from> <to>
- Find all of the paths between two nodes in a dot graph''')
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
- #parser.add_option("-d", "--debug",
- # help = "Report all SRCREV values, not just ones where AUTOREV has been used",
- # action="store_true", dest="debug", default=False)
+ parser_find_paths = subparsers.add_parser('find-paths',
+ help='Find all of the paths between two nodes in a dot graph',
+ description='Finds all of the paths between two nodes in a dot graph')
+ parser_find_paths.add_argument('dotfile', help='.dot graph to search in')
+ parser_find_paths.add_argument('fromnode', help='starting node name')
+ parser_find_paths.add_argument('tonode', help='ending node name')
+ parser_find_paths.set_defaults(func=find_paths)
- options, args = parser.parse_args(sys.argv)
- args = args[1:]
+ parser_filter = subparsers.add_parser('filter',
+ help='Pare down a task graph to contain only the specified references',
+ description='Pares down a task-depends.dot graph produced by bitbake -g to contain only the specified references')
+ parser_filter.add_argument('infile', help='Input file')
+ parser_filter.add_argument('ref', nargs='+', help='Reference to include (either recipe/target name or full target.taskname specification)')
+ parser_filter.add_argument('-n', '--no-nodes', action='store_true', help='Skip node formatting lines')
+ parser_filter.add_argument('-x', '--exclude-tasks', help='Comma-separated list of tasks to exclude (do_ prefix optional, wildcards allowed)')
+ parser_filter.set_defaults(func=filter_graph)
- if len(args) < 1:
- parser.print_help()
- sys.exit(1)
+ args = parser.parse_args()
- if args[0] == "find-paths":
- find_paths(args[1:], parser.print_help)
- else:
- parser.print_help()
- sys.exit(1)
+ ret = args.func(args)
+ return ret
if __name__ == "__main__":
- main()
+ ret = main()
+ sys.exit(ret)
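
The optparse-to-argparse conversion above is the standard subcommand pattern: each subcommand gets its own sub-parser plus a func default, and main() dispatches through the parsed namespace. A minimal standalone sketch of the pattern with plain argparse (argparse_oe layers friendlier error handling on top of this):

    import argparse
    import sys

    def find_paths(args):
        print('would search %s from %s to %s' % (args.dotfile, args.fromnode, args.tonode))
        return 0

    parser = argparse.ArgumentParser(description='subcommand pattern demo')
    subparsers = parser.add_subparsers(dest='subcommand', metavar='<subcommand>')
    subparsers.required = True    # a subcommand must be given, as in graph-tool

    sub = subparsers.add_parser('find-paths', help='find paths between two nodes')
    sub.add_argument('dotfile')
    sub.add_argument('fromnode')
    sub.add_argument('tonode')
    sub.set_defaults(func=find_paths)    # dispatch target for this subcommand

    args = parser.parse_args()
    sys.exit(args.func(args))
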
diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest
new file mode 100755
index 0000000000..3c07a73a4e
--- /dev/null
+++ b/scripts/contrib/image-manifest
@@ -0,0 +1,523 @@
+#!/usr/bin/env python3
+
+# Script to extract information from image manifests
+#
+# Copyright (C) 2018 Intel Corporation
+# Copyright (C) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import argparse
+import logging
+import json
+import shutil
+import tempfile
+import tarfile
+from collections import OrderedDict
+
+scripts_path = os.path.dirname(__file__)
+lib_path = scripts_path + '/../lib'
+sys.path = sys.path + [lib_path]
+
+import scriptutils
+logger = scriptutils.logger_create(os.path.basename(__file__))
+
+import argparse_oe
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+logger.debug('Using standard bitbake path %s' % bitbakepath)
+scriptpath.add_oe_lib_path()
+
+import bb.tinfoil
+import bb.utils
+import oe.utils
+import oe.recipeutils
+
+def get_pkg_list(manifest):
+ pkglist = []
+ with open(manifest, 'r') as f:
+ for line in f:
+ linesplit = line.split()
+ if len(linesplit) == 3:
+ # manifest file
+ pkglist.append(linesplit[0])
+ elif len(linesplit) == 1:
+ # build dependency file
+ pkglist.append(linesplit[0])
+ return sorted(pkglist)
+
+def list_packages(args):
+ pkglist = get_pkg_list(args.manifest)
+ for pkg in pkglist:
+ print('%s' % pkg)
+
+def pkg2recipe(tinfoil, pkg):
+ if "-native" in pkg:
+ logger.info('skipping %s' % pkg)
+ return None
+
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ pkgdatafile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ logger.debug('pkgdatafile %s' % pkgdatafile)
+    try:
+        with open(pkgdatafile, 'r') as f:
+            for line in f:
+                if line.startswith('PN:'):
+                    return line.split(':', 1)[1].strip()
+    except Exception:
+        logger.warning('%s is missing' % pkgdatafile)
+    # no PN: line found, or the pkgdata file was missing
+    return None
+
+def get_recipe_list(manifest, tinfoil):
+ pkglist = get_pkg_list(manifest)
+ recipelist = []
+ for pkg in pkglist:
+        recipe = pkg2recipe(tinfoil, pkg)
+        if recipe:
+            if recipe not in recipelist:
+                recipelist.append(recipe)
+
+ return sorted(recipelist)
+
+def list_recipes(args):
+ import bb.tinfoil
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+ recipelist = get_recipe_list(args.manifest, tinfoil)
+ for recipe in sorted(recipelist):
+ print('%s' % recipe)
+
+def list_layers(args):
+
+ def find_git_repo(pth):
+ checkpth = pth
+ while checkpth != os.sep:
+ if os.path.exists(os.path.join(checkpth, '.git')):
+ return checkpth
+ checkpth = os.path.dirname(checkpth)
+ return None
+
+ def get_git_remote_branch(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', '--abbrev-ref', '--symbolic-full-name', '@{u}'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_head_commit(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', 'HEAD'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_repo_url(repodir, remote='origin'):
+ import bb.process
+ # Try to get upstream repo location from origin remote
+ try:
+ stdout, _ = bb.process.run(['git', 'remote', '-v'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ for line in stdout.splitlines():
+ splitline = line.split()
+ if len(splitline) > 1:
+ if splitline[0] == remote and scriptutils.is_src_url(splitline[1]):
+ return splitline[1]
+ return None
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+ layers = OrderedDict()
+ for layerdir in tinfoil.config_data.getVar('BBLAYERS').split():
+ layerdata = OrderedDict()
+ layername = os.path.basename(layerdir)
+ logger.debug('layername %s, layerdir %s' % (layername, layerdir))
+ if layername in layers:
+ logger.warning('layername %s is not unique in configuration' % layername)
+ layername = os.path.basename(os.path.dirname(layerdir)) + '_' + os.path.basename(layerdir)
+ logger.debug('trying layername %s' % layername)
+ if layername in layers:
+ logger.error('Layer name %s is not unique in configuration' % layername)
+ sys.exit(2)
+ repodir = find_git_repo(layerdir)
+ if repodir:
+ remotebranch = get_git_remote_branch(repodir)
+ remote = 'origin'
+ if remotebranch and '/' in remotebranch:
+ rbsplit = remotebranch.split('/', 1)
+ layerdata['actual_branch'] = rbsplit[1]
+ remote = rbsplit[0]
+ layerdata['vcs_url'] = get_git_repo_url(repodir, remote)
+ if os.path.abspath(repodir) != os.path.abspath(layerdir):
+ layerdata['vcs_subdir'] = os.path.relpath(layerdir, repodir)
+ commit = get_git_head_commit(repodir)
+ if commit:
+ layerdata['vcs_commit'] = commit
+ layers[layername] = layerdata
+
+ json.dump(layers, args.output, indent=2)
+
+def get_recipe(args):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ print(' %s package provided by %s' % (args.package, recipe))
+
+def pkg_dependencies(args):
+ def get_recipe_info(tinfoil, recipe):
+ try:
+ info = tinfoil.get_recipe_info(recipe)
+ except Exception:
+ logger.error('Failed to get recipe info for: %s' % recipe)
+ sys.exit(1)
+ if not info:
+ logger.warning('No recipe info found for: %s' % recipe)
+ sys.exit(1)
+ append_files = tinfoil.get_file_appends(info.fn)
+ appends = True
+ data = tinfoil.parse_recipe_file(info.fn, appends, append_files)
+ data.pn = info.pn
+ data.pv = info.pv
+ return data
+
+ def find_dependencies(tinfoil, assume_provided, recipe_info, packages, rn, order):
+ spaces = ' ' * order
+ data = recipe_info[rn]
+ if args.native:
+ logger.debug('%s- %s' % (spaces, data.pn))
+ elif "-native" not in data.pn:
+ if "cross" not in data.pn:
+ logger.debug('%s- %s' % (spaces, data.pn))
+
+ depends = []
+ for dep in data.depends:
+ if dep not in assume_provided:
+ depends.append(dep)
+
+ # First find all dependencies not in package list.
+ for dep in depends:
+ if dep not in packages:
+ packages.append(dep)
+ dep_data = get_recipe_info(tinfoil, dep)
+ # Do this once now to reduce the number of bitbake calls.
+ dep_data.depends = dep_data.getVar('DEPENDS').split()
+ recipe_info[dep] = dep_data
+
+ # Then recursively analyze all of the dependencies for the current recipe.
+ for dep in depends:
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, dep, order + 1)
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare()
+
+ assume_provided = tinfoil.config_data.getVar('ASSUME_PROVIDED').split()
+ logger.debug('assumed provided:')
+ for ap in sorted(assume_provided):
+ logger.debug(' - %s' % ap)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ data = get_recipe_info(tinfoil, recipe)
+ data.depends = []
+ depends = data.getVar('DEPENDS').split()
+ for dep in depends:
+ if dep not in assume_provided:
+ data.depends.append(dep)
+
+ recipe_info = dict([(recipe, data)])
+ packages = []
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, recipe, order=1)
+
+ print('\nThe following packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" not in data.pn:
+ if "cross" not in data.pn:
+ print(" %s (%s)" % (data.pn,p))
+
+ if args.native:
+ print('\nThe following native packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+ if "cross" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+
+def default_config():
+ vlist = OrderedDict()
+ vlist['PV'] = 'yes'
+ vlist['SUMMARY'] = 'no'
+ vlist['DESCRIPTION'] = 'no'
+ vlist['SECTION'] = 'no'
+ vlist['LICENSE'] = 'yes'
+ vlist['HOMEPAGE'] = 'no'
+ vlist['BUGTRACKER'] = 'no'
+ vlist['PROVIDES'] = 'no'
+ vlist['BBCLASSEXTEND'] = 'no'
+ vlist['DEPENDS'] = 'no'
+ vlist['PACKAGECONFIG'] = 'no'
+ vlist['SRC_URI'] = 'yes'
+ vlist['SRCREV'] = 'yes'
+ vlist['EXTRA_OECONF'] = 'no'
+ vlist['EXTRA_OESCONS'] = 'no'
+ vlist['EXTRA_OECMAKE'] = 'no'
+ vlist['EXTRA_OEMESON'] = 'no'
+
+ clist = OrderedDict()
+ clist['variables'] = vlist
+ clist['filepath'] = 'no'
+ clist['sha256sum'] = 'no'
+ clist['layerdir'] = 'no'
+ clist['layer'] = 'no'
+ clist['inherits'] = 'no'
+ clist['source_urls'] = 'no'
+ clist['packageconfig_opts'] = 'no'
+ clist['patches'] = 'no'
+ clist['packagedir'] = 'no'
+ return clist
+
+def dump_config(args):
+ config = default_config()
+    with open('default_config.json', 'w') as f:
+        json.dump(config, f, indent=2)
+ logger.info('Default config list dumped to default_config.json')
+
+def export_manifest_info(args):
+
+ def handle_value(value):
+ if value:
+ return oe.utils.squashspaces(value)
+ else:
+ return value
+
+ if args.config:
+ logger.debug('config: %s' % args.config)
+        with open(args.config, 'r') as f:
+            config = json.load(f, object_pairs_hook=OrderedDict)
+ else:
+ config = default_config()
+ if logger.isEnabledFor(logging.DEBUG):
+ print('Configuration:')
+ json.dump(config, sys.stdout, indent=2)
+ print('')
+
+ tmpoutdir = tempfile.mkdtemp(prefix=os.path.basename(__file__)+'-')
+ logger.debug('tmp dir: %s' % tmpoutdir)
+
+ # export manifest
+    shutil.copy2(args.manifest, os.path.join(tmpoutdir, "manifest"))
+
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+
+ pkglist = get_pkg_list(args.manifest)
+ # export pkg list
+ f = open(os.path.join(tmpoutdir, "pkgs"), 'w')
+ for pkg in pkglist:
+ f.write('%s\n' % pkg)
+ f.close()
+
+ recipelist = []
+ for pkg in pkglist:
+            recipe = pkg2recipe(tinfoil, pkg)
+            if recipe:
+                if recipe not in recipelist:
+                    recipelist.append(recipe)
+ recipelist.sort()
+ # export recipe list
+ f = open(os.path.join(tmpoutdir, "recipes"), 'w')
+ for recipe in recipelist:
+ f.write('%s\n' % recipe)
+ f.close()
+
+ try:
+ rvalues = OrderedDict()
+ for pn in sorted(recipelist):
+ logger.debug('Package: %s' % pn)
+ rd = tinfoil.parse_recipe(pn)
+
+ rvalues[pn] = OrderedDict()
+
+ for varname in config['variables']:
+ if config['variables'][varname] == 'yes':
+ rvalues[pn][varname] = handle_value(rd.getVar(varname))
+
+ fpth = rd.getVar('FILE')
+ layerdir = oe.recipeutils.find_layerdir(fpth)
+ if config['filepath'] == 'yes':
+ rvalues[pn]['filepath'] = os.path.relpath(fpth, layerdir)
+ if config['sha256sum'] == 'yes':
+ rvalues[pn]['sha256sum'] = bb.utils.sha256_file(fpth)
+
+ if config['layerdir'] == 'yes':
+ rvalues[pn]['layerdir'] = layerdir
+
+ if config['layer'] == 'yes':
+ rvalues[pn]['layer'] = os.path.basename(layerdir)
+
+ if config['inherits'] == 'yes':
+ gr = set(tinfoil.config_data.getVar("__inherit_cache") or [])
+ lr = set(rd.getVar("__inherit_cache") or [])
+ rvalues[pn]['inherits'] = sorted({os.path.splitext(os.path.basename(r))[0] for r in lr if r not in gr})
+
+ if config['source_urls'] == 'yes':
+ rvalues[pn]['source_urls'] = []
+ for url in (rd.getVar('SRC_URI') or '').split():
+ if not url.startswith('file://'):
+ url = url.split(';')[0]
+ rvalues[pn]['source_urls'].append(url)
+
+ if config['packageconfig_opts'] == 'yes':
+ rvalues[pn]['packageconfig_opts'] = OrderedDict()
+ for key in rd.getVarFlags('PACKAGECONFIG').keys():
+ if key == 'doc':
+ continue
+ rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key, True)
+
+ if config['patches'] == 'yes':
+ patches = oe.recipeutils.get_recipe_patches(rd)
+ rvalues[pn]['patches'] = []
+ if patches:
+ recipeoutdir = os.path.join(tmpoutdir, pn, 'patches')
+ bb.utils.mkdirhier(recipeoutdir)
+ for patch in patches:
+ # Patches may be in other layers too
+ patchlayerdir = oe.recipeutils.find_layerdir(patch)
+ # patchlayerdir will be None for remote patches, which we ignore
+ # (since currently they are considered as part of sources)
+ if patchlayerdir:
+ rvalues[pn]['patches'].append((os.path.basename(patchlayerdir), os.path.relpath(patch, patchlayerdir)))
+ shutil.copy(patch, recipeoutdir)
+
+ if config['packagedir'] == 'yes':
+ pn_dir = os.path.join(tmpoutdir, pn)
+ bb.utils.mkdirhier(pn_dir)
+ f = open(os.path.join(pn_dir, 'recipe.json'), 'w')
+ json.dump(rvalues[pn], f, indent=2)
+ f.close()
+
+ with open(os.path.join(tmpoutdir, 'recipes.json'), 'w') as f:
+ json.dump(rvalues, f, indent=2)
+
+ if args.output:
+ outname = os.path.basename(args.output)
+ else:
+ outname = os.path.splitext(os.path.basename(args.manifest))[0]
+ if outname.endswith('.tar.gz'):
+ outname = outname[:-7]
+ elif outname.endswith('.tgz'):
+ outname = outname[:-4]
+
+ tarfn = outname
+ if tarfn.endswith(os.sep):
+ tarfn = tarfn[:-1]
+ if not tarfn.endswith(('.tar.gz', '.tgz')):
+ tarfn += '.tar.gz'
+ with open(tarfn, 'wb') as f:
+ with tarfile.open(None, "w:gz", f) as tar:
+ tar.add(tmpoutdir, outname)
+ finally:
+ shutil.rmtree(tmpoutdir)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="Image manifest utility",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ # get recipe info
+ parser_get_recipes = subparsers.add_parser('recipe-info',
+ help='Get recipe info',
+ description='Get recipe information for a package')
+ parser_get_recipes.add_argument('package', help='Package name')
+ parser_get_recipes.set_defaults(func=get_recipe)
+
+ # list runtime dependencies
+ parser_pkg_dep = subparsers.add_parser('list-depends',
+ help='List dependencies',
+ description='List dependencies required to build the package')
+ parser_pkg_dep.add_argument('--native', help='also print native and cross packages', action='store_true')
+ parser_pkg_dep.add_argument('package', help='Package name')
+ parser_pkg_dep.set_defaults(func=pkg_dependencies)
+
+ # list recipes
+ parser_recipes = subparsers.add_parser('list-recipes',
+ help='List recipes producing packages within an image',
+ description='Lists recipes producing the packages that went into an image, using the manifest and pkgdata')
+ parser_recipes.add_argument('manifest', help='Manifest file')
+ parser_recipes.set_defaults(func=list_recipes)
+
+ # list packages
+ parser_packages = subparsers.add_parser('list-packages',
+ help='List packages within an image',
+ description='Lists packages that went into an image, using the manifest')
+ parser_packages.add_argument('manifest', help='Manifest file')
+ parser_packages.set_defaults(func=list_packages)
+
+ # list layers
+ parser_layers = subparsers.add_parser('list-layers',
+ help='List included layers',
+ description='Lists included layers')
+ parser_layers.add_argument('-o', '--output', help='Output file - defaults to stdout if not specified',
+ default=sys.stdout, type=argparse.FileType('w'))
+ parser_layers.set_defaults(func=list_layers)
+
+ # dump default configuration file
+ parser_dconfig = subparsers.add_parser('dump-config',
+ help='Dump default config',
+ description='Dump default config to default_config.json')
+ parser_dconfig.set_defaults(func=dump_config)
+
+ # export recipe info for packages in manifest
+ parser_export = subparsers.add_parser('manifest-info',
+ help='Export recipe info for a manifest',
+ description='Export recipe information using the manifest')
+ parser_export.add_argument('-c', '--config', help='load config from json file')
+ parser_export.add_argument('-o', '--output', help='Output file (tarball) - defaults to manifest name if not specified')
+ parser_export.add_argument('manifest', help='Manifest file')
+ parser_export.set_defaults(func=export_manifest_info)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ logger.debug("Debug Enabled")
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ ret = args.func(args)
+
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
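
Most of the new image-manifest script is built on a single tinfoil pattern: open a session, parse a recipe, read variables from the parsed datastore. Reduced to its core (the recipe name is only an example, and this must run inside an initialized build environment):

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        # config_only=False triggers a full recipe parse, which parse_recipe() needs
        tinfoil.prepare(config_only=False)
        rd = tinfoil.parse_recipe('busybox')
        print(rd.getVar('PV'), rd.getVar('LICENSE'))
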
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py
index 7ce718624a..bb288e9099 100755
--- a/scripts/contrib/list-packageconfig-flags.py
+++ b/scripts/contrib/list-packageconfig-flags.py
@@ -1,21 +1,10 @@
#!/usr/bin/env python3
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation.
-#
# Copyright (C) 2013 Wind River Systems, Inc.
# Copyright (C) 2014 Intel Corporation
#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
# - list available recipes which have PACKAGECONFIG flags
# - list available PACKAGECONFIG flags and all affected recipes
# - list all recipes and PACKAGECONFIG information
@@ -44,7 +33,7 @@ import bb.tinfoil
def get_fnlist(bbhandler, pkg_pn, preferred):
''' Get all recipe file names '''
if preferred:
- (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
+ (latest_versions, preferred_versions, required_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
fn_list = []
for pn in sorted(pkg_pn):
@@ -76,7 +65,7 @@ def collect_pkgs(data_dict):
for fn in data_dict:
pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
pkgconfigflags.pop('doc', None)
- pkgname = data_dict[fn].getVar("P")
+ pkgname = data_dict[fn].getVar("PN")
pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
return pkg_dict
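
The P -> PN switch matters because P expands to roughly "${PN}-${PV}", so the old keys carried a version suffix and churned on every recipe upgrade:

    # Illustration only (example values):
    # P  = "bash-5.1"   -> key changes whenever the recipe version changes
    # PN = "bash"       -> key is stable across versions
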
diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py
index 913847bbed..c900720f6e 100755
--- a/scripts/contrib/oe-build-perf-report-email.py
+++ b/scripts/contrib/oe-build-perf-report-email.py
@@ -4,15 +4,9 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import base64
import logging
@@ -25,8 +19,6 @@ import socket
import subprocess
import sys
import tempfile
-from email.mime.image import MIMEImage
-from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
@@ -35,19 +27,6 @@ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-# Find js scaper script
-SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
- 'scrape-html-report.js')
-if not os.path.isfile(SCRAPE_JS):
- log.error("Unableto find oe-build-perf-report-scrape.js")
- sys.exit(1)
-
-
-class ReportError(Exception):
- """Local errors"""
- pass
-
-
def check_utils():
"""Check that all needed utils are installed in the system"""
missing = []
@@ -83,137 +62,19 @@ def parse_args(argv):
"the email parts")
parser.add_argument('--text',
help="Plain text message")
- parser.add_argument('--html',
- help="HTML peport generated by oe-build-perf-report")
- parser.add_argument('--phantomjs-args', action='append',
- help="Extra command line arguments passed to PhantomJS")
args = parser.parse_args(argv)
- if not args.html and not args.text:
- parser.error("Please specify --html and/or --text")
+ if not args.text:
+ parser.error("Please specify --text")
return args
-def decode_png(infile, outfile):
- """Parse/decode/optimize png data from a html element"""
- with open(infile) as f:
- raw_data = f.read()
-
- # Grab raw base64 data
- b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
- b64_data = re.sub('">.+$', '', b64_data, 1)
-
- # Replace file with proper decoded png
- with open(outfile, 'wb') as f:
- f.write(base64.b64decode(b64_data))
-
- subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
-
-
-def mangle_html_report(infile, outfile, pngs):
- """Mangle html file into a email compatible format"""
- paste = True
- png_dir = os.path.dirname(outfile)
- with open(infile) as f_in:
- with open(outfile, 'w') as f_out:
- for line in f_in.readlines():
- stripped = line.strip()
- # Strip out scripts
- if stripped == '<!--START-OF-SCRIPTS-->':
- paste = False
- elif stripped == '<!--END-OF-SCRIPTS-->':
- paste = True
- elif paste:
- if re.match('^.+href="data:image/png;base64', stripped):
- # Strip out encoded pngs (as they're huge in size)
- continue
- elif 'www.gstatic.com' in stripped:
- # HACK: drop references to external static pages
- continue
-
- # Replace charts with <img> elements
- match = re.match('<div id="(?P<id>\w+)"', stripped)
- if match and match.group('id') in pngs:
- f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
- else:
- f_out.write(line)
-
-
-def scrape_html_report(report, outdir, phantomjs_extra_args=None):
- """Scrape html report into a format sendable by email"""
- tmpdir = tempfile.mkdtemp(dir='.')
- log.debug("Using tmpdir %s for phantomjs output", tmpdir)
-
- if not os.path.isdir(outdir):
- os.mkdir(outdir)
- if os.path.splitext(report)[1] not in ('.html', '.htm'):
- raise ReportError("Invalid file extension for report, needs to be "
- "'.html' or '.htm'")
-
- try:
- log.info("Scraping HTML report with PhangomJS")
- extra_args = phantomjs_extra_args if phantomjs_extra_args else []
- subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
- [SCRAPE_JS, report, tmpdir],
- stderr=subprocess.STDOUT)
-
- pngs = []
- images = []
- for fname in os.listdir(tmpdir):
- base, ext = os.path.splitext(fname)
- if ext == '.png':
- log.debug("Decoding %s", fname)
- decode_png(os.path.join(tmpdir, fname),
- os.path.join(outdir, fname))
- pngs.append(base)
- images.append(fname)
- elif ext in ('.html', '.htm'):
- report_file = fname
- else:
- log.warning("Unknown file extension: '%s'", ext)
- #shutil.move(os.path.join(tmpdir, fname), outdir)
-
- log.debug("Mangling html report file %s", report_file)
- mangle_html_report(os.path.join(tmpdir, report_file),
- os.path.join(outdir, report_file), pngs)
- return (os.path.join(outdir, report_file),
- [os.path.join(outdir, i) for i in images])
- finally:
- shutil.rmtree(tmpdir)
-
-def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
- blind_copy=[]):
- """Send email"""
+def send_email(text_fn, subject, recipients, copy=[], blind_copy=[]):
# Generate email message
- text_msg = html_msg = None
- if text_fn:
- with open(text_fn) as f:
- text_msg = MIMEText("Yocto build performance test report.\n" +
- f.read(), 'plain')
- if html_fn:
- html_msg = msg = MIMEMultipart('related')
- with open(html_fn) as f:
- html_msg.attach(MIMEText(f.read(), 'html'))
- for img_fn in image_fns:
- # Expect that content id is same as the filename
- cid = os.path.splitext(os.path.basename(img_fn))[0]
- with open(img_fn, 'rb') as f:
- image_msg = MIMEImage(f.read())
- image_msg['Content-ID'] = '<{}>'.format(cid)
- html_msg.attach(image_msg)
-
- if text_msg and html_msg:
- msg = MIMEMultipart('alternative')
- msg.attach(text_msg)
- msg.attach(html_msg)
- elif text_msg:
- msg = text_msg
- elif html_msg:
- msg = html_msg
- else:
- raise ReportError("Neither plain text nor html body specified")
+ with open(text_fn) as f:
+ msg = MIMEText("Yocto build performance test report.\n" + f.read(), 'plain')
pw_data = pwd.getpwuid(os.getuid())
full_name = pw_data.pw_gecos.split(',')[0]
@@ -251,25 +112,16 @@ def main(argv=None):
try:
log.debug("Storing email parts in %s", outdir)
- html_report = images = None
- if args.html:
- html_report, images = scrape_html_report(args.html, outdir,
- args.phantomjs_args)
-
if args.to:
log.info("Sending email to %s", ', '.join(args.to))
if args.cc:
log.info("Copying to %s", ', '.join(args.cc))
if args.bcc:
log.info("Blind copying to %s", ', '.join(args.bcc))
- send_email(args.text, html_report, images, args.subject,
- args.to, args.cc, args.bcc)
+ send_email(args.text, args.subject, args.to, args.cc, args.bcc)
except subprocess.CalledProcessError as err:
log.error("%s, with output:\n%s", str(err), err.output.decode())
return 1
- except ReportError as err:
- log.error(err)
- return 1
finally:
if not args.outdir:
log.debug("Wiping %s", outdir)
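
With the PhantomJS/HTML scraping path removed, message construction reduces to a single MIMEText part. A self-contained sketch of what remains (the addresses and the sendmail invocation are illustrative, not lifted from the script):

    import subprocess
    from email.mime.text import MIMEText

    msg = MIMEText('Yocto build performance test report.\n...body...', 'plain')
    msg['Subject'] = 'Build performance report'
    msg['From'] = 'builder@example.com'
    msg['To'] = 'dev@example.com'

    # Hand the finished message to the local MTA; -t takes the recipient
    # list from the headers above.
    proc = subprocess.Popen(['sendmail', '-t'], stdin=subprocess.PIPE)
    proc.communicate(msg.as_bytes())
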
diff --git a/scripts/contrib/patchreview.py b/scripts/contrib/patchreview.py
index 072166504d..62c509f51c 100755
--- a/scripts/contrib/patchreview.py
+++ b/scripts/contrib/patchreview.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# TODO
# - option to just list all broken files
diff --git a/scripts/contrib/patchtest.sh b/scripts/contrib/patchtest.sh
index 7fe566666e..b1e1ea334b 100755
--- a/scripts/contrib/patchtest.sh
+++ b/scripts/contrib/patchtest.sh
@@ -1,26 +1,12 @@
#!/bin/bash
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# patchtest: Run patchtest on commits starting at master
#
# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
+
set -o errexit
# Default values
diff --git a/scripts/contrib/serdevtry b/scripts/contrib/serdevtry
index 74bd7b7161..9144730e7e 100755
--- a/scripts/contrib/serdevtry
+++ b/scripts/contrib/serdevtry
@@ -2,7 +2,8 @@
# Copyright (C) 2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
if [ "$1" = "" -o "$1" = "--help" ] ; then
echo "Usage: $0 <serial terminal command>"
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh
index 9e5725ae54..4012ac7ba7 100755
--- a/scripts/contrib/test_build_time.sh
+++ b/scripts/contrib/test_build_time.sh
@@ -3,22 +3,8 @@
# Build performance regression test script
#
# Copyright 2011 Intel Corporation
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to be used in conjunction with "git bisect run"
@@ -111,7 +97,7 @@ if [ $? != 0 ] ; then
exit 251
fi
-if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
+if [ "BB_ENV_PASSTHROUGH_ADDITIONS" != "" ] ; then
echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
fi
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh
index 8e20a9ea7d..478e8b0d03 100755
--- a/scripts/contrib/test_build_time_worker.sh
+++ b/scripts/contrib/test_build_time_worker.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This is an example script to be used in conjunction with test_build_time.sh
if [ "$TEST_BUILDDIR" = "" ] ; then
diff --git a/scripts/contrib/uncovered b/scripts/contrib/uncovered
index a8399ad170..f16128cb7a 100755
--- a/scripts/contrib/uncovered
+++ b/scripts/contrib/uncovered
@@ -1,23 +1,10 @@
#!/bin/bash -eur
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Find python modules uncovered by oe-seltest
#
# Copyright (c) 2016, Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
#
diff --git a/scripts/contrib/verify-homepage.py b/scripts/contrib/verify-homepage.py
index cc6e797d8b..7bffa78e23 100755
--- a/scripts/contrib/verify-homepage.py
+++ b/scripts/contrib/verify-homepage.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This script can be used to verify HOMEPAGE values for all recipes in
# the current configuration.
# The result depends on the network environment, since the default connection timeout is 5 seconds.
diff --git a/scripts/cp-noerror b/scripts/cp-noerror
index 35eb211be3..ab617c5d35 100755
--- a/scripts/cp-noerror
+++ b/scripts/cp-noerror
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
# don't error.
# Also don't error if $1 disappears.
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 280880b3f7..8eefcf63a5 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -1,21 +1,8 @@
#!/bin/sh
#
# Copyright (c) 2010-2013, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -136,20 +123,12 @@ fi
# Rewrite private URLs to public URLs
# Determine the repository name for use in the WEB_URL later
-case "$REMOTE_URL" in
-*@*)
- USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
- PROTO_RE="[a-z][a-z+]*://"
- GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
- REMOTE_URL=${REMOTE_URL%.git}
- REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
- REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
- ;;
-*)
- echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
- echo " The pull and browse URLs will likely be incorrect"
- ;;
-esac
+USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+PROTO_RE="[a-z][a-z+]*://"
+GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
+REMOTE_URL=${REMOTE_URL%.git}
+REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -278,7 +257,7 @@ fi
# Replace the SUBJECT token with it.
if [ -n "$SUBJECT" ]; then
- sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+ sed -i -e "s\`\*\*\* SUBJECT HERE \*\*\*\`$SUBJECT\`" "$CL"
fi
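
The rewrite now runs unconditionally, so the combined regex had to make both the protocol and the user@ part optional instead of assuming them. The same transformation expressed in Python, for illustration (the URLs are made up):

    import re

    GIT_RE = re.compile(
        r'^(?:[a-z][a-z+]*://)?'                  # optional protocol
        r'(?:[A-Za-z0-9_.@][A-Za-z0-9_.@-]*@)?'   # optional user@
        r'([^:/]*)[:/](.*)$')                     # host, separator, repo path

    for url in ('ssh://git@push.example.com/poky', 'example.com:user/repo.git'):
        url = url[:-4] if url.endswith('.git') else url
        host, repo = GIT_RE.match(url).groups()
        print('%s -> git://%s/%s' % (url, host, repo))
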
diff --git a/scripts/cross-intercept/ar b/scripts/cross-intercept/ar
new file mode 120000
index 0000000000..bc68ffd7a2
--- /dev/null
+++ b/scripts/cross-intercept/ar
@@ -0,0 +1 @@
+../native-intercept/ar
\ No newline at end of file
diff --git a/scripts/crosstap b/scripts/crosstap
index e33fa4ad46..5aa72f14d4 100755
--- a/scripts/crosstap
+++ b/scripts/crosstap
@@ -17,20 +17,9 @@
# related to kernel.
#
# Copyright (c) 2018, Cisco Systems.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import re
@@ -364,7 +353,7 @@ bitbake workspace.
Anything after -- option is passed directly to stap.
-Legacy script invocation style supported but depreciated:
+Legacy script invocation style supported but deprecated:
%prog <user@hostname> <systemtap-script> [systemtap options]
To get the most out of systemtap the following site.conf or local.conf
@@ -376,13 +365,13 @@ IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
USER_CLASSES += "image-combined-dbg"
# enables kernel debug symbols
-KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+KERNEL_EXTRA_FEATURES:append = " features/debug/debug-kernel.scc"
# minimal, just run-time systemtap configuration in target image
-PACKAGECONFIG_pn-systemtap = "monitor"
+PACKAGECONFIG:pn-systemtap = "monitor"
# add systemtap run-time into target image if it is not there yet
-IMAGE_INSTALL_append = " systemtap"
+IMAGE_INSTALL:append = " systemtap"
"""
option_parser = optparse.OptionParser(usage=usage)
diff --git a/scripts/devtool b/scripts/devtool
index 0e578c0de3..af4811b922 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -110,7 +100,7 @@ def read_workspace():
_enable_workspace_layer(config.workspace_path, config, basepath)
logger.debug('Reading workspace in %s' % config.workspace_path)
- externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$')
+ externalsrc_re = re.compile(r'^EXTERNALSRC(:pn-([^ =]+))? *= *"([^"]*)"$')
for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
with open(fn, 'r') as f:
pnvalues = {}
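
The only functional change to devtool here is the override separator: bitbake moved overrides from "_" to ":" (EXTERNALSRC_pn-foo became EXTERNALSRC:pn-foo), and the workspace parser's regex follows. A quick check of the new pattern against a typical workspace line:

    import re

    externalsrc_re = re.compile(r'^EXTERNALSRC(:pn-([^ =]+))? *= *"([^"]*)"$')
    m = externalsrc_re.match('EXTERNALSRC:pn-foo = "/work/sources/foo"')
    print(m.group(2), m.group(3))    # -> foo /work/sources/foo
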
diff --git a/scripts/gen-lockedsig-cache b/scripts/gen-lockedsig-cache
index 6765891d19..cc674f9c1b 100755
--- a/scripts/gen-lockedsig-cache
+++ b/scripts/gen-lockedsig-cache
@@ -1,10 +1,13 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
-import glob
import shutil
import errno
+import time
def mkdir(d):
try:
@@ -13,6 +16,38 @@ def mkdir(d):
if e.errno != errno.EEXIST:
raise e
+# extract the hash: the eighth colon-separated field of the filename, up to the first underscore
+def extract_sha(filename):
+ return filename.split(':')[7].split('_')[0]
+
+# get all files in a directory, extract hash and make
+# a map from hash to list of file with that hash
+def map_sha_to_files(dir_, prefix, sha_map):
+ sstate_prefix_path = dir_ + '/' + prefix + '/'
+ if not os.path.exists(sstate_prefix_path):
+ return
+ sstate_files = os.listdir(sstate_prefix_path)
+ for f in sstate_files:
+ try:
+ sha = extract_sha(f)
+ if sha not in sha_map:
+ sha_map[sha] = []
+ sha_map[sha].append(sstate_prefix_path + f)
+ except IndexError:
+ continue
+
+# given a prefix build a map of hash to list of files
+def build_sha_cache(prefix):
+ sha_map = {}
+
+ sstate_dir = sys.argv[2]
+ map_sha_to_files(sstate_dir, prefix, sha_map)
+
+ native_sstate_dir = sys.argv[2] + '/' + sys.argv[4]
+ map_sha_to_files(native_sstate_dir, prefix, sha_map)
+
+ return sha_map
+
if len(sys.argv) < 5:
print("Incorrect number of arguments specified")
print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
@@ -38,18 +73,28 @@ with open(sys.argv[1]) as f:
sigs.append(sig)
print('Gathering file list')
+start_time = time.perf_counter()
files = set()
+sstate_content_cache = {}
for s in sigs:
- p = sys.argv[2] + "/" + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
- p = sys.argv[2] + "/%s/" % sys.argv[4] + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
+ prefix = s[:2]
+ prefix2 = s[2:4]
+ if prefix not in sstate_content_cache:
+ sstate_content_cache[prefix] = {}
+ if prefix2 not in sstate_content_cache[prefix]:
+ sstate_content_cache[prefix][prefix2] = build_sha_cache(prefix + "/" + prefix2)
+
+ if s in sstate_content_cache[prefix][prefix2]:
+ for f in sstate_content_cache[prefix][prefix2][s]:
+ files.add(f)
+
+elapsed = time.perf_counter() - start_time
+print("Gathering file list took %.1fs" % elapsed)
print('Processing files')
for f in files:
sys.stdout.write('Processing %s... ' % f)
- _, ext = os.path.splitext(f)
- if not ext in ['.tgz', '.siginfo', '.sig']:
+ if not f.endswith(('.tar.zst', '.siginfo', '.sig')):
# Most likely a temp file, skip it
print('skipping')
continue
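
The speedup in gen-lockedsig-cache comes from inverting the lookup: instead of glob()ing the sstate tree once per signature, each two-level prefix directory is listed once and its filenames are bucketed by the hash embedded in them. The core idea as a standalone sketch (field positions mirror the script's extract_sha()):

    import os
    from collections import defaultdict

    def build_sha_cache(sstate_dir, prefix):
        sha_map = defaultdict(list)
        d = os.path.join(sstate_dir, prefix)
        if os.path.isdir(d):
            for fn in os.listdir(d):
                try:
                    # the hash is the eighth colon-separated field, up to '_'
                    sha = fn.split(':')[7].split('_')[0]
                except IndexError:
                    continue    # temp or unrelated file, skip it
                sha_map[sha].append(os.path.join(d, fn))
        return sha_map
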
diff --git a/scripts/gen-site-config b/scripts/gen-site-config
index 7da7a0bd8a..727b809c0f 100755
--- a/scripts/gen-site-config
+++ b/scripts/gen-site-config
@@ -1,18 +1,8 @@
#! /bin/sh
# Copyright (c) 2005-2008 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
cat << EOF
AC_PREREQ(2.57)
diff --git a/scripts/install-buildtools b/scripts/install-buildtools
new file mode 100755
index 0000000000..10c3d043de
--- /dev/null
+++ b/scripts/install-buildtools
@@ -0,0 +1,345 @@
+#!/usr/bin/env python3
+
+# Buildtools and buildtools extended installer helper script
+#
+# Copyright (C) 2017-2020 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# NOTE: --with-extended-buildtools is on by default
+#
+# Example usage (extended buildtools from milestone):
+# (1) using --url and --filename
+# $ install-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/milestones/yocto-3.1_M3/buildtools \
+# --filename x86_64-buildtools-extended-nativesdk-standalone-3.0+snapshot-20200315.sh
+# (2) using --base-url, --release, --installer-version and --build-date
+# $ install-buildtools \
+#     --base-url http://downloads.yoctoproject.org/releases/yocto \
+#     --release yocto-3.1_M3 \
+#     --installer-version 3.0+snapshot \
+#     --build-date 20200315
+#
+# Example usage (standard buildtools from release):
+# (3) using --url and --filename
+# $ install-buildtools --without-extended-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/yocto-3.0.2/buildtools \
+# --filename x86_64-buildtools-nativesdk-standalone-3.0.2.sh
+# (4) using --base-url, --release and --installer-version
+# $ install-buildtools --without-extended-buildtools \
+# --base-url http://downloads.yoctoproject.org/releases/yocto \
+# --release yocto-3.0.2 \
+# --installer-version 3.0.2
+#
+
+import argparse
+import logging
+import os
+import platform
+import re
+import shutil
+import shlex
+import stat
+import subprocess
+import sys
+import tempfile
+from urllib.parse import quote
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+
+
+PROGNAME = 'install-buildtools'
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+DEFAULT_INSTALL_DIR = os.path.join(os.path.split(scripts_path)[0],'buildtools')
+DEFAULT_BASE_URL = 'http://downloads.yoctoproject.org/releases/yocto'
+DEFAULT_RELEASE = 'yocto-3.4'
+DEFAULT_INSTALLER_VERSION = '3.4'
+DEFAULT_BUILDDATE = '202110XX'
+
+# Python version sanity check
+if not (sys.version_info.major == 3 and sys.version_info.minor >= 4):
+ logger.error("This script requires Python 3.4 or greater")
+ logger.error("You have Python %s.%s" %
+ (sys.version_info.major, sys.version_info.minor))
+ sys.exit(1)
+
+# The following three functions are copied directly from
+# bitbake/lib/bb/utils.py, in order to allow this script
+# to run on versions of python earlier than what bitbake
+# supports (e.g. less than Python 3.5 for YP 3.1 release)
+
+def _hasher(method, filename):
+ import mmap
+
+ with open(filename, "rb") as f:
+ try:
+ with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
+ for chunk in iter(lambda: mm.read(8192), b''):
+ method.update(chunk)
+ except ValueError:
+ # You can't mmap() an empty file so silence this exception
+ pass
+ return method.hexdigest()
+
+
+def md5_file(filename):
+ """
+ Return the hex string representation of the MD5 checksum of filename.
+ """
+ import hashlib
+ return _hasher(hashlib.md5(), filename)
+
+def sha256_file(filename):
+ """
+ Return the hex string representation of the 256-bit SHA checksum of
+ filename.
+ """
+ import hashlib
+ return _hasher(hashlib.sha256(), filename)
+
+
+def main():
+ global DEFAULT_INSTALL_DIR
+ global DEFAULT_BASE_URL
+ global DEFAULT_RELEASE
+ global DEFAULT_INSTALLER_VERSION
+ global DEFAULT_BUILDDATE
+ filename = ""
+ release = ""
+ buildtools_url = ""
+ install_dir = ""
+ arch = platform.machine()
+
+ parser = argparse.ArgumentParser(
+ description="Buildtools installation helper",
+ add_help=False)
+ parser.add_argument('-u', '--url',
+ help='URL from where to fetch buildtools SDK installer, not '
+ 'including filename (optional)\n'
+ 'Requires --filename.',
+ action='store')
+ parser.add_argument('-f', '--filename',
+ help='filename for the buildtools SDK installer to be installed '
+ '(optional)\nRequires --url',
+ action='store')
+ parser.add_argument('-d', '--directory',
+ default=DEFAULT_INSTALL_DIR,
+ help='directory where buildtools SDK will be installed (optional)',
+ action='store')
+ parser.add_argument('-r', '--release',
+ default=DEFAULT_RELEASE,
+ help='Yocto Project release string for SDK which will be '
+ 'installed (optional)',
+ action='store')
+ parser.add_argument('-V', '--installer-version',
+ default=DEFAULT_INSTALLER_VERSION,
+ help='version string for the SDK to be installed (optional)',
+ action='store')
+ parser.add_argument('-b', '--base-url',
+ default=DEFAULT_BASE_URL,
+ help='base URL from which to fetch SDK (optional)', action='store')
+ parser.add_argument('-t', '--build-date',
+ default=DEFAULT_BUILDDATE,
+ help='Build date of pre-release SDK (optional)', action='store')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--with-extended-buildtools', action='store_true',
+ dest='with_extended_buildtools',
+ default=True,
+ help='enable extended buildtools tarball (on by default)')
+ group.add_argument('--without-extended-buildtools', action='store_false',
+ dest='with_extended_buildtools',
+ help='disable extended buildtools (traditional buildtools tarball)')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('-c', '--check', help='enable checksum validation',
+ default=True, action='store_true')
+ group.add_argument('-n', '--no-check', help='disable checksum validation',
+ dest="check", action='store_false')
+ parser.add_argument('-D', '--debug', help='enable debug output',
+ action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors',
+ action='store_true')
+
+ parser.add_argument('-h', '--help', action='help',
+ default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if args.url and args.filename:
+ logger.debug("--url and --filename detected. Ignoring --base-url "
+ "--release --installer-version arguments.")
+ filename = args.filename
+ buildtools_url = "%s/%s" % (args.url, filename)
+ else:
+ if args.base_url:
+ base_url = args.base_url
+ else:
+ base_url = DEFAULT_BASE_URL
+ if args.release:
+ # check if this is a pre-release "milestone" SDK
+ m = re.search(r"^(?P<distro>[a-zA-Z\-]+)(?P<version>[0-9.]+)(?P<milestone>_M[1-9])$",
+ args.release)
+ logger.debug("milestone regex: %s" % m)
+ if m and m.group('milestone'):
+ logger.debug("release[distro]: %s" % m.group('distro'))
+ logger.debug("release[version]: %s" % m.group('version'))
+ logger.debug("release[milestone]: %s" % m.group('milestone'))
+ if not args.build_date:
+ logger.error("Milestone installers require --build-date")
+ else:
+ if args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/milestones/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+ # regular release SDK
+ else:
+ if args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ # Fetch installer
+ logger.info("Fetching buildtools installer")
+ tmpbuildtools = os.path.join(tmpsdk_dir, filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools, buildtools_url), shell=True)
+ if ret != 0:
+ logger.error("Could not download file from %s" % buildtools_url)
+ return ret
+
+ # Verify checksum
+ if args.check:
+ logger.info("Fetching buildtools installer checksum")
+ checksum_type = ""
+ for checksum_type in ["md5sum", "sha256sum"]:
+ check_url = "{}.{}".format(buildtools_url, checksum_type)
+ checksum_filename = "{}.{}".format(filename, checksum_type)
+ tmpbuildtools_checksum = os.path.join(tmpsdk_dir, checksum_filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools_checksum, check_url), shell=True)
+ if ret == 0:
+ break
+            else:
+                if ret != 0:
+                    logger.error("Could not download file from %s" % check_url)
+                    return ret
+ regex = re.compile(r"^(?P<checksum>[0-9a-f]+)\s+(?P<path>.*/)?(?P<filename>.*)$")
+ with open(tmpbuildtools_checksum, 'rb') as f:
+ original = f.read()
+ m = re.search(regex, original.decode("utf-8"))
+ logger.debug("checksum regex match: %s" % m)
+ logger.debug("checksum: %s" % m.group('checksum'))
+ logger.debug("path: %s" % m.group('path'))
+ logger.debug("filename: %s" % m.group('filename'))
+ if filename != m.group('filename'):
+ logger.error("Filename does not match name in checksum")
+ return 1
+ checksum = m.group('checksum')
+ if checksum_type == "md5sum":
+ checksum_value = md5_file(tmpbuildtools)
+ else:
+ checksum_value = sha256_file(tmpbuildtools)
+ if checksum == checksum_value:
+ logger.info("Checksum success")
+ else:
+ logger.error("Checksum %s expected. Actual checksum is %s." %
+ (checksum, checksum_value))
+ return 1
+
+ # Make installer executable
+ logger.info("Making installer executable")
+ st = os.stat(tmpbuildtools)
+ os.chmod(tmpbuildtools, st.st_mode | stat.S_IEXEC)
+ logger.debug(os.stat(tmpbuildtools))
+ if args.directory:
+ install_dir = args.directory
+ ret = subprocess.call("%s -d %s -y" %
+ (tmpbuildtools, install_dir), shell=True)
+ else:
+ install_dir = "/opt/poky/%s" % args.installer_version
+ ret = subprocess.call("%s -y" % tmpbuildtools, shell=True)
+ if ret != 0:
+ logger.error("Could not run buildtools installer")
+ return ret
+
+ # Setup the environment
+ logger.info("Setting up the environment")
+ regex = re.compile(r'^(?P<export>export )?(?P<env_var>[A-Z_]+)=(?P<env_val>.+)$')
+ with open("%s/environment-setup-%s-pokysdk-linux" %
+ (install_dir, arch), 'rb') as f:
+ for line in f:
+ match = regex.search(line.decode('utf-8'))
+ logger.debug("export regex: %s" % match)
+ if match:
+ env_var = match.group('env_var')
+ logger.debug("env_var: %s" % env_var)
+ env_val = match.group('env_val')
+ logger.debug("env_val: %s" % env_val)
+ os.environ[env_var] = env_val
+
+ # Test installation
+ logger.info("Testing installation")
+ tool = ""
+ m = re.search("extended", tmpbuildtools)
+ logger.debug("extended regex: %s" % m)
+ if args.with_extended_buildtools and not m:
+ logger.info("Ignoring --with-extended-buildtools as filename "
+ "does not contain 'extended'")
+ if args.with_extended_buildtools and m:
+ tool = 'gcc'
+ else:
+ tool = 'tar'
+ logger.debug("install_dir: %s" % install_dir)
+ cmd = shlex.split("/usr/bin/which %s" % tool)
+ logger.debug("cmd: %s" % cmd)
+ logger.debug("tool: %s" % tool)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, errors = proc.communicate()
+ logger.debug("proc.args: %s" % proc.args)
+ logger.debug("proc.communicate(): output %s" % output)
+ logger.debug("proc.communicate(): errors %s" % errors)
+ which_tool = output.decode('utf-8')
+ logger.debug("which %s: %s" % (tool, which_tool))
+ ret = proc.returncode
+ if not which_tool.startswith(install_dir):
+ logger.error("Something went wrong: %s not found in %s" %
+ (tool, install_dir))
+ if ret != 0:
+ logger.error("Something went wrong: installation failed")
+ else:
+ logger.info("Installation successful. Remember to source the "
+ "environment setup script now and in any new session.")
+ return ret
+
+ finally:
+ # cleanup tmp directory
+ shutil.rmtree(tmpsdk_dir)
+
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
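
The checksum handling above leans on Python's for/else: the else branch runs only when no checksum file could be downloaded at all (the loop never hit break). A minimal standalone sketch of the same fetch-and-verify pattern; the URL layout and helper names here are illustrative, not part of the patch:

import hashlib
import subprocess

def fetch(url, dest):
    """Download url to dest with wget; True on success (illustrative helper)."""
    return subprocess.call(["wget", "-q", "-O", dest, url]) == 0

def verify_installer(installer_url, installer_path):
    # Try each published checksum type in turn; the for/else "else" runs
    # only if every download attempt failed (no break was hit).
    for ext in ("md5sum", "sha256sum"):
        checksum_path = "%s.%s" % (installer_path, ext)
        if fetch("%s.%s" % (installer_url, ext), checksum_path):
            break
    else:
        raise RuntimeError("no checksum file published for %s" % installer_url)
    with open(checksum_path) as f:
        expected = f.read().split()[0]  # checksum files are "HASH  filename"
    digest = hashlib.md5() if ext == "md5sum" else hashlib.sha256()
    with open(installer_path, "rb") as f:
        for block in iter(lambda: f.read(65536), b""):
            digest.update(block)
    return digest.hexdigest() == expected
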
diff --git a/scripts/lib/argparse_oe.py b/scripts/lib/argparse_oe.py
index 9bdfc1ceca..94a4ac5011 100644
--- a/scripts/lib/argparse_oe.py
+++ b/scripts/lib/argparse_oe.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import argparse
from collections import defaultdict, OrderedDict
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py
index 1f8b729078..dcbb78042d 100644
--- a/scripts/lib/build_perf/__init__.py
+++ b/scripts/lib/build_perf/__init__.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test library functions"""
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py
index 578bb162ee..d1273c9c50 100644
--- a/scripts/lib/build_perf/html.py
+++ b/scripts/lib/build_perf/html.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for HTML reporting"""
from jinja2 import Environment, PackageLoader
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
index d99a36797f..ab77424cc7 100644
--- a/scripts/lib/build_perf/report.py
+++ b/scripts/lib/build_perf/report.py
@@ -1,17 +1,11 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Handling of build perf test reports"""
-from collections import OrderedDict, Mapping, namedtuple
+from collections import OrderedDict, namedtuple
+from collections.abc import Mapping
from datetime import datetime, timezone
from numbers import Number
from statistics import mean, stdev, variance
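
The report.py hunk also fixes a Python 3.10 incompatibility: the abstract base classes such as Mapping moved to collections.abc in Python 3.3, and the old collections aliases were removed in 3.10. The patch simply imports from the new location unconditionally; code that still has to run on very old interpreters could hedge with a fallback, as in this illustrative sketch (not what the patch does):

try:
    from collections.abc import Mapping  # Python >= 3.3
except ImportError:
    from collections import Mapping      # legacy interpreters only

def is_mapping(obj):
    # Matches dict and any other Mapping implementation
    return isinstance(obj, Mapping)

assert is_mapping({"a": 1}) and not is_mapping([1, 2])
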
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index d9aadf3cb8..c69b5bf4d7 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Functionality for analyzing buildstats"""
import json
@@ -263,18 +256,22 @@ class BuildStats(dict):
"""Aggregate other buildstats into this"""
if set(self.keys()) != set(buildstats.keys()):
raise ValueError("Refusing to aggregate buildstats, set of "
- "recipes is different")
+ "recipes is different: %s" % (set(self.keys()) ^ set(buildstats.keys())))
for pkg, data in buildstats.items():
self[pkg].aggregate(data)
-def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
+def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None, only_tasks=[]):
"""Compare the tasks of two buildstats"""
tasks_diff = []
pkgs = set(bs1.keys()).union(set(bs2.keys()))
for pkg in pkgs:
tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
+ if only_tasks:
+ tasks1 = {k: v for k, v in tasks1.items() if k in only_tasks}
+ tasks2 = {k: v for k, v in tasks2.items() if k in only_tasks}
+
if not tasks1:
pkg_op = '+'
elif not tasks2:
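
The new only_tasks keyword narrows both task dictionaries with a comprehension before the comparison; an empty filter keeps everything. (Note the default is a mutable list shared across calls; it is only read here, so that is harmless, but a tuple default avoids the pitfall entirely.) A simplified, self-contained illustration of the filtering, with task data reduced to plain numbers:

def filter_tasks(tasks, only_tasks=()):
    """Keep only the named tasks; an empty filter keeps everything."""
    if not only_tasks:
        return dict(tasks)
    return {name: data for name, data in tasks.items() if name in only_tasks}

tasks = {"do_compile": 42.0, "do_install": 3.5}
assert filter_tasks(tasks, ("do_compile",)) == {"do_compile": 42.0}
assert filter_tasks(tasks) == tasks
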
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index ca7863a19e..aa946f3036 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -1,7 +1,9 @@
# Yocto Project layer check tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import re
@@ -57,9 +59,14 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
depends = ldata.getVar('LAYERDEPENDS_%s' % name)
compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
+ try:
+ depDict = bb.utils.explode_dep_versions2(depends or "")
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))
+
collections[name]['priority'] = priority
collections[name]['pattern'] = pattern
- collections[name]['depends'] = depends
+ collections[name]['depends'] = ' '.join(depDict.keys())
collections[name]['compat'] = compat
return collections
@@ -139,14 +146,38 @@ def detect_layers(layer_directories, no_auto):
return layers
-def _find_layer_depends(depend, layers):
+def _find_layer(depend, layers):
for layer in layers:
+ if 'collections' not in layer:
+ continue
+
for collection in layer['collections']:
if depend == collection:
return layer
return None
-def add_layer_dependencies(bblayersconf, layer, layers, logger):
+def sanity_check_layers(layers, logger):
+ """
+ Check that we didn't find duplicate collection names, as the layer that will
+ be used is non-deterministic. The precise check is duplicate collections
+ with different patterns, as the same pattern being repeated won't cause
+ problems.
+ """
+ import collections
+
+ passed = True
+ seen = collections.defaultdict(set)
+ for layer in layers:
+ for name, data in layer.get("collections", {}).items():
+ seen[name].add(data["pattern"])
+
+ for name, patterns in seen.items():
+ if len(patterns) > 1:
+ passed = False
+ logger.error("Collection %s found multiple times: %s" % (name, ", ".join(patterns)))
+ return passed
+
+def get_layer_dependencies(layer, layers, logger):
def recurse_dependencies(depends, layer, layers, logger, ret = []):
logger.debug('Processing dependencies %s for layer %s.' % \
(depends, layer['name']))
@@ -156,7 +187,7 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
if depend == 'core':
continue
- layer_depend = _find_layer_depends(depend, layers)
+ layer_depend = _find_layer(depend, layers)
if not layer_depend:
logger.error('Layer %s depends on %s and isn\'t found.' % \
(layer['name'], depend))
@@ -193,33 +224,50 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
# Note: [] (empty) is allowed, None is not!
+ return layer_depends
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+
+ layer_depends = get_layer_dependencies(layer, layers, logger)
if layer_depends is None:
return False
else:
- # Don't add a layer that is already present.
- added = set()
- output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
- for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
- added.add(path)
-
- for layer_depend in layer_depends:
- name = layer_depend['name']
- path = layer_depend['path']
+ add_layers(bblayersconf, layer_depends, logger)
+
+ return True
+
+def add_layers(bblayersconf, layers, logger):
+ # Don't add a layer that is already present.
+ added = set()
+ output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
+ for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
+ added.add(path)
+
+ with open(bblayersconf, 'a+') as f:
+ for layer in layers:
+ logger.info('Adding layer %s' % layer['name'])
+ name = layer['name']
+ path = layer['path']
if path in added:
- continue
+ logger.info('%s is already in %s' % (name, bblayersconf))
else:
added.add(path)
- logger.info('Adding layer dependency %s' % name)
- with open(bblayersconf, 'a+') as f:
f.write("\nBBLAYERS += \"%s\"\n" % path)
return True
-def add_layer(bblayersconf, layer, layers, logger):
- logger.info('Adding layer %s' % layer['name'])
- with open(bblayersconf, 'a+') as f:
- f.write("\nBBLAYERS += \"%s\"\n" % layer['path'])
+def check_bblayers(bblayersconf, layer_path, logger):
+ '''
+ Return True if layer_path is found in BBLAYERS
+ '''
+ import bb.parse
+ import bb.data
- return True
+ ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True)
+ for bblayer in (ldata.getVar('BBLAYERS') or '').split():
+ if os.path.normpath(bblayer) == os.path.normpath(layer_path):
+ return True
+
+ return False
def check_command(error_msg, cmd, cwd=None):
'''
@@ -234,7 +282,7 @@ def check_command(error_msg, cmd, cwd=None):
raise RuntimeError(msg)
return output
-def get_signatures(builddir, failsafe=False, machine=None):
+def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
import re
# some recipes needs to be excluded like meta-world-pkgdata
@@ -245,7 +293,10 @@ def get_signatures(builddir, failsafe=False, machine=None):
sigs = {}
tune2tasks = {}
- cmd = ''
+ cmd = 'BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
+ if extravars:
+ cmd += extravars
+ cmd += ' '
if machine:
cmd += 'MACHINE=%s ' % machine
cmd += 'bitbake '
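
sanity_check_layers() only reports a collection name as a problem when it is seen with more than one BBFILE_PATTERN; the same name repeated with an identical pattern is tolerated. A self-contained demo of that defaultdict(set) bookkeeping, with faked layer data standing in for the real parsed configuration:

from collections import defaultdict

layers = [
    {"collections": {"core": {"pattern": "^/srv/poky/meta/"}}},
    {"collections": {"core": {"pattern": "^/srv/other/meta/"}}},  # conflicting pattern
    {"collections": {"bsp": {"pattern": "^/srv/meta-bsp/"}}},
]

seen = defaultdict(set)
for layer in layers:
    for name, data in layer.get("collections", {}).items():
        seen[name].add(data["pattern"])

duplicates = {name: pats for name, pats in seen.items() if len(pats) > 1}
print(duplicates)  # e.g. {'core': {'^/srv/poky/meta/', '^/srv/other/meta/'}}
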
diff --git a/scripts/lib/checklayer/case.py b/scripts/lib/checklayer/case.py
index 9dd00412e5..fa9dee384e 100644
--- a/scripts/lib/checklayer/case.py
+++ b/scripts/lib/checklayer/case.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
from oeqa.core.case import OETestCase
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
index b6b611be73..a80a5844da 100644
--- a/scripts/lib/checklayer/cases/bsp.py
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
@@ -151,7 +153,7 @@ class BSPCheckLayer(OECheckLayerTestCase):
# do_build can be ignored: it is known to have
# different signatures in some cases, for example in
# the allarch ca-certificates due to RDEPENDS=openssl.
- # That particular dependency is whitelisted via
+ # That particular dependency is marked via
# SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
# in the sstate signature hash because filtering it
# out would be hard and running do_build multiple
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index 1bef61b048..491a13953c 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -1,18 +1,21 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import glob
import os
import unittest
+import re
from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
# The top-level README file may have a suffix (like README.rst or README.txt).
- readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*'))
+ readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
- msg="Layer doesn't contains README file.")
+ msg="Layer doesn't contain a README file.")
# There might be more than one file matching the file pattern above
# (for example, README.rst and README-COPYING.rst). The one with the shortest
@@ -24,6 +27,16 @@ class CommonCheckLayer(OECheckLayerTestCase):
self.assertTrue(data,
msg="Layer contains a README file but it is empty.")
+ # If a layer's README references another README, then the checks below are not valid
+ if re.search('README', data, re.IGNORECASE):
+ return
+
+ self.assertIn('maintainer', data.lower())
+ self.assertIn('patch', data.lower())
+ # Check that there is an email address in the README
+ email_regex = re.compile(r"[^@]+@[^@]+")
+ self.assertTrue(email_regex.match(data))
+
def test_parse(self):
check_command('Layer %s failed to parse.' % self.tc.layer['name'],
'bitbake -p')
@@ -41,6 +54,21 @@ class CommonCheckLayer(OECheckLayerTestCase):
'''
get_signatures(self.td['builddir'], failsafe=False)
+ def test_world_inherit_class(self):
+ '''
+ This also does "bitbake -S none world" along with inheriting "yocto-check-layer"
+ class, which can do additional per-recipe test cases.
+ '''
+ msg = []
+ try:
+ get_signatures(self.td['builddir'], failsafe=False, machine=None, extravars='BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS INHERIT" INHERIT="yocto-check-layer"')
+ except RuntimeError as ex:
+ msg.append(str(ex))
+ if msg:
+ msg.insert(0, 'Layer %s failed additional checks from yocto-check-layer.bbclass\nSee below log for specific recipe parsing errors:\n' % \
+ self.tc.layer['name'])
+ self.fail('\n'.join(msg))
+
def test_signatures(self):
if self.tc.layer['type'] == LayerType.SOFTWARE and \
not self.tc.test_software_layer_signatures:
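
test_world_inherit_class() above follows a collect-then-fail pattern: the expensive call runs once, any RuntimeError text is captured, and the test fails with a one-line summary prepended to the full log. A minimal unittest sketch of the same shape; the checked function is a stand-in that deliberately raises so the failure path is exercised:

import unittest

def run_expensive_check():
    # Stand-in for get_signatures(...); raises RuntimeError on failure
    raise RuntimeError("recipe foo failed to parse")

class ExampleCheck(unittest.TestCase):
    def test_collect_then_fail(self):
        msg = []
        try:
            run_expensive_check()
        except RuntimeError as ex:
            msg.append(str(ex))
        if msg:
            msg.insert(0, "Additional checks failed; see log below:")
            self.fail("\n".join(msg))

if __name__ == "__main__":
    unittest.main()
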
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
index df1b3035eb..f0bee5493c 100644
--- a/scripts/lib/checklayer/cases/distro.py
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
diff --git a/scripts/lib/checklayer/context.py b/scripts/lib/checklayer/context.py
index 1bec2c4103..4de8f668fd 100644
--- a/scripts/lib/checklayer/context.py
+++ b/scripts/lib/checklayer/context.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
index 89f098a912..702db669de 100644
--- a/scripts/lib/devtool/__init__.py
+++ b/scripts/lib/devtool/__init__.py
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugins module"""
import os
@@ -205,7 +195,8 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
import oe.patch
if not os.path.exists(os.path.join(repodir, '.git')):
bb.process.run('git init', cwd=repodir)
- bb.process.run('git add .', cwd=repodir)
+ bb.process.run('git config --local gc.autodetach 0', cwd=repodir)
+ bb.process.run('git add -f -A .', cwd=repodir)
commit_cmd = ['git']
oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
commit_cmd += ['commit', '-q']
@@ -221,8 +212,13 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
bb.process.run(commit_cmd, cwd=repodir)
# Ensure singletask.lock (as used by externalsrc.bbclass) is ignored by git
+ gitinfodir = os.path.join(repodir, '.git', 'info')
+ try:
+ os.mkdir(gitinfodir)
+ except FileExistsError:
+ pass
excludes = []
- excludefile = os.path.join(repodir, '.git', 'info', 'exclude')
+ excludefile = os.path.join(gitinfodir, 'exclude')
try:
with open(excludefile, 'r') as f:
excludes = f.readlines()
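
setup_git_repo() now creates .git/info itself before touching the exclude file, since a freshly initialised repository is not guaranteed to have that directory. The patch wraps os.mkdir in a FileExistsError handler; an equivalent sketch of an idempotent exclude update (the helper name is illustrative):

import os

def add_git_exclude(repodir, pattern):
    gitinfodir = os.path.join(repodir, ".git", "info")
    # makedirs(exist_ok=True) is equivalent to the mkdir/FileExistsError dance
    os.makedirs(gitinfodir, exist_ok=True)
    excludefile = os.path.join(gitinfodir, "exclude")
    excludes = []
    if os.path.exists(excludefile):
        with open(excludefile) as f:
            excludes = f.read().splitlines()
    if pattern not in excludes:
        with open(excludefile, "a") as f:
            f.write(pattern + "\n")
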
diff --git a/scripts/lib/devtool/build.py b/scripts/lib/devtool/build.py
index ba9593f1ad..935ffab46c 100644
--- a/scripts/lib/devtool/build.py
+++ b/scripts/lib/devtool/build.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool build plugin"""
import os
@@ -21,7 +11,8 @@ import bb
import logging
import argparse
import tempfile
-from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
logger = logging.getLogger('devtool')
@@ -53,12 +44,22 @@ def _get_build_tasks(config):
def build(args, config, basepath, workspace):
"""Entry point for the devtool 'build' subcommand"""
workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ deploytask = 'do_deploy' in rd.getVar('__BBTASKS')
+ finally:
+ tinfoil.shutdown()
if args.clean:
# use clean instead of cleansstate to avoid messing things up in eSDK
build_tasks = ['do_clean']
else:
build_tasks = _get_build_tasks(config)
+ if deploytask:
+ build_tasks.append('do_deploy')
bbappend = workspace[workspacepn]['bbappend']
if args.disable_parallel_make:
diff --git a/scripts/lib/devtool/build_image.py b/scripts/lib/devtool/build_image.py
index e5810389be..980f90ddd6 100644
--- a/scripts/lib/devtool/build_image.py
+++ b/scripts/lib/devtool/build_image.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the build-image subcommand."""
@@ -123,7 +113,7 @@ def build_image_task(config, basepath, workspace, image, add_packages=None, task
with open(appendfile, 'w') as afile:
if packages:
# include packages from workspace recipes into the image
- afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ afile.write('IMAGE_INSTALL:append = " %s"\n' % ' '.join(packages))
if not task:
logger.info('Building image %s with the following '
'additional packages: %s', image, ' '.join(packages))
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
index b89d65b0cb..6fe02fff2a 100644
--- a/scripts/lib/devtool/build_sdk.py
+++ b/scripts/lib/devtool/build_sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index 886004b5d0..e14a587417 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the deploy subcommands"""
import logging
@@ -178,22 +168,28 @@ def deploy(args, config, basepath, workspace):
if args.strip and not args.dry_run:
# Fakeroot copy to new destination
srcdir = recipe_outdir
- recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
+ recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'devtool-deploy-target-stripped')
if os.path.isdir(recipe_outdir):
- bb.utils.remove(recipe_outdir, True)
+ exec_fakeroot(rd, "rm -rf %s" % recipe_outdir, shell=True)
exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
rd.getVar('base_libdir'), rd)
filelist = []
+ inodes = set({})
ftotalsize = 0
for root, _, files in os.walk(recipe_outdir):
for fn in files:
+ fstat = os.lstat(os.path.join(root, fn))
# Get the size in kiB (since we'll be comparing it to the output of du -k)
# MUST use lstat() here not stat() or getfilesize() since we don't want to
# dereference symlinks
- fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
+ if fstat.st_ino in inodes:
+ fsize = 0
+ else:
+ fsize = int(math.ceil(float(fstat.st_size)/1024))
+ inodes.add(fstat.st_ino)
ftotalsize += fsize
# The path as it would appear on the target
fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
@@ -211,12 +207,20 @@ def deploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
scp_port = ''
ssh_port = ''
if args.port:
scp_port = "-P %s" % args.port
ssh_port = "-p %s" % args.port
+ if args.key:
+ extraoptions += ' -i %s' % args.key
+
# In order to delete previously deployed files and have the manifest file on
# the target, we write out a shell script and then copy it to the target
# so we can then run it (piping tar output to it).
@@ -238,7 +242,7 @@ def deploy(args, config, basepath, workspace):
for fpath, fsize in filelist:
f.write('%s %d\n' % (fpath, fsize))
# Copy them to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -246,7 +250,7 @@ def deploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s %s \'sh %s %s %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
if ret != 0:
raise DevtoolError('Deploy failed - rerun with -s to get a complete '
'error message')
@@ -276,6 +280,11 @@ def undeploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
scp_port = ''
ssh_port = ''
if args.port:
@@ -292,7 +301,7 @@ def undeploy(args, config, basepath, workspace):
with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
f.write(shellscript)
# Copy it to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -300,7 +309,7 @@ def undeploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = subprocess.call('ssh %s %s %s \'sh %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
if ret != 0:
raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
'error message')
@@ -324,7 +333,10 @@ def register_commands(subparsers, context):
parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_deploy.add_argument('-I', '--key',
+ help='Specify ssh private key for connection to the target')
strip_opts = parser_deploy.add_mutually_exclusive_group(required=False)
strip_opts.add_argument('-S', '--strip',
@@ -346,5 +358,9 @@ def register_commands(subparsers, context):
parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_undeploy.add_argument('-I', '--key',
+ help='Specify ssh private key for connection to the target')
+
parser_undeploy.set_defaults(func=undeploy)
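
The inode bookkeeping added to deploy() stops hardlinked files from being counted twice when estimating how much space the target needs: os.lstat() is kept (so symlinks are not followed), and a file's size contributes only the first time its inode is seen. A standalone sketch of the same accounting; keying on (st_dev, st_ino) would be stricter across filesystems:

import math
import os

def total_kib(topdir):
    seen_inodes = set()
    total = 0
    for root, _, files in os.walk(topdir):
        for fn in files:
            st = os.lstat(os.path.join(root, fn))  # lstat: don't follow symlinks
            if st.st_ino in seen_inodes:
                continue  # hardlink to a file already counted
            seen_inodes.add(st.st_ino)
            total += int(math.ceil(st.st_size / 1024))  # round up to whole kiB
    return total
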
diff --git a/scripts/lib/devtool/export.py b/scripts/lib/devtool/export.py
index 35349e2cda..01174edae5 100644
--- a/scripts/lib/devtool/export.py
+++ b/scripts/lib/devtool/export.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool export plugin"""
import os
diff --git a/scripts/lib/devtool/import.py b/scripts/lib/devtool/import.py
index 4264b7d820..6829851669 100644
--- a/scripts/lib/devtool/import.py
+++ b/scripts/lib/devtool/import.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool import plugin"""
import os
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
new file mode 100644
index 0000000000..95384c5333
--- /dev/null
+++ b/scripts/lib/devtool/menuconfig.py
@@ -0,0 +1,79 @@
+# OpenEmbedded Development tool - menuconfig command plugin
+#
+# Copyright (C) 2018 Xilinx
+# Written by: Chandana Kalluri <ckalluri@xilinx.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool menuconfig plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+import glob
+from devtool import setup_tinfoil, parse_recipe, DevtoolError, standard, exec_build_env_command
+from devtool import check_workspace_recipe
+logger = logging.getLogger('devtool')
+
+def menuconfig(args, config, basepath, workspace):
+ """Entry point for the devtool 'menuconfig' subcommand"""
+
+ rd = ""
+ kconfigpath = ""
+ pn_src = ""
+ localfilesdir = ""
+ workspace_dir = ""
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.component, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+
+ check_workspace_recipe(workspace, args.component)
+ pn = rd.getVar('PN', True)
+
+ if not rd.getVarFlag('do_menuconfig','task'):
+ raise DevtoolError("This recipe does not support menuconfig option")
+
+ workspace_dir = os.path.join(config.workspace_path,'sources')
+ kconfigpath = rd.getVar('B')
+ pn_src = os.path.join(workspace_dir,pn)
+
+ # add check to see if oe_local_files exists or not
+ localfilesdir = os.path.join(pn_src,'oe-local-files')
+ if not os.path.exists(localfilesdir):
+ bb.utils.mkdirhier(localfilesdir)
+ # Add gitignore to ensure source tree is clean
+ gitignorefile = os.path.join(localfilesdir,'.gitignore')
+ with open(gitignorefile, 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n')
+ f.write('*\n')
+
+ finally:
+ tinfoil.shutdown()
+
+ logger.info('Launching menuconfig')
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c menuconfig %s' % pn, watch=True)
+ fragment = os.path.join(localfilesdir, 'devtool-fragment.cfg')
+ res = standard._create_kconfig_diff(pn_src,rd,fragment)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """register devtool subcommands from this plugin"""
+ parser_menuconfig = subparsers.add_parser('menuconfig',help='Alter build-time configuration for a recipe', description='Launches the make menuconfig command (for recipes where do_menuconfig is available), allowing users to make changes to the build-time configuration. Creates a config fragment corresponding to changes made.', group='advanced')
+ parser_menuconfig.add_argument('component', help='component to alter config')
+ parser_menuconfig.set_defaults(func=menuconfig,fixed_setup=context.fixed_setup)
diff --git a/scripts/lib/devtool/package.py b/scripts/lib/devtool/package.py
index af9e8f15f5..c2367342c3 100644
--- a/scripts/lib/devtool/package.py
+++ b/scripts/lib/devtool/package.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the package subcommands"""
import os
diff --git a/scripts/lib/devtool/runqemu.py b/scripts/lib/devtool/runqemu.py
index e26cf28c2f..ead978aabc 100644
--- a/scripts/lib/devtool/runqemu.py
+++ b/scripts/lib/devtool/runqemu.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool runqemu plugin"""
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
index 4616753797..d717b6c2b8 100644
--- a/scripts/lib/devtool/sdk.py
+++ b/scripts/lib/devtool/sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
@@ -217,7 +207,7 @@ def sdk_update(args, config, basepath, workspace):
if not sstate_mirrors:
with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
- f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ f.write('SSTATE_MIRRORS:append = " file://.* %s/sstate-cache/PATH"\n' % updateserver)
finally:
shutil.rmtree(tmpsdk_dir)
diff --git a/scripts/lib/devtool/search.py b/scripts/lib/devtool/search.py
index b4f209b7e3..70b81cac5e 100644
--- a/scripts/lib/devtool/search.py
+++ b/scripts/lib/devtool/search.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool search plugin"""
@@ -72,10 +62,11 @@ def search(args, config, basepath, workspace):
with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
for line in f:
if ': ' in line:
- splitline = line.split(':', 1)
+ splitline = line.split(': ', 1)
key = splitline[0]
value = splitline[1].strip()
- if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ key = key.replace(":" + pkg, "")
+ if key in ['PKG', 'DESCRIPTION', 'FILES_INFO', 'FILERPROVIDES']:
if keyword_rc.search(value):
match = True
break
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index b7d4d47dfc..01fb5ad96f 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool standard plugins"""
import os
@@ -155,8 +145,8 @@ def add(args, config, basepath, workspace):
extracmdopts += ' --src-subdir "%s"' % args.src_subdir
if args.autorev:
extracmdopts += ' -a'
- if args.fetch_dev:
- extracmdopts += ' --fetch-dev'
+ if args.npm_dev:
+ extracmdopts += ' --npm-dev'
if args.mirrors:
extracmdopts += ' --mirrors'
if args.srcrev:
@@ -264,20 +254,16 @@ def add(args, config, basepath, workspace):
f.write('\n# initial_rev: %s\n' % initial_rev)
if args.binary:
- f.write('do_install_append() {\n')
+ f.write('do_install:append() {\n')
f.write(' rm -rf ${D}/.git\n')
f.write(' rm -f ${D}/singletask.lock\n')
f.write('}\n')
if bb.data.inherits_class('npm', rd):
- f.write('do_install_append() {\n')
- f.write(' # Remove files added to source dir by devtool/externalsrc\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
- f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
- f.write(' done\n')
+ f.write('python do_configure:append() {\n')
+ f.write(' pkgdir = d.getVar("NPM_PACKAGE")\n')
+ f.write(' lockfile = os.path.join(pkgdir, "singletask.lock")\n')
+ f.write(' bb.utils.remove(lockfile)\n')
f.write('}\n')
# Check if the new layer provides recipes whose priorities have been
@@ -332,10 +318,6 @@ def _check_compatible_recipe(pn, d):
raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
"not supported by this tool" % pn, 4)
- if bb.data.inherits_class('meta', d):
- raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
- "not supported by this tool" % pn, 4)
-
if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
# Not an incompatibility error per se, so we don't pass the error code
raise DevtoolError("externalsrc is currently enabled for the %s "
@@ -471,11 +453,41 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
+def symlink_oelocal_files_srctree(rd,srctree):
+ import oe.patch
+ if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
+ # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+ # (otherwise the recipe won't build as expected)
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ addfiles = []
+ for root, _, files in os.walk(local_files_dir):
+ relpth = os.path.relpath(root, local_files_dir)
+ if relpth != '.':
+ bb.utils.mkdirhier(os.path.join(srctree, relpth))
+ for fn in files:
+ if fn == '.gitignore':
+ continue
+ destpth = os.path.join(srctree, relpth, fn)
+ if os.path.exists(destpth):
+ os.unlink(destpth)
+ if relpth != '.':
+ back_relpth = os.path.relpath(local_files_dir, root)
+ os.symlink('%s/oe-local-files/%s/%s' % (back_relpth, relpth, fn), destpth)
+ else:
+ os.symlink('oe-local-files/%s' % fn, destpth)
+ addfiles.append(os.path.join(relpth, fn))
+ if addfiles:
+ bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
+ bb.process.run('git %s commit -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+
def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
import oe.recipeutils
import oe.patch
+ import oe.path
pn = d.getVar('PN')
@@ -507,7 +519,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
history = d.varhistory.variable('SRC_URI')
for event in history:
if not 'flag' in event:
- if event['op'].startswith(('_append[', '_prepend[')):
+ if event['op'].startswith((':append[', ':prepend[')):
extra_overrides.append(event['op'].split('[')[1].split(']')[0])
# We want to remove duplicate overrides. If a recipe had multiple
# SRC_URI_override += values it would cause multiple instances of
@@ -519,7 +531,6 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
initial_rev = None
- appendexisted = False
recipefile = d.getVar('FILE')
appendfile = recipe_to_append(recipefile, config)
is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
@@ -572,12 +583,22 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
with open(preservestampfile, 'w') as f:
f.write(d.getVar('STAMP'))
try:
- if bb.data.inherits_class('kernel-yocto', d):
+ if is_kernel_yocto:
# We need to generate the kernel config
task = 'do_configure'
else:
task = 'do_patch'
+ if 'noexec' in (d.getVarFlags(task, False) or []) or 'task' not in (d.getVarFlags(task, False) or []):
+ logger.info('The %s recipe has %s disabled. Running only '
+ 'do_configure task dependencies' % (pn, task))
+
+ if 'depends' in d.getVarFlags('do_configure', False):
+ pn = d.getVarFlags('do_configure', False)['depends']
+ pn = pn.replace('${PV}', d.getVar('PV'))
+ pn = pn.replace('${COMPILERDEP}', d.getVar('COMPILERDEP'))
+ task = None
+
# Run the fetch + unpack tasks
res = tinfoil.build_targets(pn,
task,
@@ -589,6 +610,17 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if not res:
raise DevtoolError('Extracting source for %s failed' % pn)
+ if not is_kernel_yocto and ('noexec' in (d.getVarFlags('do_patch', False) or []) or 'task' not in (d.getVarFlags('do_patch', False) or [])):
+ workshareddir = d.getVar('S')
+ if os.path.islink(srctree):
+ os.unlink(srctree)
+
+ os.symlink(workshareddir, srctree)
+
+ # The initial_rev file is created by the devtool_post_unpack function, which will not run if
+ # the do_unpack/do_patch tasks are disabled, so report the source extraction as successful directly
+ return True, True
+
try:
with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
initial_rev = f.read()
@@ -599,6 +631,23 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
raise DevtoolError('Something went wrong with source extraction - the devtool-source class was not active or did not function correctly:\n%s' % str(e))
srcsubdir_rel = os.path.relpath(srcsubdir, os.path.join(tempdir, 'workdir'))
+ # Check if work-shared is empty, if yes
+ # find source and copy to work-shared
+ if is_kernel_yocto:
+ workshareddir = d.getVar('STAGING_KERNEL_DIR')
+ staging_kerVer = get_staging_kver(workshareddir)
+ kernelVersion = d.getVar('LINUX_VERSION')
+
+ # handle dangling symbolic link in work-shared:
+ if os.path.islink(workshareddir):
+ os.unlink(workshareddir)
+
+ if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
+ shutil.rmtree(workshareddir)
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ elif not os.path.exists(workshareddir):
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
+
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
@@ -627,29 +676,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
-
- if os.path.abspath(d.getVar('S')) == os.path.abspath(d.getVar('WORKDIR')):
- # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
- # (otherwise the recipe won't build as expected)
- local_files_dir = os.path.join(srctree, 'oe-local-files')
- addfiles = []
- for root, _, files in os.walk(local_files_dir):
- relpth = os.path.relpath(root, local_files_dir)
- if relpth != '.':
- bb.utils.mkdirhier(os.path.join(srctree, relpth))
- for fn in files:
- if fn == '.gitignore':
- continue
- destpth = os.path.join(srctree, relpth, fn)
- if os.path.exists(destpth):
- os.unlink(destpth)
- os.symlink('oe-local-files/%s' % fn, destpth)
- addfiles.append(os.path.join(relpth, fn))
- if addfiles:
- bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
- bb.process.run('git %s commit -a -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+ symlink_oelocal_files_srctree(d,srctree)
if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
@@ -715,13 +742,33 @@ def _check_preserve(config, recipename):
os.remove(removefile)
else:
tf.write(line)
- os.rename(newfile, origfile)
+ bb.utils.rename(newfile, origfile)
+
+def get_staging_kver(srcdir):
+ # Kernel version from work-shared
+ kerver = []
+ staging_kerVer=""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ with open(os.path.join(srcdir,"Makefile")) as f:
+ version = [next(f) for x in range(5)][1:4]
+ for word in version:
+ kerver.append(word.split('= ')[1].split('\n')[0])
+ staging_kerVer = ".".join(kerver)
+ return staging_kerVer
+
+def get_staging_kbranch(srcdir):
+ staging_kbranch = ""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ (branch, _) = bb.process.run('git branch | grep \* | cut -d \' \' -f2', cwd=srcdir)
+ staging_kbranch = "".join(branch.split('\n')[0])
+ return staging_kbranch
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
import oe.recipeutils
import oe.patch
+ import oe.path
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" %
@@ -763,15 +810,69 @@ def modify(args, config, basepath, workspace):
initial_rev = None
commits = []
check_commits = False
+
+ if bb.data.inherits_class('kernel-yocto', rd):
+ # Current set kernel version
+ kernelVersion = rd.getVar('LINUX_VERSION')
+ srcdir = rd.getVar('STAGING_KERNEL_DIR')
+ kbranch = rd.getVar('KBRANCH')
+
+ staging_kerVer = get_staging_kver(srcdir)
+ staging_kbranch = get_staging_kbranch(srcdir)
+ if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
+ oe.path.copyhardlinktree(srcdir,srctree)
+ workdir = rd.getVar('WORKDIR')
+ srcsubdir = rd.getVar('S')
+ localfilesdir = os.path.join(srctree,'oe-local-files')
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
+ local_files = oe.recipeutils.get_recipe_local_files(rd)
+
+ for key in local_files.copy():
+ if key.endswith('scc'):
+ sccfile = open(local_files[key], 'r')
+ for l in sccfile:
+ line = l.split()
+ if line and line[0] in ('kconf', 'patch'):
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if not cfg in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
+ sccfile.close()
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if os.path.exists(os.path.join(workdir, fname)) and (srcabspath == workdir or not os.path.join(workdir, fname).startswith(srcabspath + os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
+ with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n*\n')
+
+ symlink_oelocal_files_srctree(rd,srctree)
+
+ task = 'do_configure'
+ res = tinfoil.build_targets(pn, task, handle_events=True)
+
+ # Copy .config to workspace
+ kconfpath = rd.getVar('B')
+ logger.info('Copying kernel config to workspace')
+ shutil.copy2(os.path.join(kconfpath, '.config'),srctree)
+
+ # Set this to true, we still need to get initial_rev
+ # by parsing the git repo
+ args.no_extract = True
+
if not args.no_extract:
initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
if not initial_rev:
return 1
logger.info('Source tree extracted to %s' % srctree)
- # Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
- check_commits = True
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
+ commits = stdout.split()
+ check_commits = True
else:
if os.path.exists(os.path.join(srctree, '.git')):
# Check if it's a tree previously extracted by us. This is done
@@ -831,31 +932,41 @@ def modify(args, config, basepath, workspace):
bb.utils.mkdirhier(os.path.dirname(appendfile))
with open(appendfile, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n')
# Local files can be modified/tracked in separate subdir under srctree
# Mostly useful for packages with S != WORKDIR
- f.write('FILESPATH_prepend := "%s:"\n' %
+ f.write('FILESPATH:prepend := "%s:"\n' %
os.path.join(srctreebase, 'oe-local-files'))
f.write('# srctreebase: %s\n' % srctreebase)
f.write('\ninherit externalsrc\n')
f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
if bb.data.inherits_class('kernel', rd):
f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
- f.write('\ndo_patch() {\n'
- ' :\n'
- '}\n')
- f.write('\ndo_configure_append() {\n'
+ 'do_fetch do_unpack do_kernel_configcheck"\n')
+ f.write('\ndo_patch[noexec] = "1"\n')
+ f.write('\ndo_configure:append() {\n'
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
+ f.write('\ndo_kernel_configme:prepend() {\n'
+ ' if [ -e ${S}/.config ]; then\n'
+ ' mv ${S}/.config ${S}/.config.old\n'
+ ' fi\n'
+ '}\n')
+ if rd.getVarFlag('do_menuconfig','task'):
+ f.write('\ndo_configure:append() {\n'
+ ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ ' fi\n'
+ '}\n')
if initial_rev:
f.write('\n# initial_rev: %s\n' % initial_rev)
for commit in commits:
@@ -979,10 +1090,10 @@ def rename(args, config, basepath, workspace):
# Rename bbappend
logger.info('Renaming %s to %s' % (append, newappend))
- os.rename(append, newappend)
+ bb.utils.rename(append, newappend)
# Rename recipe file
logger.info('Renaming %s to %s' % (recipefile, newfile))
- os.rename(recipefile, newfile)
+ bb.utils.rename(recipefile, newfile)
# Rename source tree if it's the default path
appendmd5 = None
@@ -1218,7 +1329,7 @@ def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
if match_name:
# Rename patch files
if new_patch != match_name:
- os.rename(os.path.join(destdir, new_patch),
+ bb.utils.rename(os.path.join(destdir, new_patch),
os.path.join(destdir, match_name))
# Need to pop it off the list now before checking changed_revs
oldpath = existing_patches.pop(old_patch)
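Throughout this series os.rename is replaced with bb.utils.rename, which falls back to a copy-based move when the rename crosses filesystems and os.rename alone would fail with EXDEV. A rough sketch of that fallback behaviour (an approximation, not the bb.utils source):

    import errno
    import os
    import shutil

    def rename(src, dst):
        # Try a plain rename first; fall back to shutil.move when src and
        # dst live on different filesystems (cross-device link error)
        try:
            os.rename(src, dst)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.move(src, dst)
            else:
                raise
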
@@ -1330,6 +1441,20 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
os.unlink(os.path.join(local_files_dir, fragment_fn))
+ # Special handling for cml1, ccmake, etc. bbclasses that generate
+ # configuration fragment files that are consumed as source files
+ for frag_class, frag_name in [("cml1", "fragment.cfg"), ("ccmake", "site-file.cmake")]:
+ if bb.data.inherits_class(frag_class, rd):
+ srcpath = os.path.join(rd.getVar('WORKDIR'), frag_name)
+ if os.path.exists(srcpath):
+ if frag_name not in new_set:
+ new_set.append(frag_name)
+ # copy fragment into destdir
+ shutil.copy2(srcpath, destdir)
+ # copy fragment into the local files dir if it exists
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(srcpath, local_files_dir)
+
if new_set is not None:
for fname in new_set:
if fname in existing_files:
@@ -1518,17 +1643,17 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
patches_dir, changed_revs)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
- new_p = {}
- upd_p = {k:v for k,v in upd_p.items() if k in filter_patches}
+ new_p = OrderedDict()
+ upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
remove_files = [f for f in remove_files if f in filter_patches]
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
if appendlayerdir:
- files = dict((os.path.join(local_files_dir, key), val) for
+ files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
- files.update(dict((os.path.join(patches_dir, key), val) for
+ files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
if files or remove_files:
removevalues = None
@@ -1612,7 +1737,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
def _guess_recipe_update_mode(srctree, rdata):
"""Guess the recipe update mode to use"""
- src_uri = (rdata.getVar('SRC_URI', False) or '').split()
+ src_uri = (rdata.getVar('SRC_URI') or '').split()
git_uris = [uri for uri in src_uri if uri.startswith('git://')]
if not git_uris:
return 'patch'
@@ -1751,7 +1876,7 @@ def status(args, config, basepath, workspace):
return 0
-def _reset(recipes, no_clean, config, basepath, workspace):
+def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
"""Reset one or more recipes"""
import oe.path
@@ -1829,10 +1954,15 @@ def _reset(recipes, no_clean, config, basepath, workspace):
srctreebase = workspace[pn]['srctreebase']
if os.path.isdir(srctreebase):
if os.listdir(srctreebase):
- # We don't want to risk wiping out any work in progress
- logger.info('Leaving source tree %s as-is; if you no '
- 'longer need it then please delete it manually'
- % srctreebase)
+ if remove_work:
+ logger.info('-r argument used on %s, removing source tree.'
+ ' You will lose any unsaved work' % pn)
+ shutil.rmtree(srctreebase)
+ else:
+ # We don't want to risk wiping out any work in progress
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
else:
# This is unlikely, but if it's empty we can just remove it
os.rmdir(srctreebase)
@@ -1842,6 +1972,10 @@ def _reset(recipes, no_clean, config, basepath, workspace):
def reset(args, config, basepath, workspace):
"""Entry point for the devtool 'reset' subcommand"""
import bb
+ import shutil
+
+ recipes = ""
+
if args.recipename:
if args.all:
raise DevtoolError("Recipe cannot be specified if -a/--all is used")
@@ -1856,7 +1990,7 @@ def reset(args, config, basepath, workspace):
else:
recipes = args.recipename
- _reset(recipes, args.no_clean, config, basepath, workspace)
+ _reset(recipes, args.no_clean, args.remove_work, config, basepath, workspace)
return 0
@@ -1864,13 +1998,27 @@ def reset(args, config, basepath, workspace):
def _get_layer(layername, d):
"""Determine the base layer path for the specified layer name/path"""
layerdirs = d.getVar('BBLAYERS').split()
- layers = {os.path.basename(p): p for p in layerdirs}
+ layers = {} # {basename: layer_paths}
+ for p in layerdirs:
+ bn = os.path.basename(p)
+ if bn not in layers:
+ layers[bn] = [p]
+ else:
+ layers[bn].append(p)
# Provide some shortcuts
if layername.lower() in ['oe-core', 'openembedded-core']:
- layerdir = layers.get('meta', None)
+ layername = 'meta'
+ layer_paths = layers.get(layername, None)
+ if not layer_paths:
+ return os.path.abspath(layername)
+ elif len(layer_paths) == 1:
+ return os.path.abspath(layer_paths[0])
else:
- layerdir = layers.get(layername, None)
- return os.path.abspath(layerdir or layername)
+ # Multiple layers share the same base name; the first one wins
+ logger.warning("Multiple layers have the same base name '%s'; using the first one: '%s'." % (layername, layer_paths[0]))
+ logger.warning("Consider using the layer path instead of the base name to specify the layer:\n\t\t%s" % '\n\t\t'.join(layer_paths))
+ return os.path.abspath(layer_paths[0])
+
def finish(args, config, basepath, workspace):
"""Entry point for the devtool 'finish' subcommand"""
@@ -1893,7 +2041,8 @@ def finish(args, config, basepath, workspace):
else:
raise DevtoolError('Source tree is not clean:\n\n%s\nEnsure you have committed your changes or use -f/--force if you are sure there\'s nothing that needs to be committed' % dirty)
- no_clean = False
+ no_clean = args.no_clean
+ remove_work = args.remove_work
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
@@ -2045,7 +2194,7 @@ def finish(args, config, basepath, workspace):
if args.dry_run:
logger.info('Resetting recipe (dry-run)')
else:
- _reset([args.recipename], no_clean=no_clean, config=config, basepath=basepath, workspace=workspace)
+ _reset([args.recipename], no_clean=no_clean, remove_work=remove_work, config=config, basepath=basepath, workspace=workspace)
return 0
@@ -2072,7 +2221,7 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
- parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--npm-dev', help='For npm, also fetch devDependencies', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
group = parser_add.add_mutually_exclusive_group()
@@ -2157,6 +2306,7 @@ def register_commands(subparsers, context):
parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory along with the append')
parser_reset.set_defaults(func=reset)
parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
@@ -2167,6 +2317,8 @@ def register_commands(subparsers, context):
parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
parser_finish.add_argument('--force', '-f', action="store_true", help='Force continuing even if there are uncommitted changes in the source tree repository')
+ parser_finish.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory under the workspace')
+ parser_finish.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
parser_finish.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
parser_finish.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
parser_finish.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
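The reworked _get_layer can be exercised standalone; a minimal sketch of the duplicate-basename handling with invented layer paths ('layerdirs' stands in for d.getVar('BBLAYERS').split()):

    import os

    def pick_layer(layername, layerdirs):
        # Group layer paths by base name, resolve the oe-core alias, and
        # let the first match win when base names collide
        layers = {}
        for p in layerdirs:
            layers.setdefault(os.path.basename(p), []).append(p)
        if layername.lower() in ('oe-core', 'openembedded-core'):
            layername = 'meta'
        paths = layers.get(layername)
        if not paths:
            return os.path.abspath(layername)
        if len(paths) > 1:
            print("warning: multiple layers named '%s', using '%s'" % (layername, paths[0]))
        return os.path.abspath(paths[0])

    print(pick_layer('meta-oe', ['/srcs/poky/meta', '/srcs/meta-oe', '/srcs/vendor/meta-oe']))
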
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index 75e765e019..0357ec07bf 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -2,18 +2,7 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
@@ -43,7 +32,7 @@ def _run(cmd, cwd=''):
def _get_srctree(tmpdir):
srctree = tmpdir
- dirs = os.listdir(tmpdir)
+ dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
return srctree
@@ -82,7 +71,8 @@ def _rename_recipe_dirs(oldpv, newpv, path):
if oldfile.find(oldpv) != -1:
newfile = oldfile.replace(oldpv, newpv)
if oldfile != newfile:
- os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
+ bb.utils.rename(os.path.join(path, oldfile),
+ os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
@@ -113,14 +103,14 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
pn = d.getVar('PN')
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
f.write('inherit externalsrc\n')
f.write(('# NOTE: We use pn- overrides here to avoid affecting '
'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(same_dir, no_same_dir, d)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
f.write('\n')
if rev:
f.write('# initial_rev: %s\n' % rev)
@@ -133,18 +123,22 @@ def _cleanup_on_error(rf, srctree):
rfp = os.path.split(rf)[0] # recipe folder
rfpp = os.path.split(rfp)[0] # recipes folder
if os.path.exists(rfp):
- shutil.rmtree(b)
+ shutil.rmtree(rfp)
if not len(os.listdir(rfpp)):
os.rmdir(rfpp)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree):
- if rf:
- cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None):
+ if rf and not keep_failure:
+ _cleanup_on_error(rf, srctree)
logger.error(e)
- raise DevtoolError(e)
+ if extramsg:
+ logger.error(extramsg)
+ if keep_failure:
+ logger.info('Preserving failed upgrade files (--keep-failure)')
+ sys.exit(1)
def _get_uri(rd):
srcuris = rd.getVar('SRC_URI').split()
@@ -185,7 +179,7 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
uri, rev = _get_uri(crd)
if srcrev:
rev = srcrev
- if uri.startswith('git://'):
+ if uri.startswith('git://') or uri.startswith('gitsm://'):
__run('git fetch')
__run('git checkout %s' % rev)
__run('git tag -f devtool-base-new')
@@ -198,14 +192,15 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
get_branch = [x.strip() for x in check_branch.splitlines()]
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
- if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
- # Even with the case where get_branch has multiple objects, if 'master' is one
- # of them, we should default take from 'master'
- srcbranch = ''
- elif len(get_branch) == 1:
- # If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
+ if len(get_branch) == 1:
+ # If srcrev is on only ONE branch, then use that branch
srcbranch = get_branch[0]
+ elif 'main' in get_branch:
+ # If srcrev is on multiple branches, then choose 'main' if it is one of them
+ srcbranch = 'main'
+ elif 'master' in get_branch:
+ # Otherwise choose 'master' if it is one of the branches
+ srcbranch = 'master'
else:
# If get_branch contains more than one object, then display an error and exit.
mbrch = '\n ' + '\n '.join(get_branch)
@@ -242,14 +237,14 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
# Copy in new ones
_copy_source_code(tmpsrctree, srctree)
- (stdout,_) = __run('git ls-files --modified --others --exclude-standard')
+ (stdout,_) = __run('git ls-files --modified --others')
filelist = stdout.splitlines()
pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
pbar.start()
batchsize = 100
for i in range(0, len(filelist), batchsize):
batch = filelist[i:i+batchsize]
- __run('git add -A %s' % ' '.join(['"%s"' % item for item in batch]))
+ __run('git add -f -A %s' % ' '.join(['"%s"' % item for item in batch]))
pbar.update(i)
pbar.finish()
@@ -267,27 +262,28 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
__run('git checkout devtool-patched -b %s' % branch)
- skiptag = False
- try:
- __run('git rebase %s' % rev)
- except bb.process.ExecutionError as e:
- skiptag = True
- if 'conflict' in e.stdout:
- logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
- else:
- logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
- if not skiptag:
- if uri.startswith('git://'):
- suffix = 'new'
- else:
- suffix = newpv
- __run('git tag -f devtool-patched-%s' % suffix)
+ (stdout, _) = __run('git branch --list devtool-override-*')
+ branches_to_rebase = [branch] + stdout.split()
+ for b in branches_to_rebase:
+ logger.info("Rebasing {} onto {}".format(b, rev))
+ __run('git checkout %s' % b)
+ try:
+ __run('git rebase %s' % rev)
+ except bb.process.ExecutionError as e:
+ if 'conflict' in e.stdout:
+ logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ __run('git rebase --abort')
+ else:
+ logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ __run('git checkout %s' % branch)
if tmpsrctree:
if keep_temp:
logger.info('Preserving temporary directory %s' % tmpsrctree)
else:
shutil.rmtree(tmpsrctree)
+ if tmpdir != tmpsrctree:
+ shutil.rmtree(tmpdir)
return (rev, md5, sha256, srcbranch, srcsubdir_rel)
@@ -310,7 +306,7 @@ def _add_license_diff_to_recipe(path, diff):
f.write("\n#\n\n".encode())
f.write(orig_content)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses):
+def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
@@ -396,12 +392,12 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues['SRC_URI[%s.md5sum]' % name] = None
newvalues['SRC_URI[%s.sha256sum]' % name] = None
- if md5 and sha256:
+ if sha256:
if addnames:
nameprefix = '%s.' % addnames[0]
else:
nameprefix = ''
- newvalues['SRC_URI[%smd5sum]' % nameprefix] = md5
+ newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
if srcsubdir_new != srcsubdir_old:
@@ -427,7 +423,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
- rd = tinfoil.parse_recipe_file(fullpath, False)
+ try:
+ rd = tinfoil.parse_recipe_file(fullpath, False)
+ except bb.tinfoil.TinfoilCommandFailed as e:
+ _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
@@ -523,6 +522,15 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(rd.getVar('S'))
+ workdir = os.path.abspath(rd.getVar('WORKDIR'))
+ srctree_s = srctree
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree_s = os.path.join(srctree, srcsubdir)
+
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
version_info = oe.recipeutils.get_recipe_upstream_version(rd)
@@ -552,21 +560,21 @@ def upgrade(args, config, basepath, workspace):
try:
logger.info('Extracting current version source...')
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ old_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
logger.info('Extracting upgraded version source...')
rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
- new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses)
+ rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
except DevtoolError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
- af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ af = _write_append(rf, srctree_s, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
@@ -576,6 +584,9 @@ def upgrade(args, config, basepath, workspace):
logger.info('New recipe is %s' % rf)
if license_diff:
logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
+ preferred_version = rd.getVar('PREFERRED_VERSION_%s' % rd.getVar('PN'))
+ if preferred_version:
+ logger.warning('Version is pinned to %s via PREFERRED_VERSION; it may need adjustment to match the new version before any further steps are taken' % preferred_version)
finally:
tinfoil.shutdown()
return 0
@@ -634,6 +645,7 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_upgrade.add_argument('--keep-failure', action="store_true", help='Keep failed upgrade recipe and associated files (for debugging)')
parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
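The new branch-selection order in _extract_new_source (a unique branch wins outright, then 'main', then 'master') is easy to verify in isolation; a small sketch with invented branch lists:

    def choose_srcbranch(get_branch):
        # Same precedence as the hunk above; anything else is ambiguous
        if len(get_branch) == 1:
            return get_branch[0]
        if 'main' in get_branch:
            return 'main'
        if 'master' in get_branch:
            return 'master'
        raise ValueError('srcrev is on multiple branches:\n  ' + '\n  '.join(get_branch))

    assert choose_srcbranch(['devel']) == 'devel'
    assert choose_srcbranch(['main', 'master', 'release-2.1']) == 'main'
    assert choose_srcbranch(['master', 'release-2.1']) == 'master'
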
diff --git a/scripts/lib/devtool/utilcmds.py b/scripts/lib/devtool/utilcmds.py
index 7cd139fb8b..964817766b 100644
--- a/scripts/lib/devtool/utilcmds.py
+++ b/scripts/lib/devtool/utilcmds.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool utility plugins"""
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
index 3f2c134ad5..88ed8c5f01 100644
--- a/scripts/lib/recipetool/append.py
+++ b/scripts/lib/recipetool/append.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -59,7 +49,7 @@ def find_target_file(targetpath, d, pkglist=None):
'/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
'/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
'/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
- '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
+ '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname:pn-base-files = "value" in configuration',}
for pthspec, message in invalidtargets.items():
if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
@@ -82,15 +72,15 @@ def find_target_file(targetpath, d, pkglist=None):
# This does assume that PN comes before other values, but that's a fairly safe assumption
for line in f:
if line.startswith('PN:'):
- pn = line.split(':', 1)[1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ pn = line.split(': ', 1)[1].strip()
+ elif line.startswith('FILES_INFO'):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, targetpath):
recipes[targetpath].append(pn)
- elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
- scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
+ elif line.startswith('pkg_preinst:') or line.startswith('pkg_postinst:'):
+ scriptval = line.split(': ', 1)[1].strip().encode('utf-8').decode('unicode_escape')
if 'update-alternatives --install %s ' % targetpath in scriptval:
recipes[targetpath].append('?%s' % pn)
elif targetpath_re.search(scriptval):
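The switch in find_target_file from splitting on ':' to ': ' matters because pkgdata keys themselves now contain colons under the new override syntax; a sketch with a hypothetical pkgdata line:

    import json

    # Hypothetical pkgdata line: the key 'FILES_INFO:base-files' contains a
    # colon, so only the ': ' separator splits key from value correctly
    line = 'FILES_INFO:base-files: {"/etc/hostname": 10}'
    key, val = line.split(': ', 1)
    print(key)              # FILES_INFO:base-files
    print(json.loads(val))  # {'/etc/hostname': 10}
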
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 1810c70ae2..220465ed2f 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -70,11 +60,13 @@ class RecipeHandler(object):
if RecipeHandler.recipelibmap:
return
# First build up library->package mapping
- shlib_providers = oe.package.read_shlib_providers(d)
+ d2 = bb.data.createCopy(d)
+ d2.setVar("WORKDIR_PKGDATA", "${PKGDATA_DIR}")
+ shlib_providers = oe.package.read_shlib_providers(d2)
libdir = d.getVar('libdir')
base_libdir = d.getVar('base_libdir')
libpaths = list(set([base_libdir, libdir]))
- libname_re = re.compile('^lib(.+)\.so.*$')
+ libname_re = re.compile(r'^lib(.+)\.so.*$')
pkglibmap = {}
for lib, item in shlib_providers.items():
for path, pkg in item.items():
@@ -123,8 +115,8 @@ class RecipeHandler(object):
for line in f:
if line.startswith('PN:'):
pn = line.split(':', 1)[-1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ elif line.startswith('FILES_INFO:%s:' % pkg):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in sorted(dictval):
if fullpth.startswith(includedir) and fullpth.endswith('.h'):
@@ -374,7 +366,7 @@ def supports_srcrev(uri):
def reformat_git_uri(uri):
'''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
checkuri = uri.split(';', 1)[0]
- if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://git(hub|lab).com/[^/]+/[^/]+/?$', checkuri):
# Appends scheme if the scheme is missing
if not '://' in uri:
uri = 'git://' + uri
@@ -436,14 +428,14 @@ def create_recipe(args):
if scriptutils.is_src_url(source):
# Warn about github archive URLs
- if re.match('https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
+ if re.match(r'https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
- fetchuri += ';subdir=${BP}'
+ fetchuri += ';subdir=${BPN}'
srcuri = fetchuri
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
@@ -468,6 +460,7 @@ def create_recipe(args):
logger.error('branch= parameter and -B/--srcbranch option cannot both be specified - use one or the other')
sys.exit(1)
srcbranch = args.srcbranch
+ params['branch'] = srcbranch
nobranch = params.get('nobranch')
if nobranch and srcbranch:
logger.error('nobranch= cannot be used if you specify a branch')
@@ -485,8 +478,9 @@ def create_recipe(args):
storeTagName = params['tag']
params['nobranch'] = '1'
del params['tag']
- if scheme == 'npm':
- params['noverify'] = '1'
+ # Assume 'master' branch if not set
+ if scheme in ['git', 'gitsm'] and 'branch' not in params and 'nobranch' not in params:
+ params['branch'] = 'master'
fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
@@ -503,9 +497,7 @@ def create_recipe(args):
if ftmpdir and args.keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- dirlist = os.listdir(srctree)
- filterout = ['git.indirectionsymlink']
- dirlist = [x for x in dirlist if x not in filterout]
+ dirlist = scriptutils.filter_src_subdirs(srctree)
logger.debug('Directory listing (excluding filtered out):\n %s' % '\n '.join(dirlist))
if len(dirlist) == 1:
singleitem = os.path.join(srctree, dirlist[0])
@@ -538,10 +530,9 @@ def create_recipe(args):
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
# Even in the case where get_branch has multiple entries, if 'master' is one
# of them, we should default to taking from 'master'
- srcbranch = ''
+ srcbranch = 'master'
elif len(get_branch) == 1:
# If 'master' isn't in get_branch and get_branch contains only ONE entry, then store the result in 'srcbranch'
srcbranch = get_branch[0]
@@ -554,8 +545,8 @@ def create_recipe(args):
# Since we might have a value in srcbranch, we need to
# reconstruct the srcuri to include 'branch' in params.
scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
- if srcbranch:
- params['branch'] = srcbranch
+ if scheme in ['git', 'gitsm']:
+ params['branch'] = srcbranch or 'master'
if storeTagName and scheme in ['git', 'gitsm']:
# Check srcrev using tag and check validity of the tag
@@ -614,7 +605,7 @@ def create_recipe(args):
splitline = line.split()
if len(splitline) > 1:
if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
- srcuri = reformat_git_uri(splitline[1])
+ srcuri = reformat_git_uri(splitline[1]) + ';branch=master'
srcsubdir = 'git'
break
@@ -704,7 +695,7 @@ def create_recipe(args):
if not args.autorev and srcrev == '${AUTOREV}':
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- srcrev = stdout.rstrip()
+ srcrev = stdout.rstrip()
lines_before.append('SRCREV = "%s"' % srcrev)
if args.provides:
lines_before.append('PROVIDES = "%s"' % args.provides)
@@ -721,13 +712,11 @@ def create_recipe(args):
lines_after.append('')
if args.binary:
- lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
+ lines_after.append('INSANE_SKIP:${PN} += "already-stripped"')
lines_after.append('')
- if args.fetch_dev:
- extravalues['fetchdev'] = True
- else:
- extravalues['fetchdev'] = None
+ if args.npm_dev:
+ extravalues['NPM_INSTALL_DEV'] = 1
# Find all plugins that want to register handlers
logger.debug('Loading recipe handlers')
@@ -843,7 +832,7 @@ def create_recipe(args):
elif line.startswith('PV = '):
if realpv:
# Replace the first part of the PV value
- line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
+ line = re.sub(r'"[^+]*\+', '"%s+' % realpv, line)
lines_before.append(line)
if args.also_native:
@@ -930,6 +919,22 @@ def split_value(value):
else:
return value
+def fixup_license(value):
+ # Ensure a license expression containing OR is wrapped in brackets
+ if '|' in value:
+ return '(' + value + ')'
+ return value
+
+def tidy_licenses(value):
+ """Flat, split and sort licenses"""
+ from oe.license import flattened_licenses
+ def _choose(a, b):
+ str_a, str_b = sorted((" & ".join(a), " & ".join(b)), key=str.casefold)
+ return ["(%s | %s)" % (str_a, str_b)]
+ if not isinstance(value, str):
+ value = " & ".join(value)
+ return sorted(list(set(flattened_licenses(value, _choose))), key=str.casefold)
+
def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lichandled = [x for x in handled if x[0] == 'license']
if lichandled:
@@ -943,10 +948,13 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines = []
if licvalues:
for licvalue in licvalues:
- if not licvalue[0] in licenses:
- licenses.append(licvalue[0])
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if lics:
+ licenses.extend(lics)
lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
- if licvalue[0] == 'Unknown':
+ if license == 'Unknown':
lic_unknown.append(licvalue[1])
if lic_unknown:
lines.append('#')
@@ -955,9 +963,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
for licfile in lic_unknown:
lines.append('# %s' % licfile)
- extra_license = split_value(extravalues.pop('LICENSE', []))
- if '&' in extra_license:
- extra_license.remove('&')
+ extra_license = tidy_licenses(extravalues.pop('LICENSE', ''))
if extra_license:
if licenses == ['Unknown']:
licenses = extra_license
@@ -998,7 +1004,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines.append('# instead of &. If there is any doubt, check the accompanying documentation')
lines.append('# to determine which situation is applicable.')
- lines.append('LICENSE = "%s"' % ' & '.join(licenses))
+ lines.append('LICENSE = "%s"' % ' & '.join(sorted(licenses, key=str.casefold)))
lines.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
lines.append('')
@@ -1015,54 +1021,37 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
handled.append(('license', licvalues))
return licvalues
-def get_license_md5sums(d, static_only=False):
+def get_license_md5sums(d, static_only=False, linenumbers=False):
import bb.utils
+ import csv
md5sums = {}
- if not static_only:
+ if not static_only and not linenumbers:
# Gather md5sums of license files in common license dir
commonlicdir = d.getVar('COMMON_LICENSE_DIR')
for fn in os.listdir(commonlicdir):
md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
md5sums[md5value] = fn
+
# The following were extracted from common values in various recipes
# (double checking the license against the license file itself, not just
# the LICENSE value in the recipe)
- md5sums['94d55d512a9ba36caa9b7df079bae19f'] = 'GPLv2'
- md5sums['b234ee4d69f5fce4486a80fdaf4a4263'] = 'GPLv2'
- md5sums['59530bdf33659b29e73d4adb9f9f6552'] = 'GPLv2'
- md5sums['0636e73ff0215e8d672dc4c32c317bb3'] = 'GPLv2'
- md5sums['eb723b61539feef013de476e68b5c50a'] = 'GPLv2'
- md5sums['751419260aa954499f7abaabaa882bbe'] = 'GPLv2'
- md5sums['393a5ca445f6965873eca0259a17f833'] = 'GPLv2'
- md5sums['12f884d2ae1ff87c09e5b7ccc2c4ca7e'] = 'GPLv2'
- md5sums['8ca43cbc842c2336e835926c2166c28b'] = 'GPLv2'
- md5sums['ebb5c50ab7cab4baeffba14977030c07'] = 'GPLv2'
- md5sums['c93c0550bd3173f4504b2cbd8991e50b'] = 'GPLv2'
- md5sums['9ac2e7cff1ddaf48b6eab6028f23ef88'] = 'GPLv2'
- md5sums['4325afd396febcb659c36b49533135d4'] = 'GPLv2'
- md5sums['18810669f13b87348459e611d31ab760'] = 'GPLv2'
- md5sums['d7810fab7487fb0aad327b76f1be7cd7'] = 'GPLv2' # the Linux kernel's COPYING file
- md5sums['bbb461211a33b134d42ed5ee802b37ff'] = 'LGPLv2.1'
- md5sums['7fbc338309ac38fefcd64b04bb903e34'] = 'LGPLv2.1'
- md5sums['4fbd65380cdd255951079008b364516c'] = 'LGPLv2.1'
- md5sums['2d5025d4aa3495befef8f17206a5b0a1'] = 'LGPLv2.1'
- md5sums['fbc093901857fcd118f065f900982c24'] = 'LGPLv2.1'
- md5sums['a6f89e2100d9b6cdffcea4f398e37343'] = 'LGPLv2.1'
- md5sums['d8045f3b8f929c1cb29a1e3fd737b499'] = 'LGPLv2.1'
- md5sums['fad9b3332be894bab9bc501572864b29'] = 'LGPLv2.1'
- md5sums['3bf50002aefd002f49e7bb854063f7e7'] = 'LGPLv2'
- md5sums['9f604d8a4f8e74f4f5140845a21b6674'] = 'LGPLv2'
- md5sums['5f30f0716dfdd0d91eb439ebec522ec2'] = 'LGPLv2'
- md5sums['55ca817ccb7d5b5b66355690e9abc605'] = 'LGPLv2'
- md5sums['252890d9eee26aab7b432e8b8a616475'] = 'LGPLv2'
- md5sums['3214f080875748938ba060314b4f727d'] = 'LGPLv2'
- md5sums['db979804f025cf55aabec7129cb671ed'] = 'LGPLv2'
- md5sums['d32239bcb673463ab874e80d47fae504'] = 'GPLv3'
- md5sums['f27defe1e96c2e1ecd4e0c9be8967949'] = 'GPLv3'
- md5sums['6a6a8e020838b23406c81b19c1d46df6'] = 'LGPLv3'
- md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
- md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
- md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+
+ # Read license md5sums from csv file
+ scripts_path = os.path.dirname(os.path.realpath(__file__))
+ for path in (d.getVar('BBPATH').split(':')
+ + [os.path.join(scripts_path, '..', '..')]):
+ csv_path = os.path.join(path, 'lib', 'recipetool', 'licenses.csv')
+ if os.path.isfile(csv_path):
+ with open(csv_path, newline='') as csv_file:
+ fieldnames = ['md5sum', 'license', 'beginline', 'endline', 'md5']
+ reader = csv.DictReader(csv_file, delimiter=',', fieldnames=fieldnames)
+ for row in reader:
+ if linenumbers:
+ md5sums[row['md5sum']] = (
+ row['license'], row['beginline'], row['endline'], row['md5'])
+ else:
+ md5sums[row['md5sum']] = row['license']
+
return md5sums
def crunch_license(licfile):
@@ -1078,54 +1067,127 @@ def crunch_license(licfile):
import oe.utils
# Note: these are carefully constructed!
- license_title_re = re.compile('^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
- license_statement_re = re.compile('^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
- copyright_re = re.compile('^(#+)? *Copyright .*$')
+ license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
+ license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
+ disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$')
+ email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$')
+ header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
+ tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$')
+ url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
crunched_md5sums = {}
+
+ # common licenses
+ crunched_md5sums['89f3bf322f30a1dcfe952e09945842f0'] = 'Apache-2.0'
+ crunched_md5sums['13b6fe3075f8f42f2270a748965bf3a1'] = '0BSD'
+ crunched_md5sums['ba87a7d7c20719c8df4b8beed9b78c43'] = 'BSD-2-Clause'
+ crunched_md5sums['7f8892c03b72de419c27be4ebfa253f8'] = 'BSD-3-Clause'
+ crunched_md5sums['21128c0790b23a8a9f9e260d5f6b3619'] = 'BSL-1.0'
+ crunched_md5sums['975742a59ae1b8abdea63a97121f49f4'] = 'EDL-1.0'
+ crunched_md5sums['5322cee4433d84fb3aafc9e253116447'] = 'EPL-1.0'
+ crunched_md5sums['6922352e87de080f42419bed93063754'] = 'EPL-2.0'
+ crunched_md5sums['793475baa22295cae1d3d4046a3a0ceb'] = 'GPL-2.0-only'
+ crunched_md5sums['ff9047f969b02c20f0559470df5cb433'] = 'GPL-2.0-or-later'
+ crunched_md5sums['ea6de5453fcadf534df246e6cdafadcd'] = 'GPL-3.0-only'
+ crunched_md5sums['b419257d4d153a6fde92ddf96acf5b67'] = 'GPL-3.0-or-later'
+ crunched_md5sums['228737f4c49d3ee75b8fb3706b090b84'] = 'ISC'
+ crunched_md5sums['c6a782e826ca4e85bf7f8b89435a677d'] = 'LGPL-2.0-only'
+ crunched_md5sums['32d8f758a066752f0db09bd7624b8090'] = 'LGPL-2.0-or-later'
+ crunched_md5sums['4820937eb198b4f84c52217ed230be33'] = 'LGPL-2.1-only'
+ crunched_md5sums['db13fe9f3a13af7adab2dc7a76f9e44a'] = 'LGPL-2.1-or-later'
+ crunched_md5sums['d7a0f2e4e0950e837ac3eabf5bd1d246'] = 'LGPL-3.0-only'
+ crunched_md5sums['abbf328e2b434f9153351f06b9f79d02'] = 'LGPL-3.0-or-later'
+ crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
+ crunched_md5sums['b218b0e94290b9b818c4be67c8e1cc82'] = 'MIT-0'
+ crunched_md5sums['ddc18131d6748374f0f35a621c245b49'] = 'Unlicense'
+ crunched_md5sums['51f9570ff32571fc0a443102285c5e33'] = 'WTFPL'
+
# The following two were gleaned from the "forever" npm package
crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
- crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
- # https://github.com/vasi/pixz/blob/master/LICENSE
- crunched_md5sums['2f03392b40bbe663597b5bd3cc5ebdb9'] = 'BSD-2-Clause'
# https://github.com/waffle-gl/waffle/blob/master/LICENSE.txt
- crunched_md5sums['e72e5dfef0b1a4ca8a3d26a60587db66'] = 'BSD-2-Clause'
+ crunched_md5sums['50fab24ce589d69af8964fdbfe414c60'] = 'BSD-2-Clause'
# https://github.com/spigwitmer/fakeds1963s/blob/master/LICENSE
- crunched_md5sums['8be76ac6d191671f347ee4916baa637e'] = 'GPLv2'
- # https://github.com/datto/dattobd/blob/master/COPYING
- # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/GPLv2.TXT
- crunched_md5sums['1d65c5ad4bf6489f85f4812bf08ae73d'] = 'GPLv2'
+ crunched_md5sums['88a4355858a1433fea99fae34a44da88'] = 'GPL-2.0-only'
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
- # http://git.neil.brown.name/?p=mdadm.git;a=blob;f=COPYING;h=d159169d1050894d3ea3b98e1c965c4058208fe1;hb=HEAD
- crunched_md5sums['fb530f66a7a89ce920f0e912b5b66d4b'] = 'GPLv2'
- # https://github.com/gkos/nrf24/blob/master/COPYING
- crunched_md5sums['7b6aaa4daeafdfa6ed5443fd2684581b'] = 'GPLv2'
- # https://github.com/josch09/resetusb/blob/master/COPYING
- crunched_md5sums['8b8ac1d631a4d220342e83bcf1a1fbc3'] = 'GPLv3'
+ crunched_md5sums['063b5c3ebb5f3aa4c85a2ed18a31fbe7'] = 'GPL-2.0-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv2.1
- crunched_md5sums['2ea316ed973ae176e502e2297b574bb3'] = 'LGPLv2.1'
+ crunched_md5sums['7f5202f4d44ed15dcd4915f5210417d8'] = 'LGPL-2.1-only'
# unixODBC-2.3.4 COPYING
- crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
+ crunched_md5sums['3debde09238a8c8e1f6a847e1ec9055b'] = 'LGPL-2.1-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
- crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+ crunched_md5sums['f90c613c51aa35da4d79dd55fc724ceb'] = 'LGPL-3.0-only'
# https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/epl-v10
crunched_md5sums['efe2cb9a35826992b9df68224e3c2628'] = 'EPL-1.0'
- # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/edl-v10
- crunched_md5sums['0a9c78c0a398d1bbce4a166757d60387'] = 'EDL-1.0'
+
+ # https://raw.githubusercontent.com/jquery/esprima/3.1.3/LICENSE.BSD
+ crunched_md5sums['80fa7b56a28e8c902e6af194003220a5'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/npm/npm-install-checks/master/LICENSE
+ crunched_md5sums['e659f77bfd9002659e112d0d3d59b2c1'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/silverwind/default-gateway/4.2.0/LICENSE
+ crunched_md5sums['4c641f2d995c47f5cb08bdb4b5b6ea05'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/tad-lispy/node-damerau-levenshtein/v1.0.5/LICENSE
+ crunched_md5sums['2b8c039b2b9a25f0feb4410c4542d346'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/terser/terser/v3.17.0/LICENSE
+ crunched_md5sums['8bd23871802951c9ad63855151204c2c'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/alexei/sprintf.js/1.0.3/LICENSE
+ crunched_md5sums['008c22318c8ea65928bf730ddd0273e3'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/Caligatio/jsSHA/v3.2.0/LICENSE
+ crunched_md5sums['0e46634a01bfef056892949acaea85b1'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/d3/d3-path/v1.0.9/LICENSE
+ crunched_md5sums['b5f72aef53d3b2b432702c30b0215666'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/feross/ieee754/v1.1.13/LICENSE
+ crunched_md5sums['a39327c997c20da0937955192d86232d'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/joyent/node-extsprintf/v1.3.0/LICENSE
+ crunched_md5sums['721f23a96ff4161ca3a5f071bbe18108'] = 'MIT'
+ # https://raw.githubusercontent.com/pvorb/clone/v0.2.0/LICENSE
+ crunched_md5sums['b376d29a53c9573006b9970709231431'] = 'MIT'
+ # https://raw.githubusercontent.com/andris9/encoding/v0.1.12/LICENSE
+ crunched_md5sums['85d8a977ee9d7c5ab4ac03c9b95431c4'] = 'MIT-0'
+ # https://raw.githubusercontent.com/faye/websocket-driver-node/0.7.3/LICENSE.md
+ crunched_md5sums['b66384e7137e41a9b1904ef4d39703b6'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/less/less.js/v4.1.1/LICENSE
+ crunched_md5sums['b27575459e02221ccef97ec0bfd457ae'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/microsoft/TypeScript/v3.5.3/LICENSE.txt
+ crunched_md5sums['a54a1a6a39e7f9dbb4a23a42f5c7fd1c'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/request/request/v2.87.0/LICENSE
+ crunched_md5sums['1034431802e57486b393d00c5d262b8a'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/dchest/tweetnacl-js/v0.14.5/LICENSE
+ crunched_md5sums['75605e6bdd564791ab698fca65c94a4f'] = 'Unlicense'
+ # https://raw.githubusercontent.com/stackgl/gl-mat3/v2.0.0/LICENSE.md
+ crunched_md5sums['75512892d6f59dddb6d1c7e191957e9c'] = 'Zlib'
+
lictext = []
with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
# Drop opening statements
if copyright_re.match(line):
continue
+ elif disclaimer_re.match(line):
+ continue
+ elif email_re.match(line):
+ continue
+ elif header_re.match(line):
+ continue
+ elif tag_re.match(line):
+ continue
+ elif url_re.match(line):
+ continue
elif license_title_re.match(line):
continue
elif license_statement_re.match(line):
continue
- # Squash spaces, and replace smart quotes, double quotes
- # and backticks with single quotes
+ # Strip comment symbols
+ line = line.replace('*', '') \
+ .replace('#', '')
+ # Unify spelling
+ line = line.replace('sub-license', 'sublicense')
+ # Squash spaces
line = oe.utils.squashspaces(line.strip())
+ # Replace smart quotes, double quotes and backticks with single quotes
line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
+ # Unify brackets
+ line = line.replace("{", "[").replace("}", "]")
if line:
lictext.append(line)
@@ -1145,28 +1207,35 @@ def guess_license(srctree, d):
licenses = []
licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
+ skip_extensions = (".html", ".js", ".json", ".svg", ".ts")
licfiles = []
for root, dirs, files in os.walk(srctree):
for fn in files:
+ if fn.endswith(skip_extensions):
+ continue
for spec in licspecs:
if fnmatch.fnmatch(fn, spec):
fullpath = os.path.join(root, fn)
if not fullpath in licfiles:
licfiles.append(fullpath)
- for licfile in licfiles:
+ for licfile in sorted(licfiles):
md5value = bb.utils.md5_file(licfile)
license = md5sums.get(md5value, None)
if not license:
license, crunched_md5, lictext = crunch_license(licfile)
- if not license:
+ if lictext and not license:
license = 'Unknown'
- licenses.append((license, os.path.relpath(licfile, srctree), md5value))
+ logger.info("Please add the following line for '%s' to a 'lib/recipetool/licenses.csv' " \
+ "and replace `Unknown` with the license:\n" \
+ "%s,Unknown" % (os.path.relpath(licfile, srctree), md5value))
+ if license:
+ licenses.append((license, os.path.relpath(licfile, srctree), md5value))
# FIXME should we grab at least one source file with a license header and add that too?
return licenses
-def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn='${PN}'):
+def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=[], pn='${PN}'):
"""
Given a list of (license, path, md5sum) as returned by guess_license(),
a dict of package name to path mappings, write out a set of
@@ -1174,6 +1243,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
"""
pkglicenses = {pn: []}
for license, licpath, _ in licvalues:
+ license = fixup_license(license)
for pkgname, pkgpath in packages.items():
if licpath.startswith(pkgpath + '/'):
if pkgname in pkglicenses:
@@ -1186,11 +1256,14 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
pkglicenses[pn].append(license)
outlicenses = {}
for pkgname in packages:
- license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
+ # Assume AND operator between license files
+ license = ' & '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
if license == 'Unknown' and pkgname in fallback_licenses:
license = fallback_licenses[pkgname]
- outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
- outlicenses[pkgname] = license.split()
+ licenses = tidy_licenses(license)
+ license = ' & '.join(licenses)
+ outlines.append('LICENSE:%s = "%s"' % (pkgname, license))
+ outlicenses[pkgname] = licenses
return outlicenses
def read_pkgconfig_provides(d):
@@ -1322,7 +1395,7 @@ def register_commands(subparsers):
group.add_argument('-S', '--srcrev', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--npm-dev', action="store_true", help='For npm, also fetch devDependencies')
parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
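The crunching pass is what keeps the crunched md5sums stable across cosmetic differences between copies of the same license; a standalone re-implementation of the per-line normalization (oe.utils.squashspaces is approximated here with a plain regex):

    import re

    def normalize(line):
        # Strip comment symbols and unify spelling, as in the hunk above
        line = line.replace('*', '').replace('#', '')
        line = line.replace('sub-license', 'sublicense')
        # Squash whitespace (stand-in for oe.utils.squashspaces)
        line = re.sub(r'\s+', ' ', line.strip())
        # Replace smart quotes, double quotes and backticks with single quotes
        for ch in (u'\u2018', u'\u2019', u'\u201c', u'\u201d', '"', '`'):
            line = line.replace(ch, "'")
        # Unify brackets
        return line.replace('{', '[').replace('}', ']')

    print(normalize(' * Permission is hereby granted, \u201cfree of charge\u201d '))
    # Permission is hereby granted, 'free of charge'
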
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index 4743c740cf..5015634476 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -236,9 +226,9 @@ class CmakeRecipeHandler(RecipeHandler):
elif pkg == 'PkgConfig':
inherits.append('pkgconfig')
elif pkg == 'PythonInterp':
- inherits.append('pythonnative')
+ inherits.append('python3native')
elif pkg == 'PythonLibs':
- inherits.append('python-dir')
+ inherits.append('python3-dir')
else:
# Try to map via looking at installed CMake packages in pkgdata
dep = find_cmake_package(pkg)
@@ -427,7 +417,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
}
progclassmap = {'gconftool-2': 'gconf',
'pkg-config': 'pkgconfig',
- 'python': 'pythonnative',
+ 'python': 'python3native',
'python3': 'python3native',
'perl': 'perlnative',
'makeinfo': 'texinfo',
@@ -555,7 +545,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
deps.append('zlib')
elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
deps.append('openssl')
- elif keyword == 'AX_LIB_CURL':
+ elif keyword in ('AX_LIB_CURL', 'LIBCURL_CHECK_CONFIG'):
deps.append('curl')
elif keyword == 'AX_LIB_BEECRYPT':
deps.append('beecrypt')
@@ -576,16 +566,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
elif keyword == 'AX_PROG_XSLTPROC':
deps.append('libxslt-native')
elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
- pythonclass = 'pythonnative'
- res = version_re.search(value)
- if res:
- if res.group(1).startswith('3'):
- pythonclass = 'python3native'
- # Avoid replacing python3native with pythonnative
- if not pythonclass in inherits and not 'python3native' in inherits:
- if 'pythonnative' in inherits:
- inherits.remove('pythonnative')
- inherits.append(pythonclass)
+ pythonclass = 'python3native'
elif keyword == 'AX_WITH_CURSES':
deps.append('ncurses')
elif keyword == 'AX_PATH_BDB':
@@ -643,6 +624,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
'AX_CHECK_OPENSSL',
'AX_LIB_CRYPTO',
'AX_LIB_CURL',
+ 'LIBCURL_CHECK_CONFIG',
'AX_LIB_BEECRYPT',
'AX_LIB_EXPAT',
'AX_LIB_GCRYPT',
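The autotools handler's keyword scan is, at heart, a macro-to-dependency lookup; a toy sketch limited to a few of the mappings visible above, including the newly handled LIBCURL_CHECK_CONFIG:

    MACRO_DEPS = {
        'AX_CHECK_ZLIB': 'zlib',
        'AX_CHECK_OPENSSL': 'openssl',
        'AX_LIB_CRYPTO': 'openssl',
        'AX_LIB_CURL': 'curl',
        'LIBCURL_CHECK_CONFIG': 'curl',
    }

    def scan_configure_ac(text):
        # Collect DEPENDS for every known macro present in configure.ac
        deps = set()
        for macro, dep in MACRO_DEPS.items():
            if macro in text:
                deps.add(dep)
        return sorted(deps)

    print(scan_configure_ac('AC_INIT([foo], [1.0])\nLIBCURL_CHECK_CONFIG\nAX_CHECK_ZLIB\n'))
    # ['curl', 'zlib']
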
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index 5bd2aa337c..f4f51c88b4 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -2,23 +2,13 @@
#
# Copyright (C) 2015 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import ast
import codecs
import collections
-import distutils.command.build_py
+import setuptools.command.build_py
import email
import imp
import glob
@@ -41,11 +31,11 @@ def tinfoil_init(instance):
class PythonRecipeHandler(RecipeHandler):
- base_pkgdeps = ['python-core']
- excluded_pkgdeps = ['python-dbg']
- # os.path is provided by python-core
+ base_pkgdeps = ['python3-core']
+ excluded_pkgdeps = ['python3-dbg']
+ # os.path is provided by python3-core
assume_provided = ['builtins', 'os.path']
- # Assumes that the host python builtin_module_names is sane for target too
+ # Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
bbvar_map = {
@@ -55,9 +45,9 @@ class PythonRecipeHandler(RecipeHandler):
'Summary': 'SUMMARY',
'Description': 'DESCRIPTION',
'License': 'LICENSE',
- 'Requires': 'RDEPENDS_${PN}',
- 'Provides': 'RPROVIDES_${PN}',
- 'Obsoletes': 'RREPLACES_${PN}',
+ 'Requires': 'RDEPENDS:${PN}',
+ 'Provides': 'RPROVIDES:${PN}',
+ 'Obsoletes': 'RREPLACES:${PN}',
}
# PN/PV are already set by recipetool core & desc can be extremely long
excluded_fields = [
@@ -111,30 +101,39 @@ class PythonRecipeHandler(RecipeHandler):
'License :: OSI Approved :: Apple Public Source License': 'APSL',
'License :: OSI Approved :: Artistic License': 'Artistic',
'License :: OSI Approved :: Attribution Assurance License': 'AAL',
- 'License :: OSI Approved :: BSD License': 'BSD',
+ 'License :: OSI Approved :: BSD License': 'BSD-3-Clause',
+ 'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)': 'BSL-1.0',
+ 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)': 'CECILL-2.1',
+ 'License :: OSI Approved :: Common Development and Distribution License 1.0 (CDDL-1.0)': 'CDDL-1.0',
'License :: OSI Approved :: Common Public License': 'CPL',
+ 'License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)': 'EPL-1.0',
+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)': 'EPL-2.0',
'License :: OSI Approved :: Eiffel Forum License': 'EFL',
'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
- 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0+',
- 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0',
+ 'License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)': 'EUPL-1.2',
+ 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0-or-later',
'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
- 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0',
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0+',
- 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0',
- 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0+',
+ 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0-only',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0-only',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0-or-later',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
+ 'License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND)': 'HPND',
'License :: OSI Approved :: IBM Public License': 'IPL',
'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
'License :: OSI Approved :: Intel Open Source License': 'Intel',
'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
'License :: OSI Approved :: MIT License': 'MIT',
+ 'License :: OSI Approved :: MIT No Attribution License (MIT-0)': 'MIT-0',
'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
+ 'License :: OSI Approved :: MirOS License (MirOS)': 'MirOS',
'License :: OSI Approved :: Motosoto License': 'Motosoto',
'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
@@ -142,19 +141,26 @@ class PythonRecipeHandler(RecipeHandler):
'License :: OSI Approved :: Nethack General Public License': 'NGPL',
'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
+ 'License :: OSI Approved :: Open Software License 3.0 (OSL-3.0)': 'OSL-3.0',
+ 'License :: OSI Approved :: PostgreSQL License': 'PostgreSQL',
'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
- 'License :: OSI Approved :: Python Software Foundation License': 'PSF',
+ 'License :: OSI Approved :: Python Software Foundation License': 'PSF-2.0',
'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
+ 'License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)': 'OFL-1.1',
'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
- 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': '-- Sun Industry Standards Source License (SISSL)',
+ 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': 'SISSL',
'License :: OSI Approved :: Sun Public License': 'SPL',
+ 'License :: OSI Approved :: The Unlicense (Unlicense)': 'Unlicense',
+ 'License :: OSI Approved :: Universal Permissive License (UPL)': 'UPL-1.0',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
'License :: OSI Approved :: W3C License': 'W3C',
'License :: OSI Approved :: X.Net License': 'Xnet',
'License :: OSI Approved :: Zope Public License': 'ZPL',
'License :: OSI Approved :: zlib/libpng License': 'Zlib',
+ 'License :: Other/Proprietary License': 'Proprietary',
+ 'License :: Public Domain': 'PD',
}
def __init__(self):
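Note: the classifier table above now maps PyPI trove classifiers to SPDX-style identifiers (e.g. GPL-2.0-only rather than GPL-2.0, and the bare BSD classifier to BSD-3-Clause). A minimal sketch of how such a table turns package metadata into a LICENSE value; the classifier_map subset and to_license helper are illustrative, not part of this patch:

    # Hypothetical helper: join the SPDX identifiers for every classifier
    # that the table above knows about.
    classifier_map = {
        'License :: OSI Approved :: MIT License': 'MIT',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0-only',
    }

    def to_license(classifiers):
        known = [classifier_map[c] for c in classifiers if c in classifier_map]
        return ' & '.join(known) if known else 'Unknown'

    print(to_license(['License :: OSI Approved :: MIT License']))  # MIT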
@@ -164,8 +170,13 @@ class PythonRecipeHandler(RecipeHandler):
if 'buildsystem' in handled:
return False
- if not RecipeHandler.checkfiles(srctree, ['setup.py']):
- return
+ # Check for non-zero size setup.py files
+ setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
+ else:
+ return False
# setup.py is always parsed to get at certain required information, such as
# distutils vs setuptools
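Note: the non-empty setup.py check above relies on Python's for/else: the else suite runs only when the loop finishes without break, i.e. when every candidate file is empty or the list is empty. A standalone sketch of the same pattern; the candidate list is illustrative:

    import os

    setupfiles = ['./setup.py']  # illustrative candidate list
    for fn in setupfiles:
        if os.path.exists(fn) and os.path.getsize(fn):
            break  # found a usable setup.py, carry on with the handler
    else:
        # no break happened: nothing usable was found
        print('no non-empty setup.py found, handler returns False')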
@@ -225,9 +236,9 @@ class PythonRecipeHandler(RecipeHandler):
self.apply_info_replacements(info)
if uses_setuptools:
- classes.append('setuptools')
+ classes.append('setuptools3')
else:
- classes.append('distutils')
+ classes.append('distutils3')
if license_str:
for i, line in enumerate(lines_before):
@@ -292,7 +303,7 @@ class PythonRecipeHandler(RecipeHandler):
for feature, feature_reqs in extras_req.items():
unmapped_deps.difference_update(feature_reqs)
- feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ feature_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
inst_reqs = set()
@@ -303,10 +314,10 @@ class PythonRecipeHandler(RecipeHandler):
if inst_reqs:
unmapped_deps.difference_update(inst_reqs)
- inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
if mapped_deps:
name = info.get('Name')
@@ -318,7 +329,7 @@ class PythonRecipeHandler(RecipeHandler):
lines_after.append('')
lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
lines_after.append('# python sources, and might not be 100% accurate.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
unmapped_deps -= set(extensions)
unmapped_deps -= set(self.assume_provided)
@@ -366,7 +377,7 @@ class PythonRecipeHandler(RecipeHandler):
return info, 'setuptools' in imported_modules, non_literals, extensions
def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
info = {}
keys = set(self.bbvar_map.keys())
keys |= set(self.setuparg_list_fields)
@@ -400,7 +411,7 @@ class PythonRecipeHandler(RecipeHandler):
def get_setup_byline(self, fields, setupscript='./setup.py'):
info = {}
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
try:
info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
@@ -464,9 +475,13 @@ class PythonRecipeHandler(RecipeHandler):
else:
package_dir = {}
- class PackageDir(distutils.command.build_py.build_py):
+ dist = setuptools.Distribution()
+
+ class PackageDir(setuptools.command.build_py.build_py):
def __init__(self, package_dir):
self.package_dir = package_dir
+ self.dist = dist
+ super().__init__(self.dist)
pd = PackageDir(package_dir)
to_scan = []
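Note: distutils commands can be constructed with a bare Distribution, and the same holds for their setuptools counterparts, which is why the patch creates a setuptools.Distribution() and passes it to the build_py constructor. A minimal sketch of that wiring, assuming a setuptools version that still ships build_py; the package_dir mapping is illustrative:

    import setuptools
    import setuptools.command.build_py

    # Commands must be bound to a Distribution instance
    dist = setuptools.Distribution()
    cmd = setuptools.command.build_py.build_py(dist)
    cmd.package_dir = {'': 'src'}    # illustrative mapping
    print(cmd.distribution is dist)  # True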
@@ -537,7 +552,7 @@ class PythonRecipeHandler(RecipeHandler):
pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python-dir.bbclass', ldata, True)
+ bb.parse.handle('classes/python3-dir.bbclass', ldata, True)
python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
@@ -550,7 +565,7 @@ class PythonRecipeHandler(RecipeHandler):
with open(pkgdatafile, 'r') as f:
for line in f.readlines():
field, value = line.split(': ', 1)
- if field == 'FILES_INFO':
+ if field.startswith('FILES_INFO'):
files_info = ast.literal_eval(value)
break
else:
diff --git a/scripts/lib/recipetool/create_kernel.py b/scripts/lib/recipetool/create_kernel.py
index ca4996c7ac..5740589a68 100644
--- a/scripts/lib/recipetool/create_kernel.py
+++ b/scripts/lib/recipetool/create_kernel.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
diff --git a/scripts/lib/recipetool/create_kmod.py b/scripts/lib/recipetool/create_kmod.py
index 3982537a4e..cc00106961 100644
--- a/scripts/lib/recipetool/create_kmod.py
+++ b/scripts/lib/recipetool/create_kmod.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -123,7 +113,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
kdirpath, _ = check_target(compile_lines, install=False)
if manual_install or not install_lines:
- lines_after.append('EXTRA_OEMAKE_append_task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
+ lines_after.append('EXTRA_OEMAKE:append:task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
elif install_target and install_target != 'modules_install':
lines_after.append('MODULES_INSTALL_TARGET = "install"')
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
index 03667887fc..3394a89970 100644
--- a/scripts/lib/recipetool/create_npm.py
+++ b/scripts/lib/recipetool/create_npm.py
@@ -1,330 +1,299 @@
-# Recipe creation tool - node.js NPM module support plugin
-#
# Copyright (C) 2016 Intel Corporation
+# Copyright (C) 2020 Savoir-Faire Linux
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Recipe creation tool - npm module support plugin"""
-import os
+import json
import logging
-import subprocess
+import os
+import re
+import sys
import tempfile
-import shutil
-import json
-from recipetool.create import RecipeHandler, split_pkg_licenses, handle_license_vars
-
+import bb
+from bb.fetch2.npm import NpmEnvironment
+from bb.fetch2.npmsw import foreach_dependencies
+from recipetool.create import RecipeHandler
+from recipetool.create import get_license_md5sums
+from recipetool.create import guess_license
+from recipetool.create import split_pkg_licenses
logger = logging.getLogger('recipetool')
-
-tinfoil = None
+TINFOIL = None
def tinfoil_init(instance):
- global tinfoil
- tinfoil = instance
-
+ """Initialize tinfoil"""
+ global TINFOIL
+ TINFOIL = instance
class NpmRecipeHandler(RecipeHandler):
- lockdownpath = None
+ """Class to handle the npm recipe creation"""
+
+ @staticmethod
+ def _npm_name(name):
+ """Generate a Yocto friendly npm name"""
+ name = re.sub("/", "-", name)
+ name = name.lower()
+ name = re.sub(r"[^\-a-z0-9]", "", name)
+ name = name.strip("-")
+ return name
+
+ @staticmethod
+ def _get_registry(lines):
+ """Get the registry value from the 'npm://registry' url"""
+ registry = None
+
+ def _handle_registry(varname, origvalue, op, newlines):
+ nonlocal registry
+ if origvalue.startswith("npm://"):
+ registry = re.sub(r"^npm://", "http://", origvalue.split(";")[0])
+ return origvalue, None, 0, True
+
+ bb.utils.edit_metadata(lines, ["SRC_URI"], _handle_registry)
+
+ return registry
+
+ @staticmethod
+ def _ensure_npm():
+ """Check if the 'npm' command is available in the recipes"""
+ if not TINFOIL.recipes_parsed:
+ TINFOIL.parse_recipes()
- def _ensure_npm(self, fixed_setup=False):
- if not tinfoil.recipes_parsed:
- tinfoil.parse_recipes()
try:
- rd = tinfoil.parse_recipe('nodejs-native')
+ d = TINFOIL.parse_recipe("nodejs-native")
except bb.providers.NoProvider:
- if fixed_setup:
- msg = 'nodejs-native is required for npm but is not available within this SDK'
- else:
- msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
- logger.error(msg)
- return None
- bindir = rd.getVar('STAGING_BINDIR_NATIVE')
- npmpath = os.path.join(bindir, 'npm')
+ bb.error("Nothing provides 'nodejs-native' which is required for the build")
+ bb.note("You will likely need to add a layer that provides nodejs")
+ sys.exit(14)
+
+ bindir = d.getVar("STAGING_BINDIR_NATIVE")
+ npmpath = os.path.join(bindir, "npm")
+
if not os.path.exists(npmpath):
- tinfoil.build_targets('nodejs-native', 'addto_recipe_sysroot')
+ TINFOIL.build_targets("nodejs-native", "addto_recipe_sysroot")
+
if not os.path.exists(npmpath):
- logger.error('npm required to process specified source, but nodejs-native did not seem to populate it')
- return None
+ bb.error("Failed to add 'npm' to sysroot")
+ sys.exit(14)
+
return bindir
- def _handle_license(self, data):
- '''
- Handle the license value from an npm package.json file
- '''
- license = None
- if 'license' in data:
- license = data['license']
- if isinstance(license, dict):
- license = license.get('type', None)
- if license:
- if 'OR' in license:
- license = license.replace('OR', '|')
- license = license.replace('AND', '&')
- license = license.replace(' ', '_')
- if not license[0] == '(':
- license = '(' + license + ')'
- else:
- license = license.replace('AND', '&')
- if license[0] == '(':
- license = license[1:]
- if license[-1] == ')':
- license = license[:-1]
- license = license.replace('MIT/X11', 'MIT')
- license = license.replace('Public Domain', 'PD')
- license = license.replace('SEE LICENSE IN EULA',
- 'SEE-LICENSE-IN-EULA')
- return license
-
- def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
- try:
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warning('npm shrinkwrap failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
- shutil.move(os.path.join(srctree, 'npm-shrinkwrap.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
- lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
-
- def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- if not NpmRecipeHandler.lockdownpath:
- NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
- bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
- cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
- if not os.path.exists(relockbin):
- logger.warning('Could not find relock.js within lockdown directory; skipping lockdown')
- return
- try:
- bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warning('lockdown-relock failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'lockdown.json')
- shutil.move(os.path.join(srctree, 'lockdown.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['lockdown.json'] = tmpfile
- lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
-
- def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
- import scriptutils
- # If this isn't a single module we need to get the dependencies
- # and add them to SRC_URI
- def varfunc(varname, origvalue, op, newlines):
- if varname == 'SRC_URI':
- if not origvalue.startswith('npm://'):
- src_uri = origvalue.split()
- deplist = {}
- for dep, depver in optdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in devdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in deps.items():
- depdata = self.get_npm_data(dep, depver, d)
- deplist[dep] = depdata
-
- extra_urls = []
- for dep, depdata in deplist.items():
- version = depdata.get('version', None)
- if version:
- url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
- extra_urls.append(url)
- if extra_urls:
- scriptutils.fetch_url(tinfoil, ' '.join(extra_urls), None, srctree, logger)
- src_uri.extend(extra_urls)
- return src_uri, None, -1, True
- return origvalue, None, 0, True
- updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
- if updated:
- del lines_before[:]
- for line in newlines:
- # Hack to avoid newlines that edit_metadata inserts
- if line.endswith('\n'):
- line = line[:-1]
- lines_before.append(line)
- return updated
+ @staticmethod
+ def _npm_global_configs(dev):
+ """Get the npm global configuration"""
+ configs = []
+
+ if dev:
+ configs.append(("also", "development"))
+ else:
+ configs.append(("only", "production"))
+
+ configs.append(("save", "false"))
+ configs.append(("package-lock", "false"))
+ configs.append(("shrinkwrap", "false"))
+ return configs
+
+ def _run_npm_install(self, d, srctree, registry, dev):
+ """Run the 'npm install' command without building the addons"""
+ configs = self._npm_global_configs(dev)
+ configs.append(("ignore-scripts", "true"))
+
+ if registry:
+ configs.append(("registry", registry))
+
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm install", workdir=srctree)
+
+ def _generate_shrinkwrap(self, d, srctree, dev):
+ """Check and generate the 'npm-shrinkwrap.json' file if needed"""
+ configs = self._npm_global_configs(dev)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm shrinkwrap", workdir=srctree)
+
+ return os.path.join(srctree, "npm-shrinkwrap.json")
+
+ def _handle_licenses(self, srctree, shrinkwrap_file, dev):
+ """Return the extra license files and the list of packages"""
+ licfiles = []
+ packages = {}
+
+ # Handle the parent package
+ packages["${PN}"] = ""
+
+ def _licfiles_append_fallback_readme_files(destdir):
+ """Append README files as fallback to license files if a license files is missing"""
+
+ fallback = True
+ readmes = []
+ basedir = os.path.join(srctree, destdir)
+ for fn in os.listdir(basedir):
+ upper = fn.upper()
+ if upper.startswith("README"):
+ fullpath = os.path.join(basedir, fn)
+ readmes.append(fullpath)
+ if upper.startswith("COPYING") or "LICENCE" in upper or "LICENSE" in upper:
+ fallback = False
+ if fallback:
+ for readme in readmes:
+ licfiles.append(os.path.relpath(readme, srctree))
+
+ # Handle the dependencies
+ def _handle_dependency(name, params, deptree):
+ suffix = "-".join([self._npm_name(dep) for dep in deptree])
+ destdirs = [os.path.join("node_modules", dep) for dep in deptree]
+ destdir = os.path.join(*destdirs)
+ packages["${PN}-" + suffix] = destdir
+ _licfiles_append_fallback_readme_files(destdir)
+
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ foreach_dependencies(shrinkwrap, _handle_dependency, dev)
+
+ return licfiles, packages
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
- import bb.utils
- import oe.package
- from collections import OrderedDict
+ """Handle the npm recipe creation"""
- if 'buildsystem' in handled:
+ if "buildsystem" in handled:
return False
- def read_package_json(fn):
- with open(fn, 'r', errors='surrogateescape') as f:
- return json.loads(f.read())
+ files = RecipeHandler.checkfiles(srctree, ["package.json"])
- files = RecipeHandler.checkfiles(srctree, ['package.json'])
- if files:
- d = bb.data.createCopy(tinfoil.config_data)
- npm_bindir = self._ensure_npm()
- if not npm_bindir:
- sys.exit(14)
- d.prependVar('PATH', '%s:' % npm_bindir)
-
- data = read_package_json(files[0])
- if 'name' in data and 'version' in data:
- extravalues['PN'] = data['name']
- extravalues['PV'] = data['version']
- classes.append('npm')
- handled.append('buildsystem')
- if 'description' in data:
- extravalues['SUMMARY'] = data['description']
- if 'homepage' in data:
- extravalues['HOMEPAGE'] = data['homepage']
-
- fetchdev = extravalues['fetchdev'] or None
- deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
- self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
-
- # Shrinkwrap
- localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
- self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Lockdown
- self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Split each npm module out to is own package
- npmpackages = oe.package.npm_split_package_dirs(srctree)
- licvalues = None
- for item in handled:
- if isinstance(item, tuple):
- if item[0] == 'license':
- licvalues = item[1]
- break
- if not licvalues:
- licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
- if licvalues:
- # Augment the license list with information we have in the packages
- licenses = {}
- license = self._handle_license(data)
- if license:
- licenses['${PN}'] = license
- for pkgname, pkgitem in npmpackages.items():
- _, pdata = pkgitem
- license = self._handle_license(pdata)
- if license:
- licenses[pkgname] = license
- # Now write out the package-specific license values
- # We need to strip out the json data dicts for this since split_pkg_licenses
- # isn't expecting it
- packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
- packages['${PN}'] = ''
- pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
- all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
- if '&' in all_licenses:
- all_licenses.remove('&')
- extravalues['LICENSE'] = ' & '.join(all_licenses)
-
- # Need to move S setting after inherit npm
- for i, line in enumerate(lines_before):
- if line.startswith('S ='):
- lines_before.pop(i)
- lines_after.insert(0, '# Must be set after inherit npm since that itself sets S')
- lines_after.insert(1, line)
- break
-
- return True
-
- return False
-
- # FIXME this is duplicated from lib/bb/fetch2/npm.py
- def _parse_view(self, output):
- '''
- Parse the output of npm view --json; the last JSON result
- is assumed to be the one that we're interested in.
- '''
- pdata = None
- outdeps = {}
- datalines = []
- bracelevel = 0
- for line in output.splitlines():
- if bracelevel:
- datalines.append(line)
- elif '{' in line:
- datalines = []
- datalines.append(line)
- bracelevel = bracelevel + line.count('{') - line.count('}')
- if datalines:
- pdata = json.loads('\n'.join(datalines))
- return pdata
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_data(self, pkg, version, d):
- import bb.fetch2
- pkgfullname = pkg
- if version != '*' and not '/' in version:
- pkgfullname += "@'%s'" % version
- logger.debug(2, "Calling getdeps on %s" % pkg)
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- fetchcmd = "npm view %s --json" % pkgfullname
- output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
- data = self._parse_view(output)
- return data
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_package_dependencies(self, pdata, fetchdev):
- dependencies = pdata.get('dependencies', {})
- optionalDependencies = pdata.get('optionalDependencies', {})
- dependencies.update(optionalDependencies)
- if fetchdev:
- devDependencies = pdata.get('devDependencies', {})
- dependencies.update(devDependencies)
- else:
- devDependencies = {}
- depsfound = {}
- optdepsfound = {}
- devdepsfound = {}
- for dep in dependencies:
- if dep in optionalDependencies:
- optdepsfound[dep] = dependencies[dep]
- elif dep in devDependencies:
- devdepsfound[dep] = dependencies[dep]
- else:
- depsfound[dep] = dependencies[dep]
- return depsfound, optdepsfound, devdepsfound
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def check_npm_optional_dependency(self, pdata):
- pkg_os = pdata.get('os', None)
- if pkg_os:
- if not isinstance(pkg_os, list):
- pkg_os = [pkg_os]
- blacklist = False
- for item in pkg_os:
- if item.startswith('!'):
- blacklist = True
- break
- if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
- pkg = pdata.get('name', 'Unnamed package')
- logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
- return False
- return True
+ if not files:
+ return False
+
+ with open(files[0], "r") as f:
+ data = json.load(f)
+
+ if "name" not in data or "version" not in data:
+ return False
+
+ extravalues["PN"] = self._npm_name(data["name"])
+ extravalues["PV"] = data["version"]
+
+ if "description" in data:
+ extravalues["SUMMARY"] = data["description"]
+
+ if "homepage" in data:
+ extravalues["HOMEPAGE"] = data["homepage"]
+
+ dev = bb.utils.to_boolean(str(extravalues.get("NPM_INSTALL_DEV", "0")), False)
+ registry = self._get_registry(lines_before)
+
+ bb.note("Checking if npm is available ...")
+ # The native npm is used here (and not the host one) to ensure that the
+        # npm version is recent enough for efficient dependency tree
+        # resolution and to avoid issues with the shrinkwrap file format.
+ # Moreover the native npm is mandatory for the build.
+ bindir = self._ensure_npm()
+
+ d = bb.data.createCopy(TINFOIL.config_data)
+ d.prependVar("PATH", bindir + ":")
+ d.setVar("S", srctree)
+
+ bb.note("Generating shrinkwrap file ...")
+ # To generate the shrinkwrap file the dependencies have to be installed
+ # first. During the generation process some files may be updated /
+ # deleted. By default devtool tracks the diffs in the srctree and raises
+ # errors when finishing the recipe if some diffs are found.
+ git_exclude_file = os.path.join(srctree, ".git", "info", "exclude")
+ if os.path.exists(git_exclude_file):
+ with open(git_exclude_file, "r+") as f:
+                # strip newlines so the membership test below matches
+                lines = [line.rstrip("\n") for line in f.readlines()]
+ for line in ["/node_modules/", "/npm-shrinkwrap.json"]:
+ if line not in lines:
+ f.write(line + "\n")
+
+ lock_file = os.path.join(srctree, "package-lock.json")
+ lock_copy = lock_file + ".copy"
+ if os.path.exists(lock_file):
+ bb.utils.copyfile(lock_file, lock_copy)
+
+ self._run_npm_install(d, srctree, registry, dev)
+ shrinkwrap_file = self._generate_shrinkwrap(d, srctree, dev)
+
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ if os.path.exists(lock_copy):
+ bb.utils.movefile(lock_copy, lock_file)
+
+ # Add the shrinkwrap file as 'extrafiles'
+ shrinkwrap_copy = shrinkwrap_file + ".copy"
+ bb.utils.copyfile(shrinkwrap_file, shrinkwrap_copy)
+ extravalues.setdefault("extrafiles", {})
+ extravalues["extrafiles"]["npm-shrinkwrap.json"] = shrinkwrap_copy
+
+ url_local = "npmsw://%s" % shrinkwrap_file
+        url_recipe = "npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"
+
+ if dev:
+ url_local += ";dev=1"
+ url_recipe += ";dev=1"
+
+ # Add the npmsw url in the SRC_URI of the generated recipe
+ def _handle_srcuri(varname, origvalue, op, newlines):
+ """Update the version value and add the 'npmsw://' url"""
+ value = origvalue.replace("version=" + data["version"], "version=${PV}")
+ value = value.replace("version=latest", "version=${PV}")
+ values = [line.strip() for line in value.strip('\n').splitlines()]
+ if "dependencies" in shrinkwrap:
+ values.append(url_recipe)
+ return values, None, 4, False
+
+ (_, newlines) = bb.utils.edit_metadata(lines_before, ["SRC_URI"], _handle_srcuri)
+ lines_before[:] = [line.rstrip('\n') for line in newlines]
+
+ # In order to generate correct licence checksums in the recipe the
+ # dependencies have to be fetched again using the npmsw url
+ bb.note("Fetching npm dependencies ...")
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+ fetcher = bb.fetch2.Fetch([url_local], d)
+ fetcher.download()
+ fetcher.unpack(srctree)
+
+ bb.note("Handling licences ...")
+ (licfiles, packages) = self._handle_licenses(srctree, shrinkwrap_file, dev)
+
+ def _guess_odd_license(licfiles):
+ md5sums = get_license_md5sums(d, linenumbers=True)
+
+ chksums = []
+ licenses = []
+ for licfile in licfiles:
+ f = os.path.join(srctree, licfile)
+ md5value = bb.utils.md5_file(f)
+ (license, beginline, endline, md5) = md5sums.get(md5value,
+ (None, "", "", ""))
+ if not license:
+ license = "Unknown"
+ logger.info("Please add the following line for '%s' to a "
+ "'lib/recipetool/licenses.csv' and replace `Unknown`, "
+ "`X`, `Y` and `MD5` with the license, begin line, "
+ "end line and partial MD5 checksum:\n" \
+ "%s,Unknown,X,Y,MD5" % (licfile, md5value))
+ chksums.append("file://%s%s%s;md5=%s" % (licfile,
+ ";beginline=%s" % (beginline) if beginline else "",
+ ";endline=%s" % (endline) if endline else "",
+ md5 if md5 else md5value))
+ licenses.append((license, licfile, md5value))
+ return (licenses, chksums)
+
+ (licenses, extravalues["LIC_FILES_CHKSUM"]) = _guess_odd_license(licfiles)
+ split_pkg_licenses([*licenses, *guess_license(srctree, d)], packages, lines_after)
+
+ classes.append("npm")
+ handled.append("buildsystem")
+ return True
def register_recipe_handlers(handlers):
+ """Register the npm handler"""
handlers.append((NpmRecipeHandler(), 60))
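Note: the rewritten handler derives PN and the per-dependency package names through _npm_name(), and delegates dependency fetching to the npmsw fetcher driven by the generated shrinkwrap file. A standalone sketch of the name normalisation, with the steps copied from the handler above:

    import re

    def npm_name(name):
        # '/' -> '-', lowercase, drop anything outside [-a-z0-9], trim '-'
        name = re.sub("/", "-", name)
        name = name.lower()
        name = re.sub(r"[^\-a-z0-9]", "", name)
        return name.strip("-")

    print(npm_name("@angular/core"))  # angular-core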
diff --git a/scripts/lib/recipetool/edit.py b/scripts/lib/recipetool/edit.py
index c4789a9994..d5b980a1c0 100644
--- a/scripts/lib/recipetool/edit.py
+++ b/scripts/lib/recipetool/edit.py
@@ -6,18 +6,8 @@
#
# Copyright (C) 2018 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
@@ -44,7 +34,7 @@ def edit(args):
recipe_path = tinfoil.get_recipe_file(args.target)
appends = tinfoil.get_file_appends(recipe_path)
- return scriptutils.run_editor([recipe_path] + appends, logger)
+ return scriptutils.run_editor([recipe_path] + list(appends), logger)
def register_commands(subparsers):
diff --git a/scripts/lib/recipetool/licenses.csv b/scripts/lib/recipetool/licenses.csv
new file mode 100644
index 0000000000..80851111b3
--- /dev/null
+++ b/scripts/lib/recipetool/licenses.csv
@@ -0,0 +1,37 @@
+0636e73ff0215e8d672dc4c32c317bb3,GPL-2.0-only
+12f884d2ae1ff87c09e5b7ccc2c4ca7e,GPL-2.0-only
+18810669f13b87348459e611d31ab760,GPL-2.0-only
+252890d9eee26aab7b432e8b8a616475,LGPL-2.0-only
+2d5025d4aa3495befef8f17206a5b0a1,LGPL-2.1-only
+3214f080875748938ba060314b4f727d,LGPL-2.0-only
+385c55653886acac3821999a3ccd17b3,Artistic-1.0 | GPL-2.0-only
+393a5ca445f6965873eca0259a17f833,GPL-2.0-only
+3b83ef96387f14655fc854ddc3c6bd57,Apache-2.0
+3bf50002aefd002f49e7bb854063f7e7,LGPL-2.0-only
+4325afd396febcb659c36b49533135d4,GPL-2.0-only
+4fbd65380cdd255951079008b364516c,LGPL-2.1-only
+54c7042be62e169199200bc6477f04d1,BSD-3-Clause
+55ca817ccb7d5b5b66355690e9abc605,LGPL-2.0-only
+59530bdf33659b29e73d4adb9f9f6552,GPL-2.0-only
+5f30f0716dfdd0d91eb439ebec522ec2,LGPL-2.0-only
+6a6a8e020838b23406c81b19c1d46df6,LGPL-3.0-only
+751419260aa954499f7abaabaa882bbe,GPL-2.0-only
+7fbc338309ac38fefcd64b04bb903e34,LGPL-2.1-only
+8ca43cbc842c2336e835926c2166c28b,GPL-2.0-only
+94d55d512a9ba36caa9b7df079bae19f,GPL-2.0-only
+9ac2e7cff1ddaf48b6eab6028f23ef88,GPL-2.0-only
+9f604d8a4f8e74f4f5140845a21b6674,LGPL-2.0-only
+a6f89e2100d9b6cdffcea4f398e37343,LGPL-2.1-only
+b234ee4d69f5fce4486a80fdaf4a4263,GPL-2.0-only
+bbb461211a33b134d42ed5ee802b37ff,LGPL-2.1-only
+bfe1f75d606912a4111c90743d6c7325,MPL-1.1-only
+c93c0550bd3173f4504b2cbd8991e50b,GPL-2.0-only
+d32239bcb673463ab874e80d47fae504,GPL-3.0-only
+d7810fab7487fb0aad327b76f1be7cd7,GPL-2.0-only
+d8045f3b8f929c1cb29a1e3fd737b499,LGPL-2.1-only
+db979804f025cf55aabec7129cb671ed,LGPL-2.0-only
+eb723b61539feef013de476e68b5c50a,GPL-2.0-only
+ebb5c50ab7cab4baeffba14977030c07,GPL-2.0-only
+f27defe1e96c2e1ecd4e0c9be8967949,GPL-3.0-only
+fad9b3332be894bab9bc501572864b29,LGPL-2.1-only
+fbc093901857fcd118f065f900982c24,LGPL-2.1-only
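Note: each row of licenses.csv is "<md5 of the whole license file>,<license>"; _guess_odd_license() above reaches it through get_license_md5sums(). A minimal bitbake-independent sketch of the same lookup; the csv path and file name are illustrative:

    import csv
    import hashlib

    def load_md5_map(path='scripts/lib/recipetool/licenses.csv'):
        with open(path, newline='') as f:
            return {md5: lic for md5, lic in csv.reader(f)}

    def guess(licfile, md5map):
        with open(licfile, 'rb') as f:
            digest = hashlib.md5(f.read()).hexdigest()
        return md5map.get(digest, 'Unknown')

    # e.g. guess('COPYING', load_md5_map()) returns 'GPL-2.0-only'
    # for an unmodified GPLv2 text matching one of the sums above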
diff --git a/scripts/lib/recipetool/newappend.py b/scripts/lib/recipetool/newappend.py
index 76707b4c91..08e2474dc4 100644
--- a/scripts/lib/recipetool/newappend.py
+++ b/scripts/lib/recipetool/newappend.py
@@ -7,18 +7,8 @@
#
# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
index 9de315a0ef..f8e2ee75fb 100644
--- a/scripts/lib/recipetool/setvar.py
+++ b/scripts/lib/recipetool/setvar.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/lib/resulttool/__init__.py b/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/lib/resulttool/__init__.py
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
new file mode 100644
index 0000000000..eb3927ec82
--- /dev/null
+++ b/scripts/lib/resulttool/log.py
@@ -0,0 +1,104 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+ logdata = resultutils.ptestresult_get_log(result, ptest)
+ if logdata is not None:
+ print(logdata)
+ return 0
+
+ print("ptest '%s' log not found" % ptest)
+ return 1
+
+def show_reproducible(result, reproducible, logger):
+ try:
+ print(result['reproducible'][reproducible]['diffoscope.text'])
+ return 0
+
+ except KeyError:
+ print("reproducible '%s' not found" % reproducible)
+ return 1
+
+def log(args, logger):
+ results = resultutils.load_resultsdata(args.source)
+
+ ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
+ if ptest_count > 1 and not args.prepend_run:
+ print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
+ return 1
+
+ for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.dump_ptest:
+ for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
+ if sectname in r:
+ for name, ptest in r[sectname].items():
+ logdata = resultutils.generic_get_log(sectname, r, name)
+ if logdata is not None:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+ if not sectname.startswith("ptest"):
+ dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
+
+ os.makedirs(dest_dir, exist_ok=True)
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(logdata)
+
+ if args.raw_ptest:
+ found = False
+ for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
+ rawlog = resultutils.generic_get_rawlogs(sectname, r)
+ if rawlog is not None:
+ print(rawlog)
+ found = True
+ if not found:
+ print('Raw ptest logs not found')
+ return 1
+
+ if args.raw_reproducible:
+ if 'reproducible.rawlogs' in r:
+ print(r['reproducible.rawlogs']['log'])
+ else:
+ print('Raw reproducible logs not found')
+ return 1
+
+ for ptest in args.ptest:
+ if not show_ptest(r, ptest, logger):
+ return 1
+
+ for reproducible in args.reproducible:
+ if not show_reproducible(r, reproducible, logger):
+ return 1
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser = subparsers.add_parser('log', help='show logs',
+ description='show the logs from test results',
+ group='analysis')
+ parser.set_defaults(func=log)
+ parser.add_argument('source',
+ help='the results file/directory/URL to import')
+ parser.add_argument('--ptest', action='append', default=[],
+ help='show logs for a ptest')
+ parser.add_argument('--dump-ptest', metavar='DIR',
+ help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--reproducible', action='append', default=[],
+ help='show logs for a reproducible test')
+ parser.add_argument('--prepend-run', action='store_true',
+ help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+ Required if more than one test run is present in the result file''')
+ parser.add_argument('--raw', action='store_true',
+ help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+ parser.add_argument('--raw-ptest', action='store_true',
+ help='show raw ptest log')
+ parser.add_argument('--raw-reproducible', action='store_true',
+ help='show raw reproducible build logs')
+
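Note: with --dump-ptest each section log is written to DIR[/<run>][/<suite>]/<name>.log, and --prepend-run is enforced when several runs carry ptest sections so they cannot overwrite each other. A small sketch of the destination layout built in log() above; the argument values are illustrative:

    import os

    def dest_path(dump_dir, run_name, sectname, name, prepend_run):
        dest_dir = dump_dir
        if prepend_run:
            dest_dir = os.path.join(dest_dir, run_name)
        if not sectname.startswith('ptest'):
            # ltp/ltpposix suites get their own subdirectory
            dest_dir = os.path.join(dest_dir, sectname.split('.')[0])
        return os.path.join(dest_dir, '%s.log' % name)

    print(dest_path('logs', 'run1', 'ltpresult.sections', 'math', True))
    # logs/run1/ltpresult/math.log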
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000000..ecb27c5933
--- /dev/null
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,235 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+import copy
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(f):
+ with open(f, "r") as filedata:
+ return json.load(filedata)
+
+def write_json_file(f, json_data):
+ os.makedirs(os.path.dirname(f), exist_ok=True)
+ with open(f, 'w') as filedata:
+ filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+
+class ManualTestRunner(object):
+
+ def _get_test_module(self, case_file):
+ return os.path.basename(case_file).split('.')[0]
+
+ def _get_input(self, config):
+ while True:
+ output = input('{} = '.format(config))
+ if re.match('^[a-z0-9-.]+$', output):
+ break
+            print('Only lowercase alphanumeric characters, hyphens and dots are allowed. Please try again.')
+ return output
+
+ def _get_available_config_options(self, config_options, test_module, target_config):
+ avail_config_options = None
+ if test_module in config_options:
+ avail_config_options = config_options[test_module].get(target_config)
+ return avail_config_options
+
+ def _choose_config_option(self, options):
+ while True:
+ output = input('{} = '.format('Option index number'))
+ if output in options:
+ break
+            print('Only index numbers from the available configuration options above are allowed. Please try again.')
+ return options[output]
+
+ def _get_config(self, config_options, test_module):
+ from oeqa.utils.metadata import get_layers
+ from oeqa.utils.commands import get_bb_var
+ from resulttool.resultutils import store_map
+
+ layers = get_layers(get_bb_var('BBLAYERS'))
+ configurations = {}
+ configurations['LAYERS'] = layers
+ configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ configurations['TEST_TYPE'] = 'manual'
+ configurations['TEST_MODULE'] = test_module
+
+ extra_config = set(store_map['manual']) - set(configurations)
+ for config in sorted(extra_config):
+ avail_config_options = self._get_available_config_options(config_options, test_module, config)
+ if avail_config_options:
+ print('---------------------------------------------')
+                print('These are the available configuration #%s options:' % config)
+ print('---------------------------------------------')
+ for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
+ print('%s: %s' % (option, avail_config_options[option]))
+                print('Please select a configuration option by entering its index number.')
+ value_conf = self._choose_config_option(avail_config_options)
+ print('---------------------------------------------\n')
+ else:
+ print('---------------------------------------------')
+                print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
+ print('---------------------------------------------')
+ value_conf = self._get_input('Configuration Value')
+ print('---------------------------------------------\n')
+ configurations[config] = value_conf
+ return configurations
+
+ def _execute_test_steps(self, case):
+ test_result = {}
+ print('------------------------------------------------------------------------')
+ print('Executing test case: %s' % case['test']['@alias'])
+ print('------------------------------------------------------------------------')
+        print('You have a total of %s test steps to be executed.' % len(case['test']['execution']))
+ print('------------------------------------------------------------------------\n')
+ for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+ print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+ expected_output = case['test']['execution'][step]['expected_results']
+ if expected_output:
+ print('Expected output: %s' % expected_output)
+ while True:
+ done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
+ result_types = {'p':'PASSED',
+ 'f':'FAILED',
+ 'b':'BLOCKED',
+ 's':'SKIPPED'}
+            if done in result_types:
+                res = result_types[done]
+                if res == 'FAILED':
+                    log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
+                    test_result.update({case['test']['@alias']: {'status': res, 'log': log_input}})
+                else:
+                    test_result.update({case['test']['@alias']: {'status': res}})
+                break
+ print('Invalid input!')
+ return test_result
+
+ def _get_write_dir(self):
+ return os.environ['BUILDDIR'] + '/tmp/log/manual/'
+
+ def run_test(self, case_file, config_options_file, testcase_config_file):
+ test_module = self._get_test_module(case_file)
+ cases = load_json_file(case_file)
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ configurations = self._get_config(config_options, test_module)
+ result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
+ test_results = {}
+ if testcase_config_file:
+ test_case_config = load_json_file(testcase_config_file)
+ test_case_to_execute = test_case_config['testcases']
+            for case in copy.deepcopy(cases):
+ if case['test']['@alias'] not in test_case_to_execute:
+ cases.remove(case)
+
+ print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+ for c in cases:
+ test_result = self._execute_test_steps(c)
+ test_results.update(test_result)
+ return configurations, result_id, self._get_write_dir(), test_results
+
+ def _get_true_false_input(self, input_message):
+ yes_list = ['Y', 'YES']
+ no_list = ['N', 'NO']
+ while True:
+ more_config_option = input(input_message).upper()
+ if more_config_option in yes_list or more_config_option in no_list:
+ break
+ print('Invalid input!')
+ if more_config_option in no_list:
+ return False
+ return True
+
+ def make_config_option_file(self, logger, case_file, config_options_file):
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ new_test_module = self._get_test_module(case_file)
+ print('Creating configuration options file for test module: %s' % new_test_module)
+ new_config_options = {}
+
+ while True:
+ config_name = input('\nPlease provide test configuration to create:\n').upper()
+ new_config_options[config_name] = {}
+ while True:
+ config_value = self._get_input('Configuration possible option value')
+ config_option_index = len(new_config_options[config_name]) + 1
+ new_config_options[config_name][config_option_index] = config_value
+                more_config_option = self._get_true_false_input('\nAre there more configuration option values to add: (Y)es/(N)o\n')
+ if not more_config_option:
+ break
+            more_config = self._get_true_false_input('\nAre there more configurations to create: (Y)es/(N)o\n')
+ if not more_config:
+ break
+
+ if new_config_options:
+ config_options[new_test_module] = new_config_options
+ if not config_options_file:
+ config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
+ write_json_file(config_options_file, config_options)
+ logger.info('Configuration option file created at %s' % config_options_file)
+
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
+def manualexecution(args, logger):
+ testrunner = ManualTestRunner()
+ if args.make_config_options_file:
+ testrunner.make_config_option_file(logger, args.file, args.config_options_file)
+ return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
+ configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
+ resultjsonhelper = OETestResultJSONHelper()
+ resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('manualexecution', help='helper script to populate results during manual test execution.',
+                                         description='helper script to populate results during manual test execution. You can find the manual test case JSON files in meta/lib/oeqa/manual/',
+ group='manualexecution')
+ parser_build.set_defaults(func=manualexecution)
+    parser_build.add_argument('file', help='specify path to manual test case JSON file. Note: Please use \"\" to encapsulate the file path.')
+ parser_build.add_argument('-c', '--config-options-file', default='',
+                              help='the config options file to import and use for configuration option selection, or to update when making a config options file')
+ parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
+ help='make the configuration options file based on provided inputs')
+ parser_build.add_argument('-t', '--testcase-config-file', default='',
+ help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection') \ No newline at end of file
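Note: every executed step above ends with a one-letter verdict that is expanded through the result_types table, and only a FAILED verdict asks for a log reference. A condensed sketch of that recording step; the alias and log text are illustrative:

    result_types = {'p': 'PASSED', 'f': 'FAILED', 'b': 'BLOCKED', 's': 'SKIPPED'}

    def record(alias, done, log_input=None):
        res = result_types[done]  # done is the lowercased single-letter answer
        entry = {'status': res}
        if res == 'FAILED' and log_input:
            entry['log'] = log_input
        return {alias: entry}

    print(record('manual.sample.case', 'f', 'log:211 Error Bitbake'))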
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000000..18b4825a18
--- /dev/null
+++ b/scripts/lib/resulttool/merge.py
@@ -0,0 +1,46 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+ description='merge the results from multiple files/directories/URLs into the target file or directory',
+ group='setup')
+ parser_build.set_defaults(func=merge)
+ parser_build.add_argument('base_results',
+ help='the results file/directory/URL to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
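Note: merge() appends the base results into the target, keeping the store layout when the target is a directory or URL and flattening when it is a single file. A hedged sketch of driving it programmatically; resulttool normally builds this Namespace from argv, and the paths here are illustrative:

    import argparse
    import logging

    args = argparse.Namespace(
        base_results='base/testresults.json',
        target_results='merged/testresults.json',
        not_add_testseries=False,
        executed_by='ci-worker-01',
    )
    # from resulttool.merge import merge
    # merge(args, logging.getLogger('resulttool'))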
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000000..9f952951b3
--- /dev/null
+++ b/scripts/lib/resulttool/regression.py
@@ -0,0 +1,186 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import resulttool.resultutils as resultutils
+import json
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+                    logger.error('Failed to retrieve base test case status: %s' % k)
+ if result:
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ else:
+ resultstring = "Match: %s\n %s" % (base_name, target_name)
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+ # Should only see regressions now; we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(sorted(matches)))
+ print("\n".join(sorted(regressions)))
+ print("\n".join(sorted(notfound)))
+
+ return 0
+
+def regression_git(args, logger):
+ base_results = {}
+ target_results = {}
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+ # Find the closest matching commit number for comparison
+ # In future we could check that the commit is a common ancestor and
+ # continue back if not, but this is good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory/URL for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory/URL to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID')
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+ description='regression analysis comparing base result set to target '
+ 'result set',
+ group='analysis')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID; by default '
+ 'results are matched by configuration')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID; by default '
+ 'results are matched by configuration')
+
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+
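
A toy illustration of compare_result() above, using fabricated result data; any status change between base and target is reported as a regression pair (assumes resulttool.regression is importable):

    import logging
    from resulttool.regression import compare_result

    logger = logging.getLogger('example')
    base = {'result': {'suite.case': {'status': 'PASSED'}}}
    target = {'result': {'suite.case': {'status': 'FAILED'}}}
    res, resstr = compare_result(logger, 'base-run', 'target-run', base, target)
    # res == {'suite.case': {'base': 'PASSED', 'target': 'FAILED'}}
    # resstr starts with "Regression: base-run"
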
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000000..f0ca50ebe2
--- /dev/null
+++ b/scripts/lib/resulttool/report.py
@@ -0,0 +1,312 @@
+# test result tool - report text based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.ltptests = {}
+ self.ltpposixtests = {}
+ self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+ 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+ 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
+
+ if k == 'ptestresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ptestresult.sections']:
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+ if 'duration' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ if 'timeout' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] += " T"
+ return True
+
+ # process test result
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return True
+
+ # Handle dotted suite names such as 'glib-2.0'
+ if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ptestresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+
+ # do not process duplicate results
+ if test in self.ptests[machine][suite]["testcases"]:
+ print("Warning duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+ return False
+
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ptests[machine][suite][tk] += 1
+ self.ptests[machine][suite]["testcases"].add(test)
+ return True
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
+
+ if k == 'ltpresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpresult.sections']:
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ if 'timeout' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle dotted suite names such as 'glib-2.0'
+ if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
+ if k == 'ltpposixresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpposixresult.sections']:
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpposixresult.sections'][suite]:
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle dotted suite names such as 'glib-2.0'
+ if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpposixresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltpposixtests[machine][suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult, machine):
+ test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+ result = testresult.get('result', [])
+ for k in result:
+ test_status = result[k].get('status', [])
+ if k.startswith("ptestresult."):
+ if not self.handle_ptest_result(k, test_status, result, machine):
+ continue
+ elif k.startswith("ltpresult."):
+ self.handle_ltptest_result(k, test_status, result, machine)
+ elif k.startswith("ltpposixresult."):
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+ # process result if it was not skipped by a handler
+ for tk in self.result_types:
+ if test_status in self.result_types[tk]:
+ test_count_report[tk] += 1
+ if test_status in self.result_types['failed']:
+ test_count_report['failed_testcases'].append(k)
+ return test_count_report
+
+ def print_test_report(self, template_file_name, test_count_reports):
+ from jinja2 import Environment, FileSystemLoader
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ file_loader = FileSystemLoader(script_path + '/template')
+ env = Environment(loader=file_loader, trim_blocks=True)
+ template = env.get_template(template_file_name)
+ havefailed = False
+ reportvalues = []
+ machines = []
+ cols = ['passed', 'failed', 'skipped']
+ maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0, 'ltptest': 0, 'ltpposixtest': 0}
+ for line in test_count_reports:
+ total_tested = line['passed'] + line['failed'] + line['skipped']
+ vals = {}
+ vals['result_id'] = line['result_id']
+ vals['testseries'] = line['testseries']
+ vals['sort'] = line['testseries'] + "_" + line['result_id']
+ vals['failed_testcases'] = line['failed_testcases']
+ for k in cols:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ for k in maxlen:
+ if k in vals and len(vals[k]) > maxlen[k]:
+ maxlen[k] = len(vals[k])
+ reportvalues.append(vals)
+ if line['failed_testcases']:
+ havefailed = True
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ reporttotalvalues = {}
+ for k in cols:
+ reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+ reporttotalvalues['count'] = '%s' % len(test_count_reports)
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
+ output = template.render(reportvalues=reportvalues,
+ reporttotalvalues=reporttotalvalues,
+ havefailed=havefailed,
+ machines=machines,
+ ptests=self.ptests,
+ ltptests=self.ltptests,
+ ltpposixtests=self.ltpposixtests,
+ maxlen=maxlen)
+ print(output)
+
+ def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
+ def print_selected_testcase_result(testresults, selected_test_case_only):
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]['result']
+ test_case_result = result.get(selected_test_case_only, {})
+ if test_case_result.get('status'):
+ print('Found selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ print(test_case_result['status'])
+ else:
+ print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ if test_case_result.get('log'):
+ print(test_case_result['log'])
+ test_count_reports = []
+ configmap = resultutils.store_map
+ if use_regression_map:
+ configmap = resultutils.regression_map
+ if commit:
+ if tag:
+ logger.warning("Ignoring --tag as --commit was specified")
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(source_dir)
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+ rev_index = gitarchive.rev_find(revs, 'commit', commit)
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
+ elif tag:
+ repo = GitRepo(source_dir)
+ testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
+ else:
+ testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+ if raw_test:
+ raw_results = {}
+ for testsuite in testresults:
+ result = testresults[testsuite].get(raw_test, {})
+ if result:
+ raw_results[testsuite] = {raw_test: result}
+ if raw_results:
+ if selected_test_case_only:
+ print_selected_testcase_result(raw_results, selected_test_case_only)
+ else:
+ print(json.dumps(raw_results, sort_keys=True, indent=4))
+ else:
+ print('Could not find raw test result for %s' % raw_test)
+ return 0
+ if selected_test_case_only:
+ print_selected_testcase_result(testresults, selected_test_case_only)
+ return 0
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ skip = False
+ result = testresults[testsuite][resultid]
+ machine = result['configuration']['MACHINE']
+
+ # Check to see if there is already results for these kinds of tests for the machine
+ for key in result['result'].keys():
+ testtype = str(key).split('.')[0]
+ if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
+ (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+ print("Already have test results for %s on %s, skipping %s" % (testtype, machine, resultid))
+ skip = True
+ break
+ if skip:
+ break
+
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
+ test_count_report['testseries'] = result['configuration']['TESTSERIES']
+ test_count_report['result_id'] = resultid
+ test_count_reports.append(test_count_report)
+ self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+ report = ResultsTextReport()
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+ args.raw_test_only, args.selected_test_case_only)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('report', help='summarise test results',
+ description='print a text-based summary of the test results',
+ group='analysis')
+ parser_build.set_defaults(func=report)
+ parser_build.add_argument('source_dir',
+ help='source file/directory/URL that contain the test result files to summarise')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--commit', help="Revision to report")
+ parser_build.add_argument('-t', '--tag', default='',
+ help='source_dir is a git repository, report on the tag specified from that repository')
+ parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+ help='instead of the default "store_map", use the "regression_map" for report')
+ parser_build.add_argument('-r', '--raw_test_only', default='',
+ help='output raw test result only for the user provided test result id')
+ parser_build.add_argument('-s', '--selected_test_case_only', default='',
+ help='output selected test case result for the user provided test case id, if both test '
+ 'result id and test case id are provided then output the selected test case result '
+ 'from the provided test result id')
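
The ptest/ltp handlers above all parse result keys of the form "<type>.<suite>.<test>"; a short sketch of the two-stage split they use for dotted suite names such as 'glib-2.0':

    k = 'ptestresult.glib-2.0.test1'
    _, suite, test = k.split('.', 2)          # suite == 'glib-2', test == '0.test1'
    # if 'glib-2' is not a known section, retry with one more split:
    _, suite, suite1, test = k.split('.', 3)  # suite + '.' + suite1 == 'glib-2.0'
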
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..8917022d36
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,224 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import base64
+import zlib
+import json
+import scriptpath
+import copy
+import urllib.request
+import posixpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+ "oeselftest": [],
+ "runtime": [],
+ "sdk": [],
+ "sdkext": [],
+ "manual": []
+}
+regression_map = {
+ "oeselftest": ['TEST_TYPE', 'MACHINE'],
+ "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+ "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+ "oeselftest": ['TEST_TYPE'],
+ "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+ "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
+def is_url(p):
+ """
+ Helper for determining if the given path is a URL
+ """
+ return p.startswith('http://') or p.startswith('https://')
+
+extra_configvars = {'TESTSERIES': ''}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
+ if type(f) is str:
+ if is_url(f):
+ with urllib.request.urlopen(f) as response:
+ data = json.loads(response.read().decode('utf-8'))
+ url = urllib.parse.urlparse(f)
+ testseries = posixpath.basename(posixpath.dirname(url.path))
+ else:
+ with open(f, "r") as filedata:
+ data = json.load(filedata)
+ testseries = os.path.basename(os.path.dirname(f))
+ else:
+ data = f
+ for res in data:
+ if "configuration" not in data[res] or "result" not in data[res]:
+ raise ValueError("Test results data without configuration or result section?")
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
+ testtype = data[res]["configuration"].get("TEST_TYPE")
+ if testtype not in configmap:
+ raise ValueError("Unknown test type %s" % testtype)
+ testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+ if testpath not in results:
+ results[testpath] = {}
+ results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
+ results = {}
+ if is_url(source) or os.path.isfile(source):
+ append_resultsdata(results, source, configmap, configvars)
+ return results
+ for root, dirs, files in os.walk(source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ append_resultsdata(results, f, configmap, configvars)
+ return results
+
+def filter_resultsdata(results, resultid):
+ newresults = {}
+ for r in results:
+ for i in results[r]:
+ if i == resultid:
+ newresults[r] = {}
+ newresults[r][i] = results[r][i]
+ return newresults
+
+def strip_ptestresults(results):
+ newresults = copy.deepcopy(results)
+ for res in newresults:
+ if 'result' not in newresults[res]:
+ continue
+ if 'ptestresult.rawlogs' in newresults[res]['result']:
+ del newresults[res]['result']['ptestresult.rawlogs']
+ if 'ptestresult.sections' in newresults[res]['result']:
+ for i in newresults[res]['result']['ptestresult.sections']:
+ if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
+ del newresults[res]['result']['ptestresult.sections'][i]['log']
+ return newresults
+
+def decode_log(logdata):
+ if isinstance(logdata, str):
+ return logdata
+ elif isinstance(logdata, dict):
+ if "compressed" in logdata:
+ data = logdata.get("compressed")
+ data = base64.b64decode(data.encode("utf-8"))
+ data = zlib.decompress(data)
+ return data.decode("utf-8", errors='ignore')
+ return None
+
+def generic_get_log(sectionname, results, section):
+ if sectionname not in results:
+ return None
+ if section not in results[sectionname]:
+ return None
+
+ ptest = results[sectionname][section]
+ if 'log' not in ptest:
+ return None
+ return decode_log(ptest['log'])
+
+def ptestresult_get_log(results, section):
+ return generic_get_log('ptestresult.sections', results, section)
+
+def generic_get_rawlogs(sectname, results):
+ if sectname not in results:
+ return None
+ if 'log' not in results[sectname]:
+ return None
+ return decode_log(results[sectname]['log'])
+
+def ptestresult_get_rawlogs(results):
+ return generic_get_rawlogs('ptestresult.rawlogs', results)
+
+def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
+ for res in results:
+ if res:
+ dst = destdir + "/" + res + "/" + fn
+ else:
+ dst = destdir + "/" + fn
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ resultsout = results[res]
+ if not ptestjson:
+ resultsout = strip_ptestresults(results[res])
+ with open(dst, 'w') as f:
+ f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+ for res2 in results[res]:
+ if ptestlogs and 'result' in results[res][res2]:
+ seriesresults = results[res][res2]['result']
+ rawlogs = ptestresult_get_rawlogs(seriesresults)
+ if rawlogs is not None:
+ with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+ f.write(rawlogs)
+ if 'ptestresult.sections' in seriesresults:
+ for i in seriesresults['ptestresult.sections']:
+ sectionlog = ptestresult_get_log(seriesresults, i)
+ if sectionlog is not None:
+ with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+ f.write(sectionlog)
+
+def git_get_result(repo, tags, configmap=store_map):
+ git_objs = []
+ for tag in tags:
+ files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+ git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d))
+ return objs
+
+ # Optimize by reading all data with one git command
+ results = {}
+ for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+ append_resultsdata(results, obj, configmap=configmap)
+
+ return results
+
+def test_run_results(results):
+ """
+ Convenient generator function that iterates over all test runs that have a
+ result section.
+
+ Generates a tuple of:
+ (result json file path, test run name, test run (dict), test run "results" (dict))
+ for each test run that has a "result" section
+ """
+ for path in results:
+ for run_name, test_run in results[path].items():
+ if 'result' not in test_run:
+ continue
+ yield path, run_name, test_run, test_run['result']
+
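
A round-trip sketch of the compressed log format handled by decode_log() above: a log may be a plain string, or a dict carrying base64-encoded, zlib-compressed data (assumes resulttool.resultutils is importable):

    import base64
    import zlib
    from resulttool.resultutils import decode_log

    raw = b'ptest output'
    log = {'compressed': base64.b64encode(zlib.compress(raw)).decode('utf-8')}
    assert decode_log(log) == 'ptest output'
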
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000000..e0951f0a8f
--- /dev/null
+++ b/scripts/lib/resulttool/store.py
@@ -0,0 +1,104 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+ tempdir = tempfile.mkdtemp(prefix='testresults.')
+ try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if args.extra_test_env:
+ configvars['EXTRA_TEST_ENV'] = args.extra_test_env
+ results = {}
+ logger.info('Reading files from %s' % args.source)
+ if resultutils.is_url(args.source) or os.path.isfile(args.source):
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
+ else:
+ for root, dirs, files in os.walk(args.source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ resultutils.append_resultsdata(results, f, configvars=configvars)
+ elif args.all:
+ dst = f.replace(args.source, tempdir + "/")
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copyfile(f, dst)
+
+ revisions = {}
+
+ if not results and not args.all:
+ if args.allow_empty:
+ logger.info("No results found to store")
+ return 0
+ logger.error("No results found to store")
+ return 1
+
+ # Find the branch/commit/commit_count and ensure they all match
+ for suite in results:
+ for result in results[suite]:
+ config = results[suite][result]['configuration']['LAYERS']['meta']
+ revision = (config['commit'], config['branch'], str(config['commit_count']))
+ if revision not in revisions:
+ revisions[revision] = {}
+ if suite not in revisions[revision]:
+ revisions[revision][suite] = {}
+ revisions[revision][suite][result] = results[suite][result]
+
+ logger.info("Found %d revisions to store" % len(revisions))
+
+ for r in revisions:
+ results = revisions[r]
+ keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+ subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+ resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
+
+ logger.info('Storing test result into git repository %s' % args.git_dir)
+
+ gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+ "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+ False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+ 'Test run #{tag_number} of {branch}:{commit}', '',
+ [], [], False, keywords, logger)
+
+ finally:
+ subprocess.check_call(["rm", "-rf", tempdir])
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+ description='takes a results file or directory of results files and stores '
+ 'them into the destination git repository, splitting out the results '
+ 'files as configured',
+ group='setup')
+ parser_build.set_defaults(func=store)
+ parser_build.add_argument('source',
+ help='source file/directory/URL that contain the test result files to be stored')
+ parser_build.add_argument('git_dir',
+ help='the location of the git repository to store the results in')
+ parser_build.add_argument('-a', '--all', action='store_true',
+ help='include all files, not just testresults.json files')
+ parser_build.add_argument('-e', '--allow-empty', action='store_true',
+ help='don\'t error if no results to store are found')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
+ parser_build.add_argument('-t', '--extra-test-env', default='',
+ help='add extra test environment data to each result file configuration')
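
For reference, a minimal illustration (with fabricated configuration values) of the (commit, branch, commit_count) key that store() above groups results under before archiving each group as a single test run:

    config = {'commit': 'abc123', 'branch': 'master', 'commit_count': 42}
    revision = (config['commit'], config['branch'], str(config['commit_count']))
    # every result whose LAYERS['meta'] configuration yields this tuple is
    # saved and git-archived together under one tag
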
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000000..2efba2ef6f
--- /dev/null
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,79 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+
+{% for machine in machines %}
+{% if ptests[machine] %}
+==============================================================================================================
+{{ machine }} PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltptests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Posix Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+ {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}
diff --git a/scripts/lib/scriptpath.py b/scripts/lib/scriptpath.py
index d00317e18d..f32326db3a 100644
--- a/scripts/lib/scriptpath.py
+++ b/scripts/lib/scriptpath.py
@@ -3,18 +3,8 @@
# Copyright (C) 2012-2014 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index 3c60c3a1e6..adf81476f0 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import glob
@@ -26,12 +16,52 @@ import string
import subprocess
import sys
import tempfile
+import threading
import importlib
-from importlib import machinery
+import importlib.machinery
+import importlib.util
+
+class KeepAliveStreamHandler(logging.StreamHandler):
+ def __init__(self, keepalive=True, **kwargs):
+ super().__init__(**kwargs)
+ if keepalive is True:
+ keepalive = 5000 # default timeout
+ self._timeout = threading.Condition()
+ self._stop = False
+
+ # background thread waits on condition, if the condition does not
+ # happen emit a keep alive message
+ def thread():
+ while not self._stop:
+ with self._timeout:
+ if not self._timeout.wait(keepalive):
+ self.emit(logging.LogRecord("keepalive", logging.INFO,
+ None, None, "Keepalive message", None, None))
+
+ self._thread = threading.Thread(target = thread, daemon = True)
+ self._thread.start()
+
+ def close(self):
+ # mark the thread to stop and notify it
+ self._stop = True
+ with self._timeout:
+ self._timeout.notify()
+ # wait for it to join
+ self._thread.join()
+ super().close()
+
+ def emit(self, record):
+ super().emit(record)
+ # trigger timer reset
+ with self._timeout:
+ self._timeout.notify()
-def logger_create(name, stream=None):
+def logger_create(name, stream=None, keepalive=None):
logger = logging.getLogger(name)
- loggerhandler = logging.StreamHandler(stream=stream)
+ if keepalive is not None:
+ loggerhandler = KeepAliveStreamHandler(stream=stream, keepalive=keepalive)
+ else:
+ loggerhandler = logging.StreamHandler(stream=stream)
loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(loggerhandler)
logger.setLevel(logging.INFO)
@@ -39,22 +69,23 @@ def logger_create(name, stream=None):
def logger_setup_color(logger, color='auto'):
from bb.msg import BBLogFormatter
- console = logging.StreamHandler(sys.stdout)
- formatter = BBLogFormatter("%(levelname)s: %(message)s")
- console.setFormatter(formatter)
- logger.handlers = [console]
- if color == 'always' or (color=='auto' and console.stream.isatty()):
- formatter.enable_color()
+
+ for handler in logger.handlers:
+ if (isinstance(handler, logging.StreamHandler) and
+ isinstance(handler.formatter, BBLogFormatter)):
+ if color == 'always' or (color == 'auto' and handler.stream.isatty()):
+ handler.formatter.enable_color()
def load_plugins(logger, plugins, pluginpath):
- import imp
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
if spec:
- return spec.loader.load_module()
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
def plugin_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
@@ -69,6 +100,7 @@ def load_plugins(logger, plugins, pluginpath):
plugin.plugin_init(plugins)
plugins.append(plugin)
+
def git_convert_standalone_clone(repodir):
"""If specified directory is a git repository, ensure it's a standalone clone"""
import bb.process
@@ -147,6 +179,7 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
f.write('BB_STRICT_CHECKSUM = "ignore"\n')
f.write('SRC_URI = "%s"\n' % srcuri)
f.write('SRCREV = "%s"\n' % srcrev)
+ f.write('PV = "0.0+${SRCPV}"\n')
f.write('WORKDIR = "%s"\n' % tmpworkdir)
# Set S out of the way so it doesn't get created under the workdir
f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
@@ -186,7 +219,8 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
for pathvar in pathvars:
path = rd.getVar(pathvar)
- shutil.rmtree(path)
+ if os.path.exists(path):
+ shutil.rmtree(path)
finally:
if fetchrecipe:
try:
@@ -238,3 +272,13 @@ def is_src_url(param):
elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
return True
return False
+
+def filter_src_subdirs(pth):
+ """
+ Filter out subdirectories of initial unpacked source trees that we do not care about.
+ Used by devtool and recipetool.
+ """
+ dirlist = os.listdir(pth)
+ filterout = ['git.indirectionsymlink', 'source-date-epoch']
+ dirlist = [x for x in dirlist if x not in filterout]
+ return dirlist
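
A hedged usage sketch of the new keepalive logging above, assuming scriptutils is importable: with keepalive=5, a "Keepalive message" INFO record is emitted whenever nothing has been logged for five seconds.

    import time
    import scriptutils

    logger = scriptutils.logger_create('demo', keepalive=5)
    logger.info('starting long-running step')
    time.sleep(12)   # expect roughly two keepalive records in this window
    logger.info('done')
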
diff --git a/scripts/lib/wic/__init__.py b/scripts/lib/wic/__init__.py
index 85876b138c..85567934ae 100644
--- a/scripts/lib/wic/__init__.py
+++ b/scripts/lib/wic/__init__.py
@@ -1,20 +1,10 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class WicError(Exception):
pass
diff --git a/scripts/lib/wic/canned-wks/qemuriscv.wks b/scripts/lib/wic/canned-wks/qemuriscv.wks
new file mode 100644
index 0000000000..12c68b7069
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuriscv.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for RISC-V QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index c8d9f121b5..22b45217f1 100644
--- a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="vga=0 rw oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 "
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index 4662c665c0..674ccfc244 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -33,9 +19,10 @@ import os
import tempfile
import json
import subprocess
+import shutil
+import re
from collections import namedtuple, OrderedDict
-from distutils.spawn import find_executable
from wic import WicError
from wic.filemap import sparse_copy
@@ -89,7 +76,8 @@ def find_canned_image(scripts_path, wks_file):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ if ((fname.endswith(".wks") and wks_file + ".wks" == fname) or \
+ (fname.endswith(".wks.in") and wks_file + ".wks.in" == fname)):
fullpath = os.path.join(canned_wks_dir, fname)
return fullpath
return None
@@ -106,7 +94,7 @@ def list_canned_images(scripts_path):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks"):
+ if fname.endswith(".wks") or fname.endswith(".wks.in"):
fullpath = os.path.join(canned_wks_dir, fname)
with open(fullpath) as wks:
for line in wks:
@@ -115,7 +103,7 @@ def list_canned_images(scripts_path):
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
- basename = os.path.splitext(fname)[0]
+ basename = fname.split('.')[0]
print(" %s\t\t%s" % (basename.ljust(30), desc))
@@ -245,12 +233,19 @@ class Disk:
self._ptable_format = None
# find parted
- self.paths = "/bin:/usr/bin:/usr/sbin:/sbin/"
+ # read paths from $PATH environment variable
+ # if it fails, use hardcoded paths
+ pathlist = "/bin:/usr/bin:/usr/sbin:/sbin/"
+ try:
+ self.paths = os.environ['PATH'] + ":" + pathlist
+ except KeyError:
+ self.paths = pathlist
+
if native_sysroot:
- for path in self.paths.split(':'):
+ for path in pathlist.split(':'):
self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
- self.parted = find_executable("parted", self.paths)
+ self.parted = shutil.which("parted", path=self.paths)
if not self.parted:
raise WicError("Can't find executable parted")
@@ -285,10 +280,10 @@ class Disk:
def __getattr__(self, name):
"""Get path to the executable in a lazy way."""
if name in ("mdir", "mcopy", "mdel", "mdeltree", "sfdisk", "e2fsck",
- "resize2fs", "mkswap", "mkdosfs", "debugfs"):
+ "resize2fs", "mkswap", "mkdosfs", "debugfs","blkid"):
aname = "_%s" % name
if aname not in self.__dict__:
- setattr(self, aname, find_executable(name, self.paths))
+ setattr(self, aname, shutil.which(name, path=self.paths))
if aname not in self.__dict__ or self.__dict__[aname] is None:
raise WicError("Can't find executable '{}'".format(name))
return self.__dict__[aname]
@@ -296,7 +291,7 @@ class Disk:
def _get_part_image(self, pnum):
if pnum not in self.partitions:
- raise WicError("Partition %s is not in the image")
+ raise WicError("Partition %s is not in the image" % pnum)
part = self.partitions[pnum]
# check if fstype is supported
for fstype in self.fstypes:
@@ -319,6 +314,9 @@ class Disk:
seek=self.partitions[pnum].start)
def dir(self, pnum, path):
+ if pnum not in self.partitions:
+ raise WicError("Partition %s is not in the image" % pnum)
+
if self.partitions[pnum].fstype.startswith('ext'):
return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
self._get_part_image(pnum),
@@ -328,38 +326,80 @@ class Disk:
self._get_part_image(pnum),
path))
- def copy(self, src, pnum, path):
+ def copy(self, src, dest):
"""Copy partition image into wic image."""
+ pnum = dest.part if isinstance(src, str) else src.part
+
if self.partitions[pnum].fstype.startswith('ext'):
- cmd = "echo -e 'cd {}\nwrite {} {}' | {} -w {}".\
- format(path, src, os.path.basename(src),
+ if isinstance(src, str):
+ cmd = "printf 'cd {}\nwrite {} {}\n' | {} -w {}".\
+ format(os.path.dirname(dest.path), src, os.path.basename(src),
self.debugfs, self._get_part_image(pnum))
+ else: # copy from wic
+ # run both dump and rdump to support both files and directory
+ cmd = "printf 'cd {}\ndump /{} {}\nrdump /{} {}\n' | {} {}".\
+ format(os.path.dirname(src.path), src.path,
+ dest, src.path, dest, self.debugfs,
+ self._get_part_image(pnum))
else: # fat
- cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ if isinstance(src, str):
+ cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src, dest.path)
+ else:
+ cmd = "{} -i {} -snop ::{} {}".format(self.mcopy,
self._get_part_image(pnum),
- src, path)
+ src.path, dest)
+
exec_cmd(cmd, as_shell=True)
self._put_part_image(pnum)
- def remove(self, pnum, path):
+ def remove_ext(self, pnum, path, recursive):
+ """
+ Remove files/dirs and their contents from the partition.
+ This only applies to ext* partition.
+ """
+ abs_path = re.sub(r'//+', '/', path)
+ cmd = "{} {} -wR 'rm \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path)
+ out = exec_cmd(cmd, as_shell=True)
+ for line in out.splitlines():
+ if line.startswith("rm:"):
+ if "file is a directory" in line:
+ if recursive:
+ # loop through the contents and delete them one by one if
+ # flagged with -r
+ subdirs = iter(self.dir(pnum, abs_path).splitlines())
+ next(subdirs)
+ for subdir in subdirs:
+ dir = subdir.split(':')[1].split(" ", 1)[1]
+ if not dir == "." and not dir == "..":
+ self.remove_ext(pnum, "%s/%s" % (abs_path, dir), recursive)
+
+ rmdir_out = exec_cmd("{} {} -wR 'rmdir \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path.rstrip('/')),
+ as_shell=True)
+
+ for rmdir_line in rmdir_out.splitlines():
+ if "directory not empty" in rmdir_line:
+ raise WicError("Could not complete operation: \n%s \n"
+ "use -r to remove non-empty directory" % rmdir_line)
+ if rmdir_line.startswith("rmdir:"):
+ raise WicError("Could not complete operation: \n%s "
+ "\n%s" % (str(line), rmdir_line))
+
+ else:
+ raise WicError("Could not complete operation: \n%s "
+ "\nUnable to remove %s" % (str(line), abs_path))
+
+ def remove(self, pnum, path, recursive):
"""Remove files/dirs from the partition."""
partimg = self._get_part_image(pnum)
if self.partitions[pnum].fstype.startswith('ext'):
- cmd = "{} {} -wR 'rm {}'".format(self.debugfs,
- self._get_part_image(pnum),
- path)
- out = exec_cmd(cmd , as_shell=True)
- for line in out.splitlines():
- if line.startswith("rm:"):
- if "file is a directory" in line:
- # Try rmdir to see if this is an empty directory. This won't delete
- # any non empty directory so let user know about any error that this might
- # generate.
- print(exec_cmd("{} {} -wR 'rmdir {}'".format(self.debugfs,
- self._get_part_image(pnum),
- path), as_shell=True))
- else:
- raise WicError("Could not complete operation: wic %s" % str(line))
+ self.remove_ext(pnum, path, recursive)
+
else: # fat
cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
try:
@@ -402,7 +442,7 @@ class Disk:
outf.flush()
def read_ptable(path):
- out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
+ out = exec_cmd("{} -J {}".format(self.sfdisk, path))
return json.loads(out)
def write_ptable(parts, target):
@@ -503,7 +543,8 @@ class Disk:
logger.info("creating swap partition {}".format(pnum))
label = part.get("name")
label_str = "-L {}".format(label) if label else ''
- uuid = part.get("uuid")
+ out = exec_cmd("{} --probe {}".format(self.blkid, self._get_part_image(pnum)))
+ uuid = out[out.index("UUID=\"")+6:out.index("UUID=\"")+42]
uuid_str = "-U {}".format(uuid) if uuid else ''
with open(partfname, 'w') as sparse:
os.ftruncate(sparse.fileno(), part['size'] * self._lsector_size)
@@ -529,11 +570,15 @@ def wic_ls(args, native_sysroot):
def wic_cp(args, native_sysroot):
"""
- Copy local file or directory to the vfat partition of
+ Copy file or directory to/from the vfat/ext partition of
partitioned image.
"""
- disk = Disk(args.dest.image, native_sysroot)
- disk.copy(args.src, args.dest.part, args.dest.path)
+ if isinstance(args.dest, str):
+ disk = Disk(args.src.image, native_sysroot)
+ else:
+ disk = Disk(args.dest.image, native_sysroot)
+ disk.copy(args.src, args.dest)
+
def wic_rm(args, native_sysroot):
"""
@@ -541,13 +586,13 @@ def wic_rm(args, native_sysroot):
partitioned image.
"""
disk = Disk(args.path.image, native_sysroot)
- disk.remove(args.path.part, args.path.path)
+ disk.remove(args.path.part, args.path.path, args.recursive_delete)
def wic_write(args, native_sysroot):
"""
Write image to a target device.
"""
- disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'swap'))
+ disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'linux-swap'))
disk.write(args.target, args.expand)
def find_canned(scripts_path, file_name):
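
A hedged sketch of the bidirectional copy above. The real CLI parses arguments of the form image:partition/path; the namedtuple below is an illustrative stand-in for that parsed form, and the image path is an example:

    from collections import namedtuple
    from wic.engine import Disk

    PartPath = namedtuple('PartPath', 'image part path')
    disk = Disk('disk.img', native_sysroot=None)
    # host -> image: src is a plain string, dest carries the partition
    disk.copy('local.conf', PartPath('disk.img', 1, '/etc/local.conf'))
    # image -> host: src carries the partition, dest is a plain string
    disk.copy(PartPath('disk.img', 1, '/etc/fstab'), './fstab')
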
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index abbf958b8c..4d9da28172 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -1,13 +1,8 @@
+#
# Copyright (c) 2012 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License, version 2,
-# as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
"""
This module implements a way to get file block mapping. Two methods
@@ -37,8 +32,13 @@ def get_block_size(file_obj):
"""
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
- binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
- bsize = struct.unpack('I', binary_data)[0]
+ try:
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ bsize = struct.unpack('I', binary_data)[0]
+ except OSError:
+ bsize = None
+
+ # If the ioctl raised OSError or returned a zero bsize, fall back to os.fstat
if not bsize:
import os
stat = os.fstat(file_obj.fileno())
@@ -142,15 +142,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. It returns
- 'True' if block number 'block' of the image file is not mapped (hole)
- and 'False' otherwise.
- """
-
- raise Error("the method is not implemented")
-
def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
"""
This method has to be implemented by child classes. This is a
@@ -164,15 +155,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. Just like
- 'get_mapped_ranges()', but yields unmapped block ranges instead
- (holes).
- """
-
- raise Error("the method is not implemented")
-
# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
_SEEK_DATA = 3
@@ -265,15 +247,10 @@ class FilemapSeek(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _get_ranges(self, start, count, whence1, whence2):
"""
- This function implements 'get_mapped_ranges()' and
- 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
- and 'whence2' arguments.
+ This function implements 'get_mapped_ranges()' depending
+ on what is passed in the 'whence1' and 'whence2' arguments.
"""
assert whence1 != whence2
@@ -303,12 +280,6 @@ class FilemapSeek(_FilemapBase):
% (start, count, start + count - 1))
return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
- def get_unmapped_ranges(self, start, count):
- """Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
-
# Below goes the FIEMAP ioctl implementation, which is not very readable
# because it deals with the rather complex FIEMAP ioctl. To understand the
@@ -422,10 +393,6 @@ class FilemapFiemap(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _unpack_fiemap_extent(self, index):
"""
Unpack a 'struct fiemap_extent' structure object number 'index' from
@@ -502,23 +469,28 @@ class FilemapFiemap(_FilemapBase):
% (first_prev, last_prev))
yield (first_prev, last_prev)
- def get_unmapped_ranges(self, start, count):
+class FilemapNobmap(_FilemapBase):
+ """
+ This class is used when neither 'SEEK_DATA/HOLE' nor FIEMAP is
+ supported by the filesystem or kernel.
+ """
+
+ def __init__(self, image, log=None):
"""Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- hole_first = start
- for first, last in self._do_get_mapped_ranges(start, count):
- if first > hole_first:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, first - 1))
- yield (hole_first, first - 1)
- hole_first = last + 1
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapNobmap: initializing")
- if hole_first < start + count:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, start + count - 1))
- yield (hole_first, start + count - 1)
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return True
+
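+ # With no block-map facility available, every block must be assumed
+ # to hold data, so sparse detection is effectively disabled and the
+ # whole requested range is reported as one mapped extent.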
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapNobmap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ yield (start, start + count - 1)
def filemap(image, log=None):
"""
@@ -533,7 +505,10 @@ def filemap(image, log=None):
try:
return FilemapFiemap(image, log)
except ErrorNotSupp:
- return FilemapSeek(image, log)
+ try:
+ return FilemapSeek(image, log)
+ except ErrorNotSupp:
+ return FilemapNobmap(image, log)
def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
length=0, api=None):
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index 64f08052c7..4ff7470a6a 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -1,21 +1,6 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements some basic help invocation functions along
@@ -356,12 +341,15 @@ DESCRIPTION
wic_cp_usage = """
- Copy files and directories to the vfat or ext* partition
+ Copy files and directories to/from the vfat or ext* partition
+
+ usage: wic cp <src> <dest> [--native-sysroot <path>]
- usage: wic cp <src> <image>:<partition>[<path>] [--native-sysroot <path>]
+ The source or destination image is given in the format <image>:<partition>[<path>]
- This command copies local files or directories to the vfat or ext* partitions
-of partitioned image.
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
See 'wic help cp' for more detailed instructions.
@@ -370,16 +358,18 @@ of partitioned image.
wic_cp_help = """
NAME
- wic cp - copy files and directories to the vfat or ext* partitions
+ wic cp - copy files and directories to/from the vfat or ext* partitions
SYNOPSIS
- wic cp <src> <image>:<partition>
- wic cp <src> <image>:<partition><path>
- wic cp <src> <image>:<partition><path> --native-sysroot <path>
+ wic cp <src> <dest>:<partition>
+ wic cp <src>:<partition> <dest>
+ wic cp <src> <dest-image>:<partition><path>
+ wic cp <src> <dest-image>:<partition><path> --native-sysroot <path>
DESCRIPTION
- This command copies files and directories to the vfat or ext* partition of
- the partitioned image.
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
The first form copies a file or directory to the root directory of
the partition:
@@ -412,6 +402,10 @@ DESCRIPTION
4 files 0 bytes
15 675 392 bytes free
+ The third form of the command copies a file or directory from the specified path
+ on the partition to the local filesystem:
+ $ wic cp tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/vmlinuz test
+
The -n option is used to specify the path to the native sysroot
containing the tools (parted and mtools) to use.
"""
@@ -437,6 +431,7 @@ NAME
SYNOPSIS
wic rm <src> <image>:<partition><path>
wic rm <src> <image>:<partition><path> --native-sysroot <path>
+ wic rm -r <image>:<partition><path>
DESCRIPTION
This command removes files or directories from the vfat or ext* partition of the
@@ -471,6 +466,9 @@ DESCRIPTION
The -n option is used to specify the path to the native sysroot
containing the tools (parted and mtools) to use.
+
+ The -r option is used to remove directories and their contents
+ recursively. This only applies to ext* partitions.
"""
wic_write_usage = """
@@ -493,7 +491,7 @@ NAME
SYNOPSIS
wic write <image> <target>
wic write <image> <target> --expand auto
- wic write <image> <target> --expand 1:100M-2:300M
+ wic write <image> <target> --expand 1:100M,2:300M
wic write <image> <target> --native-sysroot <path>
DESCRIPTION
@@ -504,7 +502,7 @@ DESCRIPTION
The --expand option is used to resize image partitions.
--expand auto expands partitions to occupy all free space available on the target device.
It's also possible to specify expansion rules in a format
- <partition>:<size>[-<partition>:<size>...] for one or more partitions.
+ <partition>:<size>[,<partition>:<size>...] for one or more partitions.
Specifying size 0 will keep partition unmodified.
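+ For example, --expand 1:0,2:300M keeps the first partition unmodified
+ and resizes the second partition to 300 megabytes.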
Note: Resizing boot partition can result in non-bootable image for non-EFI images. It is
recommended to use size 0 for boot partition to keep image bootable.
@@ -538,7 +536,8 @@ DESCRIPTION
Source plugins can also be implemented and added by external
layers - any plugins found in a scripts/lib/wic/plugins/source/
- directory in an external layer will also be made available.
+ or lib/wic/plugins/source/ directory in an external layer will
+ also be made available.
When the wic implementation needs to invoke a partition-specific
implementation, it looks for the plugin that has the same name as
@@ -638,7 +637,7 @@ DESCRIPTION
oe-core: directdisk.bbclass and mkefidisk.sh. The difference
between wic and those examples is that with wic the functionality
of those scripts is implemented by a general-purpose partitioning
- 'language' based on Redhat kickstart syntax).
+ 'language' based on Red Hat kickstart syntax).
The initial motivation and design considerations that lead to the
current tool are described exhaustively in Yocto Bug #3847
@@ -841,8 +840,8 @@ DESCRIPTION
meanings. The commands are based on the Fedora kickstart
documentation but with modifications to reflect wic capabilities.
- http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
- http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#part-or-partition
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#bootloader
Commands
@@ -931,6 +930,7 @@ DESCRIPTION
ext4
btrfs
squashfs
+ erofs
swap
--fsoptions: Specifies a free-form string of options to be
@@ -971,6 +971,29 @@ DESCRIPTION
is omitted, not the directory itself. This option only
has an effect with the rootfs source plugin.
+ --include-path: This option is specific to wic. It adds the contents
+ of the given path or a rootfs to the resulting image.
+ The option contains two fields, the origin and the
+ destination. When the origin is a rootfs, it follows
+ the same logic as the rootfs-dir argument and the
+ permissions and owners are kept. When the origin is a
+ path, it is relative to the directory in which wic is
+ running, not the rootfs itself, so use of an absolute
+ path is recommended; the owner and group are set to
+ root:root. If no destination is given, it is
+ automatically set to the root of the rootfs. This
+ option only has an effect with the rootfs source
+ plugin.
+
+ --change-directory: This option is specific to wic. It changes to the
+ given directory before copying the files. This
+ option is useful when we want to split a rootfs into
+ multiple partitions and keep the right
+ permissions and usernames in all the partitions.
+
+ --no-fstab-update: This option is specific to wic. It prevents the
+ stock '/etc/fstab' file from being updated for the given partition.
+
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
@@ -1061,3 +1084,59 @@ NAME
DESCRIPTION
Specify a help topic to display it. Topics are shown above.
"""
+
+
+wic_help = """
+Creates a customized OpenEmbedded image.
+
+Usage: wic [--version]
+ wic help [COMMAND or TOPIC]
+ wic COMMAND [ARGS]
+
+ usage 1: Returns the current version of Wic
+ usage 2: Returns detailed help for a COMMAND or TOPIC
+ usage 3: Executes COMMAND
+
+
+COMMAND:
+
+ list - List available canned images and source plugins
+ ls - List contents of partitioned image or partition
+ rm - Remove files or directories from the vfat or ext* partitions
+ help - Show help for a wic COMMAND or TOPIC
+ write - Write an image to a device
+ cp - Copy files and directories to/from the vfat or ext* partitions
+ create - Create a new OpenEmbedded image
+
+
+TOPIC:
+ overview - Presents an overview of Wic
+ plugins - Presents an overview and API for Wic plugins
+ kickstart - Presents a Wic kickstart file reference
+
+
+Examples:
+
+ $ wic --version
+
+ Returns the current version of Wic
+
+
+ $ wic help cp
+
+ Returns the SYNOPSIS and DESCRIPTION for the Wic "cp" command.
+
+
+ $ wic list images
+
+ Returns the list of canned images (i.e. *.wks files located in
+ the /scripts/lib/wic/canned-wks directory).
+
+
+ $ wic create mkefidisk -e core-image-minimal
+
+ Creates an EFI disk image from artifacts used in a previous
+ core-image-minimal build in standard BitBake locations
+ (e.g. Cooked Mode).
+
+"""
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index 08baf76123..0df9eb0d05 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -1,21 +1,8 @@
-#!/usr/bin/env python -tt
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides parser for kickstart format
@@ -64,26 +51,39 @@ class KickStartParser(ArgumentParser):
def error(self, message):
raise ArgumentError(None, message)
-def sizetype(arg):
- """
- Custom type for ArgumentParser
- Converts size string in <num>[K|k|M|G] format into the integer value
- """
- if arg.isdigit():
- return int(arg) * 1024
+def sizetype(default, size_in_bytes=False):
+ def f(arg):
+ """
+ Custom type for ArgumentParser
+ Converts size string in <num>[S|s|K|k|M|G] format into the integer value
+ """
+ try:
+ suffix = default
+ size = int(arg)
+ except ValueError:
+ try:
+ suffix = arg[-1:]
+ size = int(arg[:-1])
+ except ValueError:
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+
+ if size_in_bytes:
+ if suffix == 's' or suffix == 'S':
+ return size * 512
+ mult = 1024
+ else:
+ mult = 1
+
+ if suffix == "k" or suffix == "K":
+ return size * mult
+ if suffix == "M":
+ return size * mult * 1024
+ if suffix == "G":
+ return size * mult * 1024 * 1024
- if not arg[:-1].isdigit():
raise ArgumentTypeError("Invalid size: %r" % arg)
-
- size = int(arg[:-1])
- if arg.endswith("k") or arg.endswith("K"):
- return size
- if arg.endswith("M"):
- return size * 1024
- if arg.endswith("G"):
- return size * 1024 * 1024
-
- raise ArgumentTypeError("Invalid size: %r" % arg)
+ return f
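+# For example:
+#   sizetype("M")("10")       -> 10240 (10 MiB, stored in kiB)
+#   sizetype("M")("512K")     -> 512   (kiB)
+#   sizetype("K", True)("64") -> 65536 (64 kiB, stored in bytes)
+#   sizetype("K", True)("8s") -> 4096  (8 512-byte sectors, in bytes)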
def overheadtype(arg):
"""
@@ -149,12 +149,16 @@ class KickStart():
part.add_argument('mountpoint', nargs='?')
part.add_argument('--active', action='store_true')
part.add_argument('--align', type=int)
+ part.add_argument('--offset', type=sizetype("K", True))
part.add_argument('--exclude-path', nargs='+')
- part.add_argument("--extra-space", type=sizetype)
+ part.add_argument('--include-path', nargs='+', action='append')
+ part.add_argument('--change-directory')
+ part.add_argument("--extra-space", type=sizetype("M"))
part.add_argument('--fsoptions', dest='fsopts')
part.add_argument('--fstype', default='vfat',
choices=('ext2', 'ext3', 'ext4', 'btrfs',
- 'squashfs', 'vfat', 'msdos', 'swap'))
+ 'squashfs', 'vfat', 'msdos', 'erofs',
+ 'swap'))
part.add_argument('--mkfs-extraopts', default='')
part.add_argument('--label')
part.add_argument('--use-label', action='store_true')
@@ -164,14 +168,16 @@ class KickStart():
part.add_argument('--part-name')
part.add_argument('--part-type')
part.add_argument('--rootfs-dir')
+ part.add_argument('--type', default='primary',
+ choices=('primary', 'logical'))
# --size and --fixed-size cannot be specified together; options
# --extra-space and --overhead-factor should also raise a parser
# error, but since nesting mutually exclusive groups does not work,
# --extra-space/--overhead-factor are handled later
sizeexcl = part.add_mutually_exclusive_group()
- sizeexcl.add_argument('--size', type=sizetype, default=0)
- sizeexcl.add_argument('--fixed-size', type=sizetype, default=0)
+ sizeexcl.add_argument('--size', type=sizetype("M"), default=0)
+ sizeexcl.add_argument('--fixed-size', type=sizetype("M"), default=0)
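+ # Bare numbers default to megabytes for --size, --fixed-size and
+ # --extra-space (values are kept in kB), while a bare --offset value
+ # defaults to kilobytes and is converted to bytes.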
part.add_argument('--source')
part.add_argument('--sourceparams')
@@ -179,6 +185,7 @@ class KickStart():
part.add_argument('--use-uuid', action='store_true')
part.add_argument('--uuid')
part.add_argument('--fsuuid')
+ part.add_argument('--no-fstab-update', action='store_true')
bootloader = subparsers.add_parser('bootloader')
bootloader.add_argument('--append')
@@ -224,6 +231,27 @@ class KickStart():
err = "%s:%d: SquashFS does not support LABEL" \
% (confpath, lineno)
raise KickStartError(err)
+ # erofs does not support filesystem labels
+ if parsed.fstype == 'erofs' and parsed.label:
+ err = "%s:%d: erofs does not support LABEL" % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.fstype == 'msdos' or parsed.fstype == 'vfat':
+ if parsed.fsuuid:
+ if parsed.fsuuid.upper().startswith('0X'):
+ if len(parsed.fsuuid) > 10:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
+ elif len(parsed.fsuuid) > 8:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
if parsed.use_label and not parsed.label:
err = "%s:%d: Must set the label with --label" \
% (confpath, lineno)
@@ -256,6 +284,11 @@ class KickStart():
elif line.startswith('bootloader'):
if not self.bootloader:
self.bootloader = parsed
+ # Concatenate the strings set in APPEND
+ append_var = get_bitbake_var("APPEND")
+ if append_var:
+ self.bootloader.append = ' '.join(filter(None, \
+ (self.bootloader.append, append_var)))
else:
err = "%s:%d: more than one bootloader specified" \
% (confpath, lineno)
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index ee888b478c..3e11822996 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a place to collect various wic-related utils
@@ -30,9 +16,9 @@ import logging
import os
import re
import subprocess
+import shutil
from collections import defaultdict
-from distutils import spawn
from wic import WicError
@@ -40,6 +26,7 @@ logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+ "dumpe2fs": "e2fsprogs",
"grub-mkimage": "grub-efi",
"isohybrid": "syslinux",
"mcopy": "mtools",
@@ -59,7 +46,8 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"parted": "parted",
"sfdisk": "util-linux",
"sgdisk": "gptfdisk",
- "syslinux": "syslinux"
+ "syslinux": "syslinux",
+ "tar": "tar"
}
def runtool(cmdln_or_args):
@@ -126,6 +114,15 @@ def exec_cmd(cmd_and_args, as_shell=False):
"""
return _exec_cmd(cmd_and_args, as_shell)[1]
+def find_executable(cmd, paths):
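+ """
+ Return True if the command's recipe is listed in ASSUME_PROVIDED,
+ meaning the host tool is used directly; otherwise look the command
+ up on the given paths with shutil.which().
+ """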
+ recipe = cmd
+ if recipe in NATIVE_RECIPES:
+ recipe = NATIVE_RECIPES[recipe]
+ provided = get_bitbake_var("ASSUME_PROVIDED")
+ if provided and "%s-native" % recipe in provided:
+ return True
+
+ return shutil.which(cmd, path=paths)
def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
"""
@@ -142,15 +139,19 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
if pseudo:
cmd_and_args = pseudo + cmd_and_args
- native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
- (native_sysroot, native_sysroot, native_sysroot)
+ hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin:%s" % \
+ (native_sysroot, native_sysroot,
+ native_sysroot, native_sysroot,
+ hosttools_dir)
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
(native_paths, cmd_and_args)
logger.debug("exec_native_cmd: %s", native_cmd_and_args)
# If the command isn't in the native sysroot say we failed.
- if spawn.find_executable(args[0], native_paths):
+ if find_executable(args[0], native_paths):
ret, out = _exec_cmd(native_cmd_and_args, True)
else:
ret = 127
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index 3da7e23e61..09e491dd49 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013-2016 Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
@@ -44,6 +30,8 @@ class Partition():
self.device = None
self.extra_space = args.extra_space
self.exclude_path = args.exclude_path
+ self.include_path = args.include_path
+ self.change_directory = args.change_directory
self.fsopts = args.fsopts
self.fstype = args.fstype
self.label = args.label
@@ -52,6 +40,7 @@ class Partition():
self.mountpoint = args.mountpoint
self.no_table = args.no_table
self.num = None
+ self.offset = args.offset
self.overhead_factor = args.overhead_factor
self.part_name = args.part_name
self.part_type = args.part_type
@@ -64,6 +53,11 @@ class Partition():
self.use_uuid = args.use_uuid
self.uuid = args.uuid
self.fsuuid = args.fsuuid
+ self.type = args.type
+ self.no_fstab_update = args.no_fstab_update
+ self.updated_fstab_path = None
+ self.has_fstab = False
+ self.update_fstab_in_rootfs = False
self.lineno = lineno
self.source_file = ""
@@ -111,7 +105,7 @@ class Partition():
extra_blocks = self.extra_space
rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
+ rootfs_size = int(rootfs_size * self.overhead_factor)
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, self.mountpoint, rootfs_size)
@@ -128,11 +122,15 @@ class Partition():
return self.fixed_size if self.fixed_size else self.size
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
- bootimg_dir, kernel_dir, native_sysroot):
+ bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
+ self.updated_fstab_path = updated_fstab_path
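+ # For ext* and msdos the updated fstab is written into the finished
+ # filesystem image later (via debugfs or mcopy); any other fstype
+ # needs the fstab updated inside the rootfs before the image is made.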
+ if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"):
+ self.update_fstab_in_rootfs = True
+
if not self.source:
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
@@ -144,9 +142,9 @@ class Partition():
native_sysroot)
self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
else:
- if self.fstype == 'squashfs':
- raise WicError("It's not possible to create empty squashfs "
- "partition '%s'" % (self.mountpoint))
+ if self.fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to create empty %s "
+ "partition '%s'" % (self.fstype, self.mountpoint))
rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
@@ -173,7 +171,7 @@ class Partition():
# Split sourceparams string of the form key1=val1[,key2=val2,...]
# into a dict. Also accepts valueless keys i.e. without =
splitted = self.sourceparams.split(',')
- srcparams_dict = dict(par.split('=') for par in splitted if par)
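+ # (par.split('=', 1) + [None])[:2] turns 'key=val' into ('key', 'val')
+ # and a valueless 'key' into ('key', None).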
+ srcparams_dict = dict((par.split('=', 1) + [None])[:2] for par in splitted if par)
plugin = PluginMgr.get_plugins('source')[self.source]
plugin.do_configure_partition(self, srcparams_dict, creator,
@@ -202,46 +200,62 @@ class Partition():
(self.mountpoint, self.size, self.fixed_size))
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
- native_sysroot, real_rootfs = True):
+ native_sysroot, real_rootfs = True, pseudo_dir = None):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
Currently handles ext2/3/4, btrfs, vfat and squashfs.
"""
- p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
- p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
- "%s/../pseudo" % rootfs_dir)
- p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
- p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
- pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
- pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
- pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
- pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
- pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
- # Get rootfs size from bitbake variable if it's not set in .ks file
+ p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
+ if pseudo_dir:
+ # Canonicalize the ignore paths. This corresponds to
+ # calling oe.path.canonicalize(), which is used in bitbake.conf.
+ ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",")
+ canonical_paths = []
+ for path in ignore_paths:
+ if "$" not in path:
+ trailing_slash = path.endswith("/") and "/" or ""
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+ ignore_paths = ",".join(canonical_paths)
+
+ pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ else:
+ pseudo = None
+
if not self.size and real_rootfs:
- # Bitbake variable ROOTFS_SIZE is calculated in
- # Image._get_rootfs_size method from meta/lib/oe/image.py
- # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
- # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ # The rootfs size is not set in .ks file so try to get it
+ # from bitbake variable
rsize_bb = get_bitbake_var('ROOTFS_SIZE')
- if rsize_bb:
- logger.warning('overhead-factor was specified, but size was not,'
- ' so bitbake variables will be used for the size.'
- ' In this case both IMAGE_OVERHEAD_FACTOR and '
- '--overhead-factor will be applied')
+ rdir = get_bitbake_var('IMAGE_ROOTFS')
+ if rsize_bb and rdir == rootfs_dir:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
self.size = int(round(float(rsize_bb)))
+ else:
+ # Bitbake variable ROOTFS_SIZE is not defined so compute it
+ # from the rootfs_dir size using the same logic found in
+ # get_rootfs_size() from meta/classes/image.bbclass
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ self.size = int(out.split()[0])
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
- method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo)
self.source_file = rootfs
# get the rootfs size in the right units for kickstart (kB)
@@ -249,7 +263,7 @@ class Partition():
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
- def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
@@ -273,10 +287,21 @@ class Partition():
(self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ with open(debugfs_script_path, "w") as f:
+ f.write("cd etc\n")
+ f.write("rm fstab\n")
+ f.write("write %s fstab\n" % (self.updated_fstab_path))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
+ def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
@@ -299,7 +324,7 @@ class Partition():
self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a msdos/vfat rootfs partition.
@@ -315,25 +340,27 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
(label_str, self.fsuuid, size_str, extraopts, rootfs,
- max(8250, rootfs_size))
+ rootfs_size)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
exec_native_cmd(mcopy_cmd, native_sysroot)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_rootfs_vfat = prepare_rootfs_msdos
- def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
@@ -343,6 +370,16 @@ class Partition():
(rootfs_dir, rootfs, extraopts)
exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
+ def prepare_rootfs_erofs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a erofs rootfs partition.
+ """
+ extraopts = self.mkfs_extraopts or ''
+ erofs_cmd = "mkfs.erofs %s -U %s %s %s" % \
+ (extraopts, self.fsuuid, rootfs, rootfs_dir)
+ exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
+
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -362,6 +399,8 @@ class Partition():
(self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -392,8 +431,6 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
@@ -423,3 +460,37 @@ class Partition():
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
exec_native_cmd(mkswap_cmd, native_sysroot)
+
+ def check_for_Y2038_problem(self, rootfs, native_sysroot):
+ """
+ Check if the filesystem is affected by the Y2038 problem
+ (Y2038 problem = 32 bit time_t overflow in January 2038)
+ """
+ def get_err_str(part):
+ err = "The {} filesystem {} has no Y2038 support."
+ if part.mountpoint:
+ args = [part.fstype, "mounted at %s" % part.mountpoint]
+ elif part.label:
+ args = [part.fstype, "labeled '%s'" % part.label]
+ elif part.part_name:
+ args = [part.fstype, "in partition '%s'" % part.part_name]
+ else:
+ args = [part.fstype, "in partition %s" % part.num]
+ return err.format(*args)
+
+ # ext2 and ext3 are always affected by the Y2038 problem
+ if self.fstype in ["ext2", "ext3"]:
+ logger.warning(get_err_str(self))
+ return
+
+ ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot)
+
+ # Whether ext4 is affected by the Y2038 problem depends on the inode size
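+ # (ext4 stores 64-bit timestamps in the extra inode space, which is
+ # only present when the inode size is at least 256 bytes)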
+ for line in out.splitlines():
+ if line.startswith("Inode size:"):
+ size = int(line.split(":")[1].strip())
+ if size < 256:
+ logger.warn("%s Inodes (of size %d) are too small." %
+ (get_err_str(self), size))
+ break
+
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index 686d2fee3b..b64568339b 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -1,34 +1,26 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
+import types
from collections import defaultdict
-from importlib.machinery import SourceFileLoader
+import importlib
+import importlib.util
from wic import WicError
from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
-SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
@@ -48,10 +40,11 @@ class PluginMgr:
cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
layers = get_bitbake_var("BBLAYERS") or ''
for layer_path in layers.split():
- path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
- path = os.path.abspath(os.path.expanduser(path))
- if path not in cls._plugin_dirs and os.path.isdir(path):
- cls._plugin_dirs.insert(0, path)
+ for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
+ path = os.path.join(layer_path, script_plugin_dir)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
if ptype not in PLUGINS:
# load all ptype plugins
@@ -63,7 +56,9 @@ class PluginMgr:
mname = fname[:-3]
mpath = os.path.join(ppath, fname)
logger.debug("loading plugin module %s", mpath)
- SourceFileLoader(mname, mpath).load_module()
+ spec = importlib.util.spec_from_file_location(mname, mpath)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
return PLUGINS.get(ptype)
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index bb14a334b2..35fff7c102 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'direct' imager plugin class for 'wic'
@@ -63,20 +49,21 @@ class DirectPlugin(ImagerPlugin):
# parse possible 'rootfs=name' items
self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
- self.replaced_rootfs_paths = {}
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.oe_builddir = oe_builddir
+ self.debug = options.debug
self.outdir = options.outdir
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
+ self.updated_fstab_path = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
- self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self.workdir = self.setup_workdir(options.workdir)
self._image = None
self.ptable_format = self.ks.bootloader.ptable
self.parts = self.ks.partitions
@@ -90,7 +77,18 @@ class DirectPlugin(ImagerPlugin):
image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
self._image = PartitionedImage(image_path, self.ptable_format,
- self.parts, self.native_sysroot)
+ self.parts, self.native_sysroot,
+ options.extra_space)
+
+ def setup_workdir(self, workdir):
+ if workdir:
+ if os.path.exists(workdir):
+ raise WicError("Internal workdir '%s' specified in wic arguments already exists!" % (workdir))
+
+ os.makedirs(workdir)
+ return workdir
+ else:
+ return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
def do_create(self):
"""
@@ -104,11 +102,8 @@ class DirectPlugin(ImagerPlugin):
finally:
self.cleanup()
- def _write_fstab(self, image_rootfs):
- """overriden to generate fstab (temporarily) in rootfs. This is called
- from _create, make sure it doesn't get called from
- BaseImage.create()
- """
+ def update_fstab(self, image_rootfs):
+ """Assume partition order same as in wks"""
if not image_rootfs:
return
@@ -119,30 +114,10 @@ class DirectPlugin(ImagerPlugin):
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
- if self._update_fstab(fstab_lines, self.parts):
- # copy rootfs dir to workdir to update fstab
- # as rootfs can be used by other tasks and can't be modified
- new_pseudo = os.path.realpath(os.path.join(self.workdir, "pseudo"))
- from_dir = os.path.join(os.path.join(image_rootfs, ".."), "pseudo")
- from_dir = os.path.realpath(from_dir)
- copyhardlinktree(from_dir, new_pseudo)
- new_rootfs = os.path.realpath(os.path.join(self.workdir, "rootfs_copy"))
- copyhardlinktree(image_rootfs, new_rootfs)
- fstab_path = os.path.join(new_rootfs, 'etc/fstab')
-
- os.unlink(fstab_path)
-
- with open(fstab_path, "w") as fstab:
- fstab.writelines(fstab_lines)
-
- return new_rootfs
-
- def _update_fstab(self, fstab_lines, parts):
- """Assume partition order same as in wks"""
updated = False
- for part in parts:
+ for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/":
+ or part.mountpoint == "/" or not part.mountpoint.startswith('/'):
continue
if part.use_uuid:
@@ -169,7 +144,10 @@ class DirectPlugin(ImagerPlugin):
fstab_lines.append(line)
updated = True
- return updated
+ if updated:
+ self.updated_fstab_path = os.path.join(self.workdir, "fstab")
+ with open(self.updated_fstab_path, "w") as f:
+ f.writelines(fstab_lines)
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
@@ -184,14 +162,8 @@ class DirectPlugin(ImagerPlugin):
filesystems from the artifacts directly and combine them into
a partitioned image.
"""
- if self.no_fstab_update:
- new_rootfs = None
- else:
- new_rootfs = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
- if new_rootfs:
- # rootfs was copied to update fstab
- self.replaced_rootfs_paths[new_rootfs] = self.rootfs_dir['ROOTFS_DIR']
- self.rootfs_dir['ROOTFS_DIR'] = new_rootfs
+ if not self.no_fstab_update:
+ self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
@@ -267,8 +239,6 @@ class DirectPlugin(ImagerPlugin):
else:
suffix = '["%s"]:' % (part.mountpoint or part.label)
rootdir = part.rootfs_dir
- if rootdir in self.replaced_rootfs_paths:
- rootdir = self.replaced_rootfs_paths[rootdir]
msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
@@ -289,6 +259,8 @@ class DirectPlugin(ImagerPlugin):
if part.mountpoint == "/":
if part.uuid:
return "PARTUUID=%s" % part.uuid
+ elif part.label:
+ return "PARTLABEL=%s" % part.label
else:
suffix = 'p' if part.disk.startswith('mmcblk') else ''
return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
@@ -306,8 +278,9 @@ class DirectPlugin(ImagerPlugin):
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
- # remove work directory
- shutil.rmtree(self.workdir, ignore_errors=True)
+ # remove work directory when it is not in debugging mode
+ if not self.debug:
+ shutil.rmtree(self.workdir, ignore_errors=True)
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
@@ -323,10 +296,14 @@ class PartitionedImage():
Partitioned image in a file.
"""
- def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+ def __init__(self, path, ptable_format, partitions, native_sysroot=None, extra_space=0):
self.path = path # Path to the image file
self.numpart = 0 # Number of allocated partitions
self.realpart = 0 # Number of partitions in the partition table
+ self.primary_part_num = 0 # Number of primary partitions (msdos)
+ self.extendedpart = 0 # Create extended partition before this logical partition (msdos)
+ self.extended_size_sec = 0 # Size of extended partition (msdos)
+ self.logical_part_cnt = 0 # Number of total logical partitions (msdos)
self.offset = 0 # Offset of next partition (in sectors)
self.min_size = 0 # Minimum required disk size to fit
# all partitions (in bytes)
@@ -339,6 +316,8 @@ class PartitionedImage():
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self.native_sysroot = native_sysroot
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
+ self.extra_space = extra_space
# calculate the real partition number, accounting for partitions not
# in the partition table and logical partitions
@@ -348,7 +327,7 @@ class PartitionedImage():
part.realnum = 0
else:
realnum += 1
- if self.ptable_format == 'msdos' and realnum > 3 and len(partitions) > 4:
+ if self.ptable_format == 'msdos' and realnum > 3 and num_real_partitions > 4:
part.realnum = realnum + 1
continue
part.realnum = realnum
@@ -365,6 +344,13 @@ class PartitionedImage():
part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
else:
part.fsuuid = str(uuid.uuid4())
+ else:
+ # Make sure the fsuuid for vfat/msdos aligns with the 0xYYYYYYYY format
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ if part.fsuuid.upper().startswith("0X"):
+ part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0")
+ else:
+ part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0")
def prepare(self, imager):
"""Prepare an image. Call prepare method of all image partitions."""
@@ -373,7 +359,8 @@ class PartitionedImage():
# sizes before we can add them and do the layout.
part.prepare(imager, imager.workdir, imager.oe_builddir,
imager.rootfs_dir, imager.bootimg_dir,
- imager.kernel_dir, imager.native_sysroot)
+ imager.kernel_dir, imager.native_sysroot,
+ imager.updated_fstab_path)
# Converting kB to sectors for parted
part.size_sec = part.disk_size * 1024 // self.sector_size
@@ -418,12 +405,16 @@ class PartitionedImage():
# Skip one sector required for the partitioning scheme overhead
self.offset += overhead
- if self.realpart > 3 and num_real_partitions > 4:
+ if self.ptable_format == "msdos":
+ if self.primary_part_num > 3 or \
+ (self.extendedpart == 0 and self.primary_part_num >= 3 and num_real_partitions > 4):
+ part.type = 'logical'
# Reserve a sector for EBR for every logical partition
# before alignment is performed.
- if self.ptable_format == "msdos":
- self.offset += 1
+ if part.type == 'logical':
+ self.offset += 2
+ align_sectors = 0
if part.align:
# If not first partition and we do have alignment set we need
# to align the partition.
@@ -446,21 +437,43 @@ class PartitionedImage():
# increase the offset so we actually start the partition on right alignment
self.offset += align_sectors
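+ # An explicit --offset must land on a sector boundary and may not
+ # point below the next free sector computed so far.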
+ if part.offset is not None:
+ offset = part.offset // self.sector_size
+
+ if offset * self.sector_size != part.offset:
+ raise WicError("Could not place %s%s at offset %d with sector size %d" % (part.disk, self.numpart, part.offset, self.sector_size))
+
+ delta = offset - self.offset
+ if delta < 0:
+ raise WicError("Could not place %s%s at offset %d: next free sector is %d (delta: %d)" % (part.disk, self.numpart, part.offset, self.offset, delta))
+
+ logger.debug("Skipping %d sectors to place %s%s at offset %dK",
+ delta, part.disk, self.numpart, part.offset)
+
+ self.offset = offset
+
part.start = self.offset
self.offset += part.size_sec
- part.type = 'primary'
if not part.no_table:
part.num = self.realpart
else:
part.num = 0
- if self.ptable_format == "msdos":
- # only count the partitions that are in partition table
- if num_real_partitions > 4:
- if self.realpart > 3:
- part.type = 'logical'
- part.num = self.realpart + 1
+ if self.ptable_format == "msdos" and not part.no_table:
+ if part.type == 'logical':
+ self.logical_part_cnt += 1
+ part.num = self.logical_part_cnt + 4
+ if self.extendedpart == 0:
+ # Create extended partition as a primary partition
+ self.primary_part_num += 1
+ self.extendedpart = part.num
+ else:
+ self.extended_size_sec += align_sectors
+ self.extended_size_sec += part.size_sec + 2
+ else:
+ self.primary_part_num += 1
+ part.num = self.primary_part_num
logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
"sectors (%d bytes).", part.mountpoint, part.disk,
@@ -474,6 +487,7 @@ class PartitionedImage():
self.min_size += GPT_OVERHEAD
self.min_size *= self.sector_size
+ self.min_size += self.extra_space
def _create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
@@ -510,7 +524,7 @@ class PartitionedImage():
if part.num == 0:
continue
- if self.ptable_format == "msdos" and part.num == 5:
+ if self.ptable_format == "msdos" and part.num == self.extendedpart:
# Create an extended partition (note: extended
# partition is described in MBR and contains all
# logical partitions). The logical partitions save a
@@ -523,8 +537,8 @@ class PartitionedImage():
# add a sector at the back, so that there is enough
# room for all logical partitions.
self._create_partition(self.path, "extended",
- None, part.start - 1,
- self.offset - part.start + 1)
+ None, part.start - 2,
+ self.extended_size_sec)
if part.fstype == "swap":
parted_fs_type = "linux-swap"
@@ -591,9 +605,7 @@ class PartitionedImage():
self.native_sysroot)
def cleanup(self):
- # remove partition images
- for image in set(self.partimages):
- os.remove(image)
+ pass
def assemble(self):
logger.debug("Installing partitions")
diff --git a/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
new file mode 100644
index 0000000000..5bd7390680
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
@@ -0,0 +1,213 @@
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-biosplusefi' source plugin class for 'wic'
+#
+# AUTHORS
+# William Bourque <wbourque [at) gmail.com>
+
+import os
+import types
+
+from wic.pluginbase import SourcePlugin
+from importlib.machinery import SourceFileLoader
+
+class BootimgBiosPlusEFIPlugin(SourcePlugin):
+ """
+ Create MBR + EFI boot partition
+
+ This plugin creates a boot partition that contains both
+ legacy BIOS and EFI content. It will be able to boot from both.
+ This is useful when managing a PC fleet with some older machines
+ without EFI support.
+
+ Note it is possible to create an image that can boot from both
+ legacy BIOS and EFI by defining two partitions : one with arg
+ --source bootimg-efi and another one with --source bootimg-pcbios.
+ However, this method has the obvious downside that it requires TWO
+ partitions to be created on the storage device.
+ Both partitions will also be marked as "bootable", which does not work on
+ most BIOSes, as the BIOS often uses the "bootable" flag to determine
+ what to boot. If you have such a BIOS, you need to manually remove the
+ "bootable" flag from the EFI partition for the drive to be bootable.
+ Having two partitions also seems to confuse wic: the content of
+ the first partition will be duplicated into the second, even though it
+ will not be used at all.
+
+ Also, unlike "isoimage-isohybrid" that also does BIOS and EFI, this plugin
+ allows you to have more than only a single rootfs partitions and does
+ not turn the rootfs into an initramfs RAM image.
+
+ This plugin is made to put everything into a single /boot partition so it
+ does not have the limitations listed above.
+
+ The plugin is written so that it tries not to reimplement what has
+ already been done in other plugins; as such it imports "bootimg-pcbios"
+ and "bootimg-efi".
+ Plugin "bootimg-pcbios" is used to generate the legacy BIOS boot.
+ Plugin "bootimg-efi" is used to generate the UEFI boot. Note that it
+ requires a --sourceparams argument to know which loader to use; refer
+ to the "bootimg-efi" code/documentation for the list of loaders.
+
+ Imports are handled with "SourceFileLoader" from importlib as it is
+ otherwise very difficult to import modules that have a hyphen "-" in
+ their filename.
+ The SourcePlugin() methods used in the plugins (do_install_disk,
+ do_configure_partition, do_prepare_partition) are then called on both,
+ beginning by "bootimg-efi".
+
+ Plugin options, such as "--sourceparams", can still be passed to a
+ plugin, as long as they do not cause issues in the other plugin.
+
+ Example wic configuration:
+ part /boot --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\\
+ --ondisk sda --label os_boot --active --align 1024 --use-uuid
+ """
+
+ name = 'bootimg-biosplusefi'
+
+ __PCBIOS_MODULE_NAME = "bootimg-pcbios"
+ __EFI_MODULE_NAME = "bootimg-efi"
+
+ __imgEFIObj = None
+ __imgBiosObj = None
+
+ @classmethod
+ def __init__(cls):
+ """
+ Constructor (init)
+ """
+
+ # XXX
+ # For some reason, the __init__ constructor is never called.
+ # Something to do with how pluginbase works?
+ cls.__instanciateSubClasses()
+
+ @classmethod
+ def __instanciateSubClasses(cls):
+ """
+
+ """
+
+ # Import bootimg-pcbios (class name "BootimgPcbiosPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__PCBIOS_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__PCBIOS_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgBiosObj = mod.BootimgPcbiosPlugin()
+
+ # Import bootimg-efi (class name "BootimgEFIPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__EFI_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__EFI_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgEFIObj = mod.BootimgEFIPlugin()
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image.
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition()
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+
+ if not cls.__imgEFIObj or not cls.__imgBiosObj:
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 83a7e189ed..0391aebdc8 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
@@ -26,7 +12,11 @@
import logging
import os
+import tempfile
import shutil
+import re
+
+from glob import glob
from wic import WicError
from wic.engine import get_custom_config
@@ -69,8 +59,10 @@ class BootimgEFIPlugin(SourcePlugin):
if not bootimg_dir:
raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
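+ # The initrd parameter may name several images separated by ';';
+ # copy each of them into the boot directory.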
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
else:
logger.debug("Ignoring missing initrd")
@@ -85,13 +77,26 @@ class BootimgEFIPlugin(SourcePlugin):
grubefi_conf += "timeout=%s\n" % bootloader.timeout
grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s root=%s rootwait %s\n" \
- % (kernel, creator.rootdev, bootloader.append)
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ grubefi_conf += "linux /%s %s rootwait %s\n" \
+ % (kernel, label_conf, bootloader.append)
if initrd:
- grubefi_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ grubefi_conf += "initrd"
+ for rd in initrds:
+ grubefi_conf += " /%s" % rd
+ grubefi_conf += "\n"
grubefi_conf += "}\n"
@@ -115,19 +120,22 @@ class BootimgEFIPlugin(SourcePlugin):
bootloader = creator.ks.bootloader
loader_conf = ""
- loader_conf += "default boot\n"
+ if source_params.get('create-unified-kernel-image') != "true":
+ loader_conf += "default boot\n"
loader_conf += "timeout %d\n" % bootloader.timeout
initrd = source_params.get('initrd')
- if initrd:
+ if initrd and source_params.get('create-unified-kernel-image') != "true":
# obviously we need to have a common deploy var
bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
if not bootimg_dir:
raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
else:
logger.debug("Ignoring missing initrd")
@@ -152,23 +160,37 @@ class BootimgEFIPlugin(SourcePlugin):
if not custom_cfg:
# Create systemd-boot configuration using parameters from wks file
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
title = source_params.get('title')
boot_conf = ""
boot_conf += "title %s\n" % (title if title else "boot")
- boot_conf += "linux %s\n" % kernel
- boot_conf += "options LABEL=Boot root=%s %s\n" % \
- (creator.rootdev, bootloader.append)
+ boot_conf += "linux /%s\n" % kernel
+
+ label = source_params.get('label')
+ label_conf = "LABEL=Boot root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ boot_conf += "options %s %s\n" % \
+ (label_conf, bootloader.append)
if initrd:
- boot_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ for rd in initrds:
+ boot_conf += "initrd /%s\n" % rd
- logger.debug("Writing systemd-boot config "
- "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
- cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
- cfg.write(boot_conf)
- cfg.close()
+ if source_params.get('create-unified-kernel-image') != "true":
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
+ cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
+ cfg.write(boot_conf)
+ cfg.close()
@classmethod
@@ -193,6 +215,57 @@ class BootimgEFIPlugin(SourcePlugin):
except KeyError:
raise WicError("bootimg-efi requires a loader, none specified")
+ if get_bitbake_var("IMAGE_EFI_BOOT_FILES") is None:
+ logger.debug('No boot files defined in IMAGE_EFI_BOOT_FILES')
+ else:
+ boot_files = None
+ for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
+ if fmt:
+ var = fmt % id
+ else:
+ var = ""
+
+ boot_files = get_bitbake_var("IMAGE_EFI_BOOT_FILES" + var)
+ if boot_files:
+ break
+
+ logger.debug('Boot files: %s', boot_files)
+
+ # list of tuples (src_name, dst_name)
+ deploy_files = []
+ for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
+ if ';' in src_entry:
+ dst_entry = tuple(src_entry.split(';'))
+ if not dst_entry[0] or not dst_entry[1]:
+ raise WicError('Malformed boot file entry: %s' % src_entry)
+ else:
+ dst_entry = (src_entry, src_entry)
+
+ logger.debug('Destination entry: %r', dst_entry)
+ deploy_files.append(dst_entry)
+
+ cls.install_task = []
+ for deploy_entry in deploy_files:
+ src, dst = deploy_entry
+ if '*' in src:
+ # by default install files under their basename
+ entry_name_fn = os.path.basename
+ if dst != src:
+ # unless a target name was given, then treat name
+ # as a directory and append a basename
+ entry_name_fn = lambda name: \
+ os.path.join(dst,
+ os.path.basename(name))
+
+ srcs = glob(os.path.join(kernel_dir, src))
+
+ logger.debug('Globbed sources: %s', ', '.join(srcs))
+ for entry in srcs:
+ src = os.path.relpath(entry, kernel_dir)
+ entry_dst_name = entry_name_fn(entry)
+ cls.install_task.append((src, entry_dst_name))
+ else:
+ cls.install_task.append((src, dst))
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -212,10 +285,73 @@ class BootimgEFIPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (staging_kernel_dir, hdddir)
- exec_cmd(install_cmd)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ if source_params.get('create-unified-kernel-image') == "true":
+ initrd = source_params.get('initrd')
+ if not initrd:
+ raise WicError("initrd= must be specified when create-unified-kernel-image=true, exiting")
+
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ efi_stub = glob("%s/%s" % (deploy_dir, "linux*.efi.stub"))
+ if len(efi_stub) == 0:
+ raise WicError("Unified Kernel Image EFI stub not found, exiting")
+ efi_stub = efi_stub[0]
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ bootloader = creator.ks.bootloader
+ cmdline = open("%s/cmdline" % tmp_dir, "w")
+ cmdline.write("%s %s" % (label_conf, bootloader.append))
+ cmdline.close()
+
+ initrds = initrd.split(';')
+ initrd = open("%s/initrd" % tmp_dir, "wb")
+ for f in initrds:
+ with open("%s/%s" % (deploy_dir, f), 'rb') as in_file:
+ shutil.copyfileobj(in_file, initrd)
+ initrd.close()
+
+ # Searched by systemd-boot:
+ # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
+ install_cmd = "install -d %s/EFI/Linux" % hdddir
+ exec_cmd(install_cmd)
+
+ staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
+
+ # https://www.freedesktop.org/software/systemd/man/systemd-stub.html
+ objcopy_cmd = "objcopy \
+ --add-section .osrel=%s --change-section-vma .osrel=0x20000 \
+ --add-section .cmdline=%s --change-section-vma .cmdline=0x30000 \
+ --add-section .linux=%s --change-section-vma .linux=0x2000000 \
+ --add-section .initrd=%s --change-section-vma .initrd=0x3000000 \
+ %s %s" % \
+ ("%s/usr/lib/os-release" % staging_dir_host,
+ cmdline.name,
+ "%s/%s" % (staging_kernel_dir, kernel),
+ initrd.name,
+ efi_stub,
+ "%s/EFI/Linux/linux.efi" % hdddir)
+ exec_cmd(objcopy_cmd)
+ else:
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
+ exec_cmd(install_cmd)
+ if get_bitbake_var("IMAGE_EFI_BOOT_FILES"):
+ for src_path, dst_path in cls.install_task:
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (os.path.join(kernel_dir, src_path),
+ os.path.join(hdddir, dst_path))
+ exec_cmd(install_cmd)
try:
if source_params['loader'] == 'grub-efi':
@@ -258,8 +394,10 @@ class BootimgEFIPlugin(SourcePlugin):
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
- dosfs_cmd = "mkdosfs -n efi -i %s -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "ESP"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
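
A minimal sketch of how the options introduced above might be driven from a build configuration and a wks file; the file names and label here are assumptions for illustration only:

# local.conf: extra artifacts for the ESP; a 'src;dst' pair renames on install
IMAGE_EFI_BOOT_FILES = "boot.scr uEnv.txt;EFI/BOOT/uEnv.txt"

# wks: assemble a type-2 unified kernel image with systemd-boot
part /boot --source bootimg-efi --sourceparams="loader=systemd-boot,create-unified-kernel-image=true,initrd=microcode.cpio" --label boot --active --align 1024
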
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index ddc880be36..5dbe2558d2 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-partition' source plugin class for
@@ -154,7 +141,7 @@ class BootimgPartitionPlugin(SourcePlugin):
break
if not kernel_name:
- raise WicError('No kernel file founded')
+ raise WicError('No kernel file found')
# Compose the extlinux.conf
extlinux_conf = "default Yocto\n"
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index 9347aa7fcb..32e47f1831 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
@@ -163,8 +149,14 @@ class BootimgPcbiosPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
- (staging_kernel_dir, hdddir),
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
+ (staging_kernel_dir, kernel, hdddir),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
@@ -194,8 +186,10 @@ class BootimgPcbiosPlugin(SourcePlugin):
# dosfs image, created by mkdosfs
bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
- dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "boot"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
diff --git a/scripts/lib/wic/plugins/source/empty.py b/scripts/lib/wic/plugins/source/empty.py
new file mode 100644
index 0000000000..041617d648
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/empty.py
@@ -0,0 +1,32 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+# The empty wic plugin is used to create unformatted empty partitions for wic
+# images.
+# To use it, pass "empty" as the argument of the "--source" parameter in the
+# wks file. For example:
+# part foo --source empty --ondisk sda --size="1024" --align 1024
+
+import logging
+
+from wic.pluginbase import SourcePlugin
+
+logger = logging.getLogger('wic')
+
+class EmptyPartitionPlugin(SourcePlugin):
+ """
+ Populate unformatted empty partition.
+ """
+
+ name = 'empty'
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition, i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ return
diff --git a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 170077c22c..afc9ea0f8f 100644
--- a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
@@ -83,8 +70,13 @@ class IsoImagePlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/bzImage"
- syslinux_conf += "KERNEL " + kernel + "\n"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ syslinux_conf += "KERNEL /" + kernel + "\n"
syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
% bootloader.append
@@ -127,9 +119,13 @@ class IsoImagePlugin(SourcePlugin):
grubefi_conf += "\n"
grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s rootwait %s\n" \
+ grubefi_conf += "linux /%s rootwait %s\n" \
% (kernel, bootloader.append)
grubefi_conf += "initrd /initrd \n"
grubefi_conf += "}\n"
@@ -220,6 +216,18 @@ class IsoImagePlugin(SourcePlugin):
creator.name = source_params['image_name'].strip()
logger.debug("The name of the image is: %s", creator.name)
+ @staticmethod
+ def _install_payload(source_params, iso_dir):
+ """
+ Copies contents of payload directory (as specified in 'payload_dir' param) into iso_dir
+ """
+
+ if source_params.get('payload_dir'):
+ payload_dir = source_params['payload_dir']
+
+ logger.debug("Payload directory: %s", payload_dir)
+ shutil.copytree(payload_dir, iso_dir, symlinks=True, dirs_exist_ok=True)
+
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -232,6 +240,8 @@ class IsoImagePlugin(SourcePlugin):
isodir = "%s/ISO" % cr_workdir
+ cls._install_payload(source_params, isodir)
+
if part.rootfs_dir is None:
if not 'ROOTFS_DIR' in rootfs_dir:
raise WicError("Couldn't find --rootfs-dir, exiting.")
@@ -281,9 +291,14 @@ class IsoImagePlugin(SourcePlugin):
if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
os.remove("%s/initrd.cpio.gz" % cr_workdir)
- # Install bzImage
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (kernel_dir, isodir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (kernel_dir, kernel, isodir, kernel)
exec_cmd(install_cmd)
#Create bootloader for efi boot
@@ -335,19 +350,23 @@ class IsoImagePlugin(SourcePlugin):
(img_iso_dir, isodir)
exec_cmd(install_cmd)
else:
+ # Default to 100 blocks of extra space for file system overhead
+ esp_extra_blocks = int(source_params.get('esp_extra_blocks', '100'))
+
du_cmd = "du -bks %s/EFI" % isodir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
- # Add some extra space for file system overhead
- blocks += 100
+ blocks += esp_extra_blocks
logger.debug("Added 100 extra blocks to %s to get to %d "
"total blocks", part.mountpoint, blocks)
# dosfs image for EFI boot
bootimg = "%s/efi.img" % isodir
- dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
- % (bootimg, blocks)
+ esp_label = source_params.get('esp_label', 'EFIimg')
+
+ dosfs_cmd = 'mkfs.vfat -n \'%s\' -S 512 -C %s %d' \
+ % (esp_label, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mmd_cmd = "mmd -i %s ::/EFI" % bootimg
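
The new ISO source parameters could be combined in a single wks line, e.g. this sketch with illustrative values:

part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,esp_extra_blocks=200,esp_label=EFI,payload_dir=/path/to/payload"

Here esp_extra_blocks overrides the 100-block default reserved for filesystem overhead and esp_label replaces the formerly hard-coded 'EFIimg' volume label.
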
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
index e86398ac8f..7c90cd3cf8 100644
--- a/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -1,22 +1,11 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
+import signal
+import subprocess
from wic import WicError
from wic.pluginbase import SourcePlugin
@@ -42,15 +31,34 @@ class RawCopyPlugin(SourcePlugin):
cmd = 'btrfs filesystem label %s %s' % (dst, label)
elif fstype == 'swap':
cmd = 'mkswap -L %s %s' % (label, dst)
- elif fstype == 'squashfs':
- raise WicError("It's not possible to update a squashfs "
- "filesystem label '%s'" % (label))
+ elif fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to update a %s "
+ "filesystem label '%s'" % (fstype, label))
else:
raise WicError("Cannot update filesystem label: "
"Unknown fstype: '%s'" % (fstype))
exec_cmd(cmd)
+ @staticmethod
+ def do_image_uncompression(src, dst, workdir):
+ def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ # SIGPIPE errors are known issues with gzip/bash
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ extension = os.path.splitext(src)[1]
+ decompressor = {
+ ".bz2": "bzip2",
+ ".gz": "gzip",
+ ".xz": "xz"
+ }.get(extension)
+ if not decompressor:
+ raise WicError("Not supported compressor filename extension: %s" % extension)
+ cmd = "%s -dc %s > %s" % (decompressor, src, dst)
+ subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=workdir)
+
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -69,8 +77,17 @@ class RawCopyPlugin(SourcePlugin):
if 'file' not in source_params:
raise WicError("No file specified")
- src = os.path.join(kernel_dir, source_params['file'])
- dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+ if 'unpack' in source_params:
+ img = os.path.join(kernel_dir, source_params['file'])
+ src = os.path.join(cr_workdir, os.path.splitext(source_params['file'])[0])
+ RawCopyPlugin.do_image_uncompression(img, src, cr_workdir)
+ else:
+ src = os.path.join(kernel_dir, source_params['file'])
+
+ dst = os.path.join(cr_workdir, "%s.%s" % (os.path.basename(source_params['file']), part.lineno))
+
+ if not os.path.exists(os.path.dirname(dst)):
+ os.makedirs(os.path.dirname(dst))
if 'skip' in source_params:
sparse_copy(src, dst, skip=int(source_params['skip']))
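
With the uncompression support above, a compressed artifact can be referenced directly; a hedged example (the image name is hypothetical):

part / --source rawcopy --sourceparams="file=core-image-minimal.ext4.gz,unpack=1"

The plugin keys only on the presence of 'unpack', picks the decompressor from the .gz/.bz2/.xz extension, and sparse-copies the decompressed file into the partition.
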
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index aec720fb22..2e34e715ca 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
@@ -31,10 +17,11 @@ import shutil
import sys
from oe.path import copyhardlinktree
+from pathlib import Path
from wic import WicError
from wic.pluginbase import SourcePlugin
-from wic.misc import get_bitbake_var
+from wic.misc import get_bitbake_var, exec_native_cmd
logger = logging.getLogger('wic')
@@ -46,6 +33,22 @@ class RootfsPlugin(SourcePlugin):
name = 'rootfs'
@staticmethod
+ def __validate_path(cmd, rootfs_dir, path):
+ if os.path.isabs(path):
+ logger.error("%s: Must be relative: %s" % (cmd, orig_path))
+ sys.exit(1)
+
+ # Disallow climbing outside of parent directory using '..',
+ # because doing so could be quite disastrous (we will delete the
+ # directory, or modify a directory outside OpenEmbedded).
+ full_path = os.path.realpath(os.path.join(rootfs_dir, path))
+ if not full_path.startswith(os.path.realpath(rootfs_dir)):
+ logger.error("%s: Must point inside the rootfs:" % (cmd, path))
+ sys.exit(1)
+
+ return full_path
+
+ @staticmethod
def __get_rootfs_dir(rootfs_dir):
if os.path.isdir(rootfs_dir):
return os.path.realpath(rootfs_dir)
@@ -58,6 +61,15 @@ class RootfsPlugin(SourcePlugin):
return os.path.realpath(image_rootfs_dir)
+ @staticmethod
+ def __get_pseudo(native_sysroot, rootfs, pseudo_dir):
+ pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ return pseudo
+
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -82,45 +94,140 @@ class RootfsPlugin(SourcePlugin):
"it is not a valid path, exiting" % part.rootfs_dir)
part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+ part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab"))
+ pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo")
+ if not os.path.lexists(pseudo_dir):
+ logger.warning("%s folder does not exist. "
+ "Usernames and permissions will be invalid" % pseudo_dir)
+ pseudo_dir = None
new_rootfs = None
+ new_pseudo = None
# Handle excluded paths.
- if part.exclude_path is not None:
- # We need a new rootfs directory we can delete files from. Copy to
- # workdir.
+ if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs:
+ # We need a new rootfs directory we can safely modify without
+ # interfering with other tasks. Copy to workdir.
new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
if os.path.lexists(new_rootfs):
shutil.rmtree(os.path.join(new_rootfs))
- copyhardlinktree(part.rootfs_dir, new_rootfs)
+ if part.change_directory:
+ cd = part.change_directory
+ if cd[-1] == '/':
+ cd = cd[:-1]
+ orig_dir = cls.__validate_path("--change-directory", part.rootfs_dir, cd)
+ else:
+ orig_dir = part.rootfs_dir
+ copyhardlinktree(orig_dir, new_rootfs)
+
+ # Convert the pseudo directory to its new location
+ if pseudo_dir:
+ new_pseudo = os.path.realpath(
+ os.path.join(cr_workdir, "pseudo%d" % part.lineno))
+ if os.path.lexists(new_pseudo):
+ shutil.rmtree(new_pseudo)
+ os.mkdir(new_pseudo)
+ shutil.copy(os.path.join(pseudo_dir, "files.db"),
+ os.path.join(new_pseudo, "files.db"))
+
+ pseudo_cmd = "%s -B -m %s -M %s" % (cls.__get_pseudo(native_sysroot,
+ new_rootfs,
+ new_pseudo),
+ orig_dir, new_rootfs)
+ exec_native_cmd(pseudo_cmd, native_sysroot)
+
+ for in_path in part.include_path or []:
+ # parse arguments
+ include_path = in_path[0]
+ if len(in_path) > 2:
+ logger.error("'Invalid number of arguments for include-path")
+ sys.exit(1)
+ if len(in_path) == 2:
+ path = in_path[1]
+ else:
+ path = None
+
+ # Pack files to be included into a tar file.
+ # We need to create a tar file, because that way we can keep the
+ # permissions from the files even when they belong to different
+ # pseudo environments.
+ # If we simply copy files using copyhardlinktree/copytree... the
+ # copied files will belong to the user running wic.
+ tar_file = os.path.realpath(
+ os.path.join(cr_workdir, "include-path%d.tar" % part.lineno))
+ if os.path.isfile(include_path):
+ parent = os.path.dirname(os.path.realpath(include_path))
+ tar_cmd = "tar c --owner=root --group=root -f %s -C %s %s" % (
+ tar_file, parent, os.path.relpath(include_path, parent))
+ exec_native_cmd(tar_cmd, native_sysroot)
+ else:
+ if include_path in krootfs_dir:
+ include_path = krootfs_dir[include_path]
+ include_path = cls.__get_rootfs_dir(include_path)
+ include_pseudo = os.path.join(include_path, "../pseudo")
+ if os.path.lexists(include_pseudo):
+ pseudo = cls.__get_pseudo(native_sysroot, include_path,
+ include_pseudo)
+ tar_cmd = "tar cf %s -C %s ." % (tar_file, include_path)
+ else:
+ pseudo = None
+ tar_cmd = "tar c --owner=root --group=root -f %s -C %s ." % (
+ tar_file, include_path)
+ exec_native_cmd(tar_cmd, native_sysroot, pseudo)
+
+ # create destination
+ if path:
+ destination = cls.__validate_path("--include-path", new_rootfs, path)
+ Path(destination).mkdir(parents=True, exist_ok=True)
+ else:
+ destination = new_rootfs
+
+ # extract into destination
+ untar_cmd = "tar xf %s -C %s" % (tar_file, destination)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(untar_cmd, native_sysroot, pseudo)
+ os.remove(tar_file)
- for orig_path in part.exclude_path:
+ for orig_path in part.exclude_path or []:
path = orig_path
- if os.path.isabs(path):
- logger.error("Must be relative: --exclude-path=%s" % orig_path)
- sys.exit(1)
- full_path = os.path.realpath(os.path.join(new_rootfs, path))
+ full_path = cls.__validate_path("--exclude-path", new_rootfs, path)
- # Disallow climbing outside of parent directory using '..',
- # because doing so could be quite disastrous (we will delete the
- # directory).
- if not full_path.startswith(new_rootfs):
- logger.error("'%s' points to a path outside the rootfs" % orig_path)
- sys.exit(1)
+ if not os.path.lexists(full_path):
+ continue
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
if path.endswith(os.sep):
# Delete content only.
for entry in os.listdir(full_path):
full_entry = os.path.join(full_path, entry)
- if os.path.isdir(full_entry) and not os.path.islink(full_entry):
- shutil.rmtree(full_entry)
- else:
- os.remove(full_entry)
+ rm_cmd = "rm -rf %s" % (full_entry)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
else:
# Delete whole directory.
- shutil.rmtree(full_path)
+ rm_cmd = "rm -rf %s" % (full_path)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
+
+ # Update part.has_fstab here as fstab may have been added or
+ # removed by the above modifications.
+ part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab"))
+ if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
+ fstab_path = os.path.join(new_rootfs, "etc/fstab")
+ # Assume that fstab should always be owned by root with fixed permissions
+ install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(install_cmd, native_sysroot, pseudo)
part.prepare_rootfs(cr_workdir, oe_builddir,
- new_rootfs or part.rootfs_dir, native_sysroot)
+ new_rootfs or part.rootfs_dir, native_sysroot,
+ pseudo_dir = new_pseudo or pseudo_dir)
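
Taken together, the new rootfs handling could be exercised from a wks file roughly as follows (all paths are hypothetical); a trailing '/' on --exclude-path deletes only the directory's contents, and --include-path takes an optional in-rootfs destination as its second argument:

part / --source rootfs --change-directory usr --include-path extra-files/ opt/extra --exclude-path home/ --fstype=ext4 --label root
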
diff --git a/scripts/lnr b/scripts/lnr
deleted file mode 100755
index 5fed780eb2..0000000000
--- a/scripts/lnr
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /usr/bin/env python3
-
-# Create a *relative* symlink, just like ln --relative does but without needing
-# coreutils 8.16.
-
-import sys, os
-
-if len(sys.argv) != 3:
- print("$ lnr TARGET LINK_NAME")
- sys.exit(1)
-
-target = sys.argv[1]
-linkname = sys.argv[2]
-
-if os.path.isabs(target):
- if not os.path.isabs(linkname):
- linkname = os.path.abspath(linkname)
- start = os.path.dirname(linkname)
- target = os.path.relpath(target, start)
-
-os.symlink(target, linkname)
diff --git a/scripts/multilib_header_wrapper.h b/scripts/multilib_header_wrapper.h
index 4824790783..88f3193812 100644
--- a/scripts/multilib_header_wrapper.h
+++ b/scripts/multilib_header_wrapper.h
@@ -1,46 +1,13 @@
/*
* Copyright (C) 2005-2011 by Wind River Systems, Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
*/
-#pragma once
-
-#if defined (__bpf__)
-#define __MHWORDSIZE 64
-#elif defined (__arm__)
-#define __MHWORDSIZE 32
-#elif defined (__aarch64__) && defined ( __LP64__)
-#define __MHWORDSIZE 64
-#elif defined (__aarch64__)
-#define __MHWORDSIZE 32
-#else
#include <bits/wordsize.h>
-#if defined (__WORDSIZE)
-#define __MHWORDSIZE __WORDSIZE
-#else
-#error "__WORDSIZE is not defined"
-#endif
-#endif
-#if __MHWORDSIZE == 32
+#if __WORDSIZE == 32
#ifdef _MIPS_SIM
@@ -56,7 +23,7 @@
#include <ENTER_HEADER_FILENAME_HERE-32.h>
#endif
-#elif __MHWORDSIZE == 64
+#elif __WORDSIZE == 64
#include <ENTER_HEADER_FILENAME_HERE-64.h>
#else
#error "Unknown __WORDSIZE detected"
diff --git a/scripts/native-intercept/ar b/scripts/native-intercept/ar
new file mode 100755
index 0000000000..dcc623e3ed
--- /dev/null
+++ b/scripts/native-intercept/ar
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'ar' that defaults to deterministic archives
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'ar'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_ar = shutil.which('ar', path=path)
+
+if len(sys.argv) == 1:
+ os.execl(real_ar, 'ar')
+
+# modify args to mimic 'ar' configured with --default-deterministic-archives
+argv = sys.argv
+if argv[1].startswith('--'):
+ # No modifier given
+ pass
+else:
+ # remove the optional '-'
+ if argv[1][0] == '-':
+ argv[1] = argv[1][1:]
+ if 'U' in argv[1]:
+ sys.stderr.write("ar: non-deterministic mode requested\n")
+ else:
+ argv[1] = argv[1].replace('u', '')
+ argv[1] = 'D' + argv[1]
+
+os.execv(real_ar, argv)
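
A sketch of the rewriting performed above (archive and member names are hypothetical):

ar rcu libfoo.a foo.o   # as invoked
ar Drc libfoo.a foo.o   # as re-executed

The optional leading '-' and the 'u' (update) modifier are stripped and 'D' (deterministic) is prepended; an explicit 'U' leaves the flags untouched and only emits the stderr warning.
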
diff --git a/scripts/native-intercept/chgrp b/scripts/native-intercept/chgrp
new file mode 100755
index 0000000000..399c979f9a
--- /dev/null
+++ b/scripts/native-intercept/chgrp
@@ -0,0 +1,5 @@
+#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/native-intercept/chown b/scripts/native-intercept/chown
index 4f43271c2b..399c979f9a 100755
--- a/scripts/native-intercept/chown
+++ b/scripts/native-intercept/chown
@@ -1,2 +1,5 @@
#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
new file mode 100755
index 0000000000..30cc417d3a
--- /dev/null
+++ b/scripts/nativesdk-intercept/chgrp
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chgrp' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chgrp'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chgrp = shutil.which('chgrp', path=path)
+
+args = [real_chgrp]
+
+# replace the first non-option argument (the group) with 'root';
+# sys.argv[0] is this wrapper itself, so skip it
+found = False
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chgrp, args)
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
new file mode 100755
index 0000000000..3914b3e384
--- /dev/null
+++ b/scripts/nativesdk-intercept/chown
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chown' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chown'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chown = shutil.which('chown', path=path)
+
+args = [real_chown]
+
+# replace the first non-option argument (the owner spec) with 'root:root';
+# sys.argv[0] is this wrapper itself, so skip it
+found = False
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root:root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chown, args)
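
Both wrappers rewrite only the first non-option argument, e.g. (hypothetical paths):

chown -R 1000:1000 ./rootfs/usr   # becomes: chown -R root:root ./rootfs/usr
chgrp -R users ./rootfs/etc       # becomes: chgrp -R root ./rootfs/etc
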
diff --git a/scripts/oe-build-perf-report b/scripts/oe-build-perf-report
index b3c769895e..7812ea4540 100755
--- a/scripts/oe-build-perf-report
+++ b/scripts/oe-build-perf-report
@@ -1,18 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Examine build performance test results
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import logging
@@ -37,58 +31,18 @@ from buildstats import BuildStats, diff_buildstats, BSVerDiff
scriptpath.add_oe_lib_path()
from oeqa.utils.git import GitRepo, GitError
+import oeqa.utils.gitarchive as gitarchive
# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-
-# Container class for tester revisions
-TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
-
-
-def get_test_runs(repo, tag_name, **kwargs):
- """Get a sorted list of test runs, matching given pattern"""
- # First, get field names from the tag name pattern
- field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
- undef_fields = [f for f in field_names if f not in kwargs.keys()]
-
- # Fields for formatting tag name pattern
- str_fields = dict([(f, '*') for f in field_names])
- str_fields.update(kwargs)
-
- # Get a list of all matching tags
- tag_pattern = tag_name.format(**str_fields)
- tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
- log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)
-
- # Parse undefined fields from tag names
- str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
- str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
- str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
- str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
- str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
- # escape parenthesis in fields in order to not messa up the regexp
- fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
- str_fields.update(fixed_fields)
- tag_re = re.compile(tag_name.format(**str_fields))
-
- # Parse fields from tags
- revs = []
- for tag in tags:
- m = tag_re.match(tag)
- groups = m.groupdict()
- revs.append([groups[f] for f in undef_fields] + [tag])
-
- # Return field names and a sorted list of revs
- return undef_fields, sorted(revs)
-
def list_test_revs(repo, tag_name, verbosity, **kwargs):
"""Get list of all tested revisions"""
valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])
- fields, revs = get_test_runs(repo, tag_name, **valid_kwargs)
+ fields, revs = gitarchive.get_test_runs(log, repo, tag_name, **valid_kwargs)
ignore_fields = ['tag_number']
if verbosity < 2:
extra_fields = ['COMMITS', 'TEST RUNS']
@@ -133,36 +87,6 @@ def list_test_revs(repo, tag_name, verbosity, **kwargs):
print_table(rows)
-def get_test_revs(repo, tag_name, **kwargs):
- """Get list of all tested revisions"""
- fields, runs = get_test_runs(repo, tag_name, **kwargs)
-
- revs = {}
- commit_i = fields.index('commit')
- commit_num_i = fields.index('commit_number')
- for run in runs:
- commit = run[commit_i]
- commit_num = run[commit_num_i]
- tag = run[-1]
- if not commit in revs:
- revs[commit] = TestedRev(commit, commit_num, [tag])
- else:
- assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
- revs[commit].tags.append(tag)
-
- # Return in sorted table
- revs = sorted(revs.values(), key=attrgetter('commit_number'))
- log.debug("Found %d tested revisions:\n %s", len(revs),
- "\n ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
- return revs
-
-def rev_find(revs, attr, val):
- """Search from a list of TestedRev"""
- for i, rev in enumerate(revs):
- if getattr(rev, attr) == val:
- return i
- raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
-
def is_xml_format(repo, commit):
"""Check if the commit contains xml (or json) data"""
if repo.rev_parse(commit + ':results.xml'):
@@ -427,9 +351,9 @@ def print_html_report(data, id_comp, buildstats):
# Compare buildstats
bs_key = test + '.' + meas
- rev = metadata['commit_num']['value']
- comp_rev = metadata['commit_num']['value_old']
- if (rev in buildstats and bs_key in buildstats[rev] and
+ rev = str(metadata['commit_num']['value'])
+ comp_rev = str(metadata['commit_num']['value_old'])
+ if (buildstats and rev in buildstats and bs_key in buildstats[rev] and
comp_rev in buildstats and bs_key in buildstats[comp_rev]):
new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
buildstats[rev][bs_key])
@@ -448,7 +372,7 @@ def print_html_report(data, id_comp, buildstats):
chart_opts=chart_opts))
-def get_buildstats(repo, notes_ref, revs, outdir=None):
+def get_buildstats(repo, notes_ref, notes_ref2, revs, outdir=None):
"""Get the buildstats from git notes"""
full_ref = 'refs/notes/' + notes_ref
if not repo.rev_parse(full_ref):
@@ -467,8 +391,13 @@ def get_buildstats(repo, notes_ref, revs, outdir=None):
for tag in rev.tags:
log.debug(' %s', tag)
try:
- bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
- 'show', tag + '^0']))
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref, 'show', tag + '^0']))
+ except GitError:
+ if notes_ref2:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref2, 'show', tag + '^0']))
+ else:
+ raise
except GitError:
log.warning("Buildstats not found for %s", tag)
bs_all = {}
@@ -578,11 +507,11 @@ def main(argv=None):
if not args.hostname:
auto_args(repo, args)
- revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
- branch=args.branch, machine=args.machine)
- if args.branch2:
- revs2 = get_test_revs(repo, args.tag_name, hostname=args.hostname,
- branch=args.branch2, machine=args.machine)
+ revs = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch, machine=args.machine)
+ if args.branch2 and args.branch2 != args.branch:
+ revs2 = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch2, machine=args.machine)
if not len(revs2):
log.error("No revisions found to compare against")
return 1
@@ -598,13 +527,13 @@ def main(argv=None):
if args.commit:
if args.commit_number:
log.warning("Ignoring --commit-number as --commit was specified")
- index1 = rev_find(revs, 'commit', args.commit)
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
elif args.commit_number:
- index1 = rev_find(revs, 'commit_number', args.commit_number)
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
else:
index1 = len(revs) - 1
- if args.branch2:
+ if args.branch2 and args.branch2 != args.branch:
revs2.append(revs[index1])
index1 = len(revs2) - 1
revs = revs2
@@ -612,9 +541,9 @@ def main(argv=None):
if args.commit2:
if args.commit_number2:
log.warning("Ignoring --commit-number2 as --commit2 was specified")
- index2 = rev_find(revs, 'commit', args.commit2)
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
elif args.commit_number2:
- index2 = rev_find(revs, 'commit_number', args.commit_number2)
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
else:
if index1 > 0:
index2 = index1 - 1
@@ -665,9 +594,12 @@ def main(argv=None):
buildstats = None
if args.dump_buildstats or args.html:
outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
- notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
- args.machine)
- buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ notes_ref2 = None
+ if args.branch2:
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch2, args.machine)
+ notes_ref2 = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ buildstats = get_buildstats(repo, notes_ref, notes_ref2, [rev_l, rev_r], outdir)
# Print report
if not args.html:
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
index 669470fa97..00e00b4ce9 100755
--- a/scripts/oe-build-perf-test
+++ b/scripts/oe-build-perf-test
@@ -1,19 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-"""Build performance test script"""
+
import argparse
import errno
import fcntl
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
index 52ce32987c..485d4c52e1 100755
--- a/scripts/oe-buildenv-internal
+++ b/scripts/oe-buildenv-internal
@@ -4,19 +4,8 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if ! $(return >/dev/null 2>&1) ; then
echo 'oe-buildenv-internal: error: this script must be sourced'
@@ -40,22 +29,15 @@ if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
return 1
fi
-py_v27_check=$(python2 -c 'import sys; print sys.version_info >= (2,7,3)')
-if [ "$py_v27_check" != "True" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please upgrade your python v2."
-fi
-unset py_v27_check
-
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
-py_v34_check=$(python3 -c 'import sys; print(sys.version_info >= (3,4,0))')
-if [ "$py_v34_check" != "True" ]; then
- echo >&2 "BitBake requires Python 3.4.0 or later as 'python3'"
+py_v35_check=$(python3 -c 'import sys; print(sys.version_info >= (3,5,0))')
+if [ "$py_v35_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.5.0 or later as 'python3 (scripts/install-buildtools can be used if needed)'"
return 1
fi
-unset py_v34_check
+unset py_v35_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -106,6 +88,10 @@ if [ ! -d "$BITBAKEDIR" ]; then
return 1
fi
+# Add BitBake's library to PYTHONPATH
+PYTHONPATH=$BITBAKEDIR/lib:$PYTHONPATH
+export PYTHONPATH
+
# Make sure our paths are at the beginning of $PATH
for newpath in "$BITBAKEDIR/bin" "$OEROOT/scripts"; do
# Remove any existences of $newpath from $PATH
@@ -120,12 +106,13 @@ unset BITBAKEDIR newpath
export BUILDDIR
export PATH
-BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
+BB_ENV_PASSTHROUGH_ADDITIONS_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
-SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE"
+SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE \
+BB_LOGCONFIG"
-BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
+BB_ENV_PASSTHROUGH_ADDITIONS="$(echo $BB_ENV_PASSTHROUGH_ADDITIONS $BB_ENV_PASSTHROUGH_ADDITIONS_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
-export BB_ENV_EXTRAWHITE
+export BB_ENV_PASSTHROUGH_ADDITIONS
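
As a usage sketch (MY_VAR is a hypothetical variable name), once the environment has been sourced a caller can pass an extra variable through to BitBake with:

export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS MY_VAR"
MY_VAR=value bitbake core-image-minimal
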
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index d06efe436a..f4cc5869de 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -5,18 +5,8 @@
# Copyright 2016 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -57,8 +47,8 @@ def check(args):
try:
env = os.environ.copy()
if not args.same_tmpdir:
- env['BB_ENV_EXTRAWHITE'] = env.get('BB_ENV_EXTRAWHITE', '') + ' TMPDIR_forcevariable'
- env['TMPDIR_forcevariable'] = tmpdir
+ env['BB_ENV_PASSTHROUGH_ADDITIONS'] = env.get('BB_ENV_PASSTHROUGH_ADDITIONS', '') + ' TMPDIR:forcevariable'
+ env['TMPDIR:forcevariable'] = tmpdir
try:
output = subprocess.check_output(
diff --git a/scripts/oe-debuginfod b/scripts/oe-debuginfod
new file mode 100755
index 0000000000..9e5482d869
--- /dev/null
+++ b/scripts/oe-debuginfod
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import subprocess
+
+if __name__ == "__main__":
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ package_classes_var = "DEPLOY_DIR_" + tinfoil.config_data.getVar("PACKAGE_CLASSES").split()[0].replace("package_", "").upper()
+ feed_dir = tinfoil.config_data.getVar(package_classes_var, expand=True)
+
+ subprocess.call(['bitbake', '-c', 'addto_recipe_sysroot', 'elfutils-native'])
+
+ subprocess.call(['oe-run-native', 'elfutils-native', 'debuginfod', '--verbose', '-R', '-U', feed_dir])
+ print("\nTo use the debuginfod server please ensure that this variable PACKAGECONFIG:pn-elfutils-native = \"debuginfod libdebuginfod\" is set in the local.conf")
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 6c7e9d3383..5eb3e12769 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -2,18 +2,8 @@
#
# Copyright (C) 2018 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
index cc146b3fdb..5146bbf999 100755
--- a/scripts/oe-find-native-sysroot
+++ b/scripts/oe-find-native-sysroot
@@ -17,18 +17,8 @@
#
# Copyright (c) 2010 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
diff --git a/scripts/oe-git-archive b/scripts/oe-git-archive
index 913291a99c..9305ed0b0f 100755
--- a/scripts/oe-git-archive
+++ b/scripts/oe-git-archive
@@ -4,26 +4,14 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
-import glob
-import json
import logging
-import math
import os
import re
import sys
-from collections import namedtuple, OrderedDict
-from datetime import datetime, timedelta, tzinfo
-from operator import attrgetter
# Import oe and bitbake libs
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -34,128 +22,13 @@ scriptpath.add_oe_lib_path()
from oeqa.utils.git import GitRepo, GitError
from oeqa.utils.metadata import metadata_from_bb
-
+import oeqa.utils.gitarchive as gitarchive
# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger()
-class ArchiveError(Exception):
- """Internal error handling of this script"""
-
-
-def format_str(string, fields):
- """Format string using the given fields (dict)"""
- try:
- return string.format(**fields)
- except KeyError as err:
- raise ArchiveError("Unable to expand string '{}': unknown field {} "
- "(valid fields are: {})".format(
- string, err, ', '.join(sorted(fields.keys()))))
-
-
-def init_git_repo(path, no_create, bare):
- """Initialize local Git repository"""
- path = os.path.abspath(path)
- if os.path.isfile(path):
- raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
- "directory".format(path))
- if not os.path.isdir(path) or not os.listdir(path):
- if no_create:
- raise ArchiveError("No git repo at {}, refusing to create "
- "one".format(path))
- if not os.path.isdir(path):
- try:
- os.mkdir(path)
- except (FileNotFoundError, PermissionError) as err:
- raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
- if not os.listdir(path):
- log.info("Initializing a new Git repo at %s", path)
- repo = GitRepo.init(path, bare)
- try:
- repo = GitRepo(path, is_topdir=True)
- except GitError:
- raise ArchiveError("Non-empty directory that is not a Git repository "
- "at {}\nPlease specify an existing Git repository, "
- "an empty directory or a non-existing directory "
- "path.".format(path))
- return repo
-
-
-def git_commit_data(repo, data_dir, branch, message, exclude, notes):
- """Commit data into a Git repository"""
- log.info("Committing data into to branch %s", branch)
- tmp_index = os.path.join(repo.git_dir, 'index.oe-git-archive')
- try:
- # Create new tree object from the data
- env_update = {'GIT_INDEX_FILE': tmp_index,
- 'GIT_WORK_TREE': os.path.abspath(data_dir)}
- repo.run_cmd('add .', env_update)
-
- # Remove files that are excluded
- if exclude:
- repo.run_cmd(['rm', '--cached'] + [f for f in exclude], env_update)
-
- tree = repo.run_cmd('write-tree', env_update)
-
- # Create new commit object from the tree
- parent = repo.rev_parse(branch)
- git_cmd = ['commit-tree', tree, '-m', message]
- if parent:
- git_cmd += ['-p', parent]
- commit = repo.run_cmd(git_cmd, env_update)
-
- # Create git notes
- for ref, filename in notes:
- ref = ref.format(branch_name=branch)
- repo.run_cmd(['notes', '--ref', ref, 'add',
- '-F', os.path.abspath(filename), commit])
-
- # Update branch head
- git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
- if parent:
- git_cmd.append(parent)
- repo.run_cmd(git_cmd)
-
- # Update current HEAD, if we're on branch 'branch'
- if not repo.bare and repo.get_current_branch() == branch:
- log.info("Updating %s HEAD to latest commit", repo.top_dir)
- repo.run_cmd('reset --hard')
-
- return commit
- finally:
- if os.path.exists(tmp_index):
- os.unlink(tmp_index)
-
-
-def expand_tag_strings(repo, name_pattern, msg_subj_pattern, msg_body_pattern,
- keywords):
- """Generate tag name and message, with support for running id number"""
- keyws = keywords.copy()
- # Tag number is handled specially: if not defined, we autoincrement it
- if 'tag_number' not in keyws:
- # Fill in all other fields than 'tag_number'
- keyws['tag_number'] = '{tag_number}'
- tag_re = format_str(name_pattern, keyws)
- # Replace parentheses for proper regex matching
- tag_re = tag_re.replace('(', '\(').replace(')', '\)') + '$'
- # Inject regex group pattern for 'tag_number'
- tag_re = tag_re.format(tag_number='(?P<tag_number>[0-9]{1,5})')
-
- keyws['tag_number'] = 0
- for existing_tag in repo.run_cmd('tag').splitlines():
- match = re.match(tag_re, existing_tag)
-
- if match and int(match.group('tag_number')) >= keyws['tag_number']:
- keyws['tag_number'] = int(match.group('tag_number')) + 1
-
- tag_name = format_str(name_pattern, keyws)
- msg_subj= format_str(msg_subj_pattern.strip(), keyws)
- msg_body = format_str(msg_body_pattern, keyws)
- return tag_name, msg_subj + '\n\n' + msg_body
-
-
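
Since the tag auto-increment logic moves into the shared gitarchive module, here is a minimal sketch of the idea for reference; the pattern, keywords and tag list below are illustrative, not the module's API:

    import re

    def next_tag(name_pattern, keywords, existing_tags):
        # Fill in every field except tag_number, then turn the expanded
        # pattern into a regex that captures the numeric suffix of old tags.
        keyws = dict(keywords, tag_number='{tag_number}')
        tag_re = re.escape(name_pattern.format(**keyws)).replace(
            re.escape('{tag_number}'), r'(?P<tag_number>[0-9]{1,5})') + '$'
        number = 0
        for tag in existing_tags:
            m = re.match(tag_re, tag)
            if m and int(m.group('tag_number')) >= number:
                number = int(m.group('tag_number')) + 1
        return name_pattern.format(**dict(keywords, tag_number=number))

    print(next_tag('build/{machine}/{tag_number}', {'machine': 'qemux86'},
                   ['build/qemux86/0', 'build/qemux86/1']))  # -> build/qemux86/2
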
def parse_args(argv):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
@@ -217,17 +90,11 @@ def get_nested(d, list_of_keys):
return ""
def main(argv=None):
- """Script entry point"""
args = parse_args(argv)
if args.debug:
log.setLevel(logging.DEBUG)
try:
- if not os.path.isdir(args.data_dir):
- raise ArchiveError("Not a directory: {}".format(args.data_dir))
-
- data_repo = init_git_repo(args.git_dir, args.no_create, args.bare)
-
# Get keywords to be used in tag and branch names and messages
metadata = metadata_from_bb()
keywords = {'hostname': get_nested(metadata, ['hostname']),
@@ -236,39 +103,12 @@ def main(argv=None):
'commit_count': get_nested(metadata, ['layers', 'meta', 'commit_count']),
'machine': get_nested(metadata, ['config', 'MACHINE'])}
- # Expand strings early in order to avoid getting into inconsistent
- # state (e.g. no tag even if data was committed)
- commit_msg = format_str(args.commit_msg_subject.strip(), keywords)
- commit_msg += '\n\n' + format_str(args.commit_msg_body, keywords)
- branch_name = format_str(args.branch_name, keywords)
- tag_name = None
- if not args.no_tag and args.tag_name:
- tag_name, tag_msg = expand_tag_strings(data_repo, args.tag_name,
- args.tag_msg_subject,
- args.tag_msg_body, keywords)
-
- # Commit data
- commit = git_commit_data(data_repo, args.data_dir, branch_name,
- commit_msg, args.exclude, args.notes)
-
- # Create tag
- if tag_name:
- log.info("Creating tag %s", tag_name)
- data_repo.run_cmd(['tag', '-a', '-m', tag_msg, tag_name, commit])
-
- # Push data to remote
- if args.push:
- cmd = ['push', '--tags']
- # If no remote is given we push with the default settings from
- # gitconfig
- if args.push is not True:
- notes_refs = ['refs/notes/' + ref.format(branch_name=branch_name)
- for ref, _ in args.notes]
- cmd.extend([args.push, branch_name] + notes_refs)
- log.info("Pushing data to remote")
- data_repo.run_cmd(cmd)
+ gitarchive.gitarchive(args.data_dir, args.git_dir, args.no_create, args.bare,
+ args.commit_msg_subject.strip(), args.commit_msg_body, args.branch_name,
+ args.no_tag, args.tag_name, args.tag_msg_subject, args.tag_msg_body,
+ args.exclude, args.notes, args.push, keywords, log)
- except ArchiveError as err:
+ except gitarchive.ArchiveError as err:
log.error(str(err))
return 1
diff --git a/scripts/oe-git-proxy b/scripts/oe-git-proxy
index 1800942f36..aa9b9dc9a9 100755
--- a/scripts/oe-git-proxy
+++ b/scripts/oe-git-proxy
@@ -13,11 +13,15 @@
# ALL_PROXY=https://proxy.example.com:8080
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
# AUTHORS
# Darren Hart <dvhart@linux.intel.com>
+# Disable pathname expansion; NO_PROXY fields could start with "*" or consist of just "*"
+set -f
+
if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'oe-git-proxy: error: the following arguments are required: host port'
echo 'Usage: oe-git-proxy host port'
@@ -40,10 +44,12 @@ if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
fi
# Locate the netcat binary
-SOCAT=$(which socat 2>/dev/null)
-if [ $? -ne 0 ]; then
- echo "ERROR: socat binary not in PATH" 1>&2
- exit 1
+if [ -z "$SOCAT" ]; then
+ SOCAT=$(which socat 2>/dev/null)
+ if [ $? -ne 0 ]; then
+ echo "ERROR: socat binary not in PATH" 1>&2
+ exit 1
+ fi
fi
METHOD=""
@@ -58,7 +64,7 @@ ipv4_val() {
IP="$1"
SHIFT=24
VAL=0
- for B in ${IP//./ }; do
+ for B in $( echo "$IP" | tr '.' ' ' ); do
VAL=$(($VAL+$(($B<<$SHIFT))))
SHIFT=$(($SHIFT-8))
done
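
For reference, the rewritten loop computes the same value as the old bash substitution; a rough Python equivalent of ipv4_val (the helper name mirrors the shell function above):

    def ipv4_val(ip):
        # Fold a dotted quad into one 32-bit integer, high octet first.
        val, shift = 0, 24
        for b in ip.split('.'):
            val += int(b) << shift
            shift -= 8
        return val

    print(ipv4_val("192.168.1.1"))  # -> 3232235777
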
@@ -101,7 +107,7 @@ match_host() {
HOST=$1
GLOB=$2
- if [ -z "${HOST%%$GLOB}" ]; then
+ if [ -z "${HOST%%*$GLOB}" ]; then
return 0
fi
@@ -131,8 +137,8 @@ if [ -z "$ALL_PROXY" ]; then
fi
# Connect directly to hosts in NO_PROXY
-for H in "${NO_PROXY//,/ }"; do
- if match_host $1 "$H"; then
+for H in $( echo "$NO_PROXY" | tr ',' ' ' ); do
+ if match_host $1 $H; then
exec $SOCAT STDIO $METHOD
fi
done
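
The loop above splits NO_PROXY on commas and treats each entry as a suffix glob: the ${HOST%%*$GLOB} test leaves an empty string exactly when the whole host name matches. A roughly equivalent Python sketch, assuming the same glob semantics:

    from fnmatch import fnmatchcase

    def match_host(host, glob):
        # Matches when the host name as a whole matches '*' + entry.
        return fnmatchcase(host, '*' + glob)

    no_proxy = "localhost,.example.com"  # illustrative NO_PROXY value
    print(any(match_host("git.example.com", g)
              for g in no_proxy.split(',')))  # -> True
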
diff --git a/scripts/oe-gnome-terminal-phonehome b/scripts/oe-gnome-terminal-phonehome
index e02354883a..b6b9a3867b 100755
--- a/scripts/oe-gnome-terminal-phonehome
+++ b/scripts/oe-gnome-terminal-phonehome
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Gnome terminal won't tell us which PID a given command is run as
# or allow a single instance so we can't tell when it completes.
# This allows us to figure out the PID of the target so we can tell
diff --git a/scripts/oe-pkgdata-browser b/scripts/oe-pkgdata-browser
new file mode 100755
index 0000000000..a3a381923b
--- /dev/null
+++ b/scripts/oe-pkgdata-browser
@@ -0,0 +1,255 @@
+#! /usr/bin/env python3
+
+import os, sys, enum, ast
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ print("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+import bb
+
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk, Gdk, GObject
+
+RecipeColumns = enum.IntEnum("RecipeColumns", {"Recipe": 0})
+PackageColumns = enum.IntEnum("PackageColumns", {"Package": 0, "Size": 1})
+FileColumns = enum.IntEnum("FileColumns", {"Filename": 0, "Size": 1})
+
+import time
+def timeit(f):
+ def timed(*args, **kw):
+ ts = time.time()
+ print ("func:%r calling" % f.__name__)
+ result = f(*args, **kw)
+ te = time.time()
+ print ('func:%r args:[%r, %r] took: %2.4f sec' % \
+ (f.__name__, args, kw, te-ts))
+ return result
+ return timed
+
+def human_size(nbytes):
+ import math
+ suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB']
+ human = nbytes
+ rank = 0
+ if nbytes != 0:
+ rank = int((math.log10(nbytes)) / 3)
+ rank = min(rank, len(suffixes) - 1)
+ human = nbytes / (1000.0 ** rank)
+ f = ('%.2f' % human).rstrip('0').rstrip('.')
+ return '%s %s' % (f, suffixes[rank])
+
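
human_size() scales by powers of 1000 and strips trailing zeros; a few illustrative values, with the function above in scope:

    print(human_size(0))           # -> 0 B
    print(human_size(2048))        # -> 2.05 kB
    print(human_size(5 * 10**9))   # -> 5 GB
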
+def load(filename, suffix=None):
+ from configparser import ConfigParser
+ from itertools import chain
+
+ parser = ConfigParser(delimiters=('='))
+ if suffix:
+ parser.optionxform = lambda option: option.replace(":" + suffix, "")
+ with open(filename) as lines:
+ lines = chain(("[fake]",), (line.replace(": ", " = ", 1) for line in lines))
+ parser.read_file(lines)
+
+ # TODO extract the data and put it into a real dict so we can transform some
+ # values to ints?
+ return parser["fake"]
+
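
The fake-section trick in load() lets ConfigParser digest pkgdata's "KEY: value" lines without a real INI header; a toy illustration with made-up content:

    from configparser import ConfigParser
    from itertools import chain
    import io

    sample = io.StringIO("PN: busybox\nPKGSIZE: 123456\n")  # made-up pkgdata lines
    parser = ConfigParser(delimiters=('='))
    lines = chain(("[fake]",), (l.replace(": ", " = ", 1) for l in sample))
    parser.read_file(lines)
    print(parser["fake"]["pn"])  # -> busybox (keys are lower-cased by default)
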
+def find_pkgdata():
+ import subprocess
+ output = subprocess.check_output(("bitbake", "-e"), universal_newlines=True)
+ for line in output.splitlines():
+ if line.startswith("PKGDATA_DIR="):
+ return line.split("=", 1)[1].strip("\'\"")
+ # TODO exception or something
+ return None
+
+def packages_in_recipe(pkgdata, recipe):
+ """
+ Load the recipe pkgdata to determine the list of runtime packages.
+ """
+ data = load(os.path.join(pkgdata, recipe))
+ packages = data["PACKAGES"].split()
+ return packages
+
+def load_runtime_package(pkgdata, package):
+ return load(os.path.join(pkgdata, "runtime", package), suffix=package)
+
+def recipe_from_package(pkgdata, package):
+ data = load(os.path.join(pkgdata, "runtime", package), suffix=package)
+ return data["PN"]
+
+def summary(data):
+ s = ""
+ s += "{0[PKG]} {0[PKGV]}-{0[PKGR]}\n{0[LICENSE]}\n{0[SUMMARY]}\n".format(data)
+
+ return s
+
+
+class PkgUi():
+ def __init__(self, pkgdata):
+ self.pkgdata = pkgdata
+ self.current_recipe = None
+ self.recipe_iters = {}
+ self.package_iters = {}
+
+ builder = Gtk.Builder()
+ builder.add_from_file(os.path.join(os.path.dirname(__file__), "oe-pkgdata-browser.glade"))
+
+ self.window = builder.get_object("window")
+ self.window.connect("delete-event", Gtk.main_quit)
+
+ self.recipe_store = builder.get_object("recipe_store")
+ self.recipe_view = builder.get_object("recipe_view")
+ self.package_store = builder.get_object("package_store")
+ self.package_view = builder.get_object("package_view")
+
+ # Somehow resizable does not get set via builder xml
+ package_name_column = builder.get_object("package_name_column")
+ package_name_column.set_resizable(True)
+ file_name_column = builder.get_object("file_name_column")
+ file_name_column.set_resizable(True)
+
+ self.recipe_view.get_selection().connect("changed", self.on_recipe_changed)
+ self.package_view.get_selection().connect("changed", self.on_package_changed)
+
+ self.package_store.set_sort_column_id(PackageColumns.Package, Gtk.SortType.ASCENDING)
+ builder.get_object("package_size_column").set_cell_data_func(builder.get_object("package_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][PackageColumns.Size])))
+
+ self.label = builder.get_object("label1")
+ self.depends_label = builder.get_object("depends_label")
+ self.recommends_label = builder.get_object("recommends_label")
+ self.suggests_label = builder.get_object("suggests_label")
+ self.provides_label = builder.get_object("provides_label")
+
+ self.depends_label.connect("activate-link", self.on_link_activate)
+ self.recommends_label.connect("activate-link", self.on_link_activate)
+ self.suggests_label.connect("activate-link", self.on_link_activate)
+
+ self.file_store = builder.get_object("file_store")
+ self.file_store.set_sort_column_id(FileColumns.Filename, Gtk.SortType.ASCENDING)
+ builder.get_object("file_size_column").set_cell_data_func(builder.get_object("file_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][FileColumns.Size])))
+
+ self.files_view = builder.get_object("files_scrollview")
+ self.files_label = builder.get_object("files_label")
+
+ self.load_recipes()
+
+ self.recipe_view.set_cursor(Gtk.TreePath.new_first())
+
+ self.window.show()
+
+ def on_link_activate(self, label, url_string):
+ from urllib.parse import urlparse
+ url = urlparse(url_string)
+ if url.scheme == "package":
+ package = url.path
+ recipe = recipe_from_package(self.pkgdata, package)
+
+ it = self.recipe_iters[recipe]
+ path = self.recipe_store.get_path(it)
+ self.recipe_view.set_cursor(path)
+ self.recipe_view.scroll_to_cell(path)
+
+ self.on_recipe_changed(self.recipe_view.get_selection())
+
+ it = self.package_iters[package]
+ path = self.package_store.get_path(it)
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ return True
+ else:
+ return False
+
+ def on_recipe_changed(self, selection):
+ self.package_store.clear()
+ self.package_iters = {}
+
+ (model, it) = selection.get_selected()
+ if not it:
+ return
+
+ recipe = model[it][RecipeColumns.Recipe]
+ packages = packages_in_recipe(self.pkgdata, recipe)
+ for package in packages:
+ # TODO also show PKG after debian-renaming?
+ data = load_runtime_package(self.pkgdata, package)
+ # TODO stash data to avoid reading in on_package_changed
+ self.package_iters[package] = self.package_store.append([package, int(data["PKGSIZE"])])
+
+ package = recipe if recipe in packages else sorted(packages)[0]
+ path = self.package_store.get_path(self.package_iters[package])
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ def on_package_changed(self, selection):
+ self.label.set_text("")
+ self.file_store.clear()
+ self.depends_label.hide()
+ self.recommends_label.hide()
+ self.suggests_label.hide()
+ self.provides_label.hide()
+ self.files_view.hide()
+ self.files_label.hide()
+
+ (model, it) = selection.get_selected()
+ if it is None:
+ return
+
+ package = model[it][PackageColumns.Package]
+ data = load_runtime_package(self.pkgdata, package)
+
+ self.label.set_text(summary(data))
+
+ files = ast.literal_eval(data["FILES_INFO"])
+ if files:
+ self.files_label.set_text("{0} files take {1}.".format(len(files), human_size(int(data["PKGSIZE"]))))
+ self.files_view.show()
+ for filename, size in files.items():
+ self.file_store.append([filename, size])
+ else:
+ self.files_view.hide()
+ self.files_label.set_text("This package has no files.")
+ self.files_label.show()
+
+ def update_deps(field, prefix, label, clickable=True):
+ if field in data:
+ l = []
+ for name, version in bb.utils.explode_dep_versions2(data[field]).items():
+ if clickable:
+ l.append("<a href='package:{0}'>{0}</a> {1}".format(name, " ".join(version)).strip())
+ else:
+ l.append("{0} {1}".format(name, " ".join(version)).strip())
+ label.set_markup(prefix + ", ".join(l))
+ label.show()
+ else:
+ label.hide()
+ update_deps("RDEPENDS", "Depends: ", self.depends_label)
+ update_deps("RRECOMMENDS", "Recommends: ", self.recommends_label)
+ update_deps("RSUGGESTS", "Suggests: ", self.suggests_label)
+ update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False)
+
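
FILES_INFO is stored as a serialized Python dict mapping file paths to sizes, which is why a plain ast.literal_eval() recovers it; for example (the value below is made up):

    import ast

    files_info = "{'/usr/bin/foo': 1234, '/usr/share/doc/foo/README': 87}"
    for path, size in ast.literal_eval(files_info).items():
        print(path, size)
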
+ def load_recipes(self):
+ if not os.path.exists(self.pkgdata):
+ sys.exit("Error: Please ensure %s exists by generating packages before using this tool." % self.pkgdata)
+ for recipe in sorted(os.listdir(self.pkgdata)):
+ if os.path.isfile(os.path.join(self.pkgdata, recipe)):
+ self.recipe_iters[recipe] = self.recipe_store.append([recipe])
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='pkgdata browser')
+ parser.add_argument('-p', '--pkgdata', help="Optional location of pkgdata")
+
+ args = parser.parse_args()
+ pkgdata = args.pkgdata if args.pkgdata else find_pkgdata()
+ # TODO assert pkgdata is a directory
+ window = PkgUi(pkgdata)
+ Gtk.main()
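
To try the browser, run it from an initialised build environment after building some packages; without -p/--pkgdata it locates PKGDATA_DIR by running "bitbake -e", e.g. oe-pkgdata-browser -p tmp/pkgdata/<machine> (path illustrative).
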
diff --git a/scripts/oe-pkgdata-browser.glade b/scripts/oe-pkgdata-browser.glade
new file mode 100644
index 0000000000..0d06c825ff
--- /dev/null
+++ b/scripts/oe-pkgdata-browser.glade
@@ -0,0 +1,337 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Generated with glade 3.18.3 -->
+<interface>
+ <requires lib="gtk+" version="3.12"/>
+ <object class="GtkListStore" id="file_store">
+ <columns>
+ <!-- column-name Filename -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="package_store">
+ <columns>
+ <!-- column-name Package -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="pkgdata_store">
+ <columns>
+ <!-- column-name Name -->
+ <column type="gchararray"/>
+ <!-- column-name Path -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="recipe_store">
+ <columns>
+ <!-- column-name Recipe -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkWindow" id="window">
+ <property name="can_focus">False</property>
+ <property name="title" translatable="yes">Package Data Browser</property>
+ <property name="default_width">1200</property>
+ <property name="default_height">900</property>
+ <property name="icon_name">accessories-dictionary</property>
+ <property name="has_resize_grip">True</property>
+ <child>
+ <object class="GtkBox" id="box1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="margin_right">4</property>
+ <property name="margin_top">4</property>
+ <property name="margin_bottom">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkComboBox" id="pkgdata_combo">
+ <property name="can_focus">False</property>
+ <property name="model">pkgdata_store</property>
+ <property name="id_column">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext5"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="recipe_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">recipe_store</property>
+ <property name="search_column">0</property>
+ <property name="fixed_height_mode">True</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection1"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="treeviewcolumn1">
+ <property name="sizing">fixed</property>
+ <property name="title" translatable="yes">Recipe</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext1"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="package_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">package_store</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection2"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_name_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Package</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext2"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_size_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="package_size_cell"/>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkBox" id="box2">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkLabel" id="label1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">label</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="depends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">depends_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="recommends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">recs_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">2</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="suggests_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">suggests_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">3</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="provides_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">provides_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">4</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="files_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">files_label</property>
+ <property name="ellipsize">end</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">5</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkScrolledWindow" id="files_scrollview">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <child>
+ <object class="GtkTreeView" id="files_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">file_store</property>
+ <property name="rules_hint">True</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection3"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_name_column">
+ <property name="title" translatable="yes">Name</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext3">
+ <property name="background_rgba">rgba(0,0,0,0)</property>
+ </object>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_size_column">
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="file_size_cell"/>
+ <attributes>
+ <attribute name="text">1</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">6</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ </object>
+ </child>
+ </object>
+</interface>
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index 53739b0bfc..7412cc1f47 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -6,18 +6,7 @@
#
# Copyright 2012-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -107,7 +96,7 @@ def glob(args):
pn = os.path.basename(pkgdata_file)
with open(pkgdata_file, 'r') as f:
for line in f:
- if line.startswith("PKG_%s:" % pn):
+ if line.startswith("PKG:%s:" % pn):
renamed = line.split(': ')[1].rstrip()
return renamed
@@ -182,7 +171,7 @@ def read_value(args):
val = line.split(': ', 1)[1].rstrip()
return val
- logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
+ logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuenames, packages))
for package in packages:
pkg_split = package.split('_')
pkg_name = pkg_split[0]
@@ -191,20 +180,29 @@ def read_value(args):
logger.debug(revlink)
if os.path.exists(revlink):
mappedpkg = os.path.basename(os.readlink(revlink))
- qvar = args.valuename
- value = readvar(revlink, qvar, mappedpkg)
- if qvar == "PKGSIZE":
- # PKGSIZE is now in bytes, but we we want it in KB
- pkgsize = (int(value) + 1024 // 2) // 1024
- value = "%d" % pkgsize
- if args.unescape:
- import codecs
- # escape_decode() unescapes backslash encodings in byte streams
- value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ qvars = args.valuenames
+ val_names = qvars.split(',')
+ values = []
+ for qvar in val_names:
+ if qvar == "PACKAGE":
+ value = mappedpkg
+ else:
+ value = readvar(revlink, qvar, mappedpkg)
+ if qvar == "PKGSIZE":
+ # PKGSIZE is now in bytes, but we want it in KB
+ pkgsize = (int(value) + 1024 // 2) // 1024
+ value = "%d" % pkgsize
+ if args.unescape:
+ import codecs
+ # escape_decode() unescapes backslash encodings in byte streams
+ value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ values.append(value)
+
+ values_str = ' '.join(values)
if args.prefix_name:
- print('%s %s' % (pkg_name, value))
+ print('%s %s' % (pkg_name, values_str))
else:
- print(value)
+ print(values_str)
else:
logger.debug("revlink %s does not exist", revlink)
@@ -224,7 +222,7 @@ def lookup_pkglist(pkgs, pkgdata_dir, reverse):
with open(pkgfile, 'r') as f:
for line in f:
fields = line.rstrip().split(': ')
- if fields[0] == 'PKG_%s' % pkg:
+ if fields[0] == 'PKG:%s' % pkg:
mappings[pkg].append(fields[1])
break
return mappings
@@ -298,7 +296,7 @@ def package_info(args):
extra = ''
for line in f:
for var in vars:
- m = re.match(var + '(?:_\S+)?:\s*(.+?)\s*$', line)
+ m = re.match(var + '(?::\S+)?:\s*(.+?)\s*$', line)
if m:
vals[var] = m.group(1)
pkg_version = vals['PKGV'] or ''
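
The updated regex follows the new ':' override syntax for pkgdata variables; a quick check of what it now matches (the line content is illustrative):

    import re

    var = "RDEPENDS"
    m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', "RDEPENDS:busybox: glibc")
    print(m.group(1))  # -> glibc
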
@@ -400,21 +398,16 @@ def list_pkgs(args):
return False
return True
+ pkglist = []
if args.recipe:
packages = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
if args.runtime:
- pkglist = []
runtime_pkgs = lookup_pkglist(packages, args.pkgdata_dir, False)
for rtpkgs in runtime_pkgs.values():
pkglist.extend(rtpkgs)
else:
pkglist = packages
-
- for pkg in pkglist:
- if matchpkg(pkg):
- found = True
- print("%s" % pkg)
else:
if args.runtime:
searchdir = 'runtime-reverse'
@@ -425,9 +418,13 @@ def list_pkgs(args):
for fn in files:
if fn.endswith('.packaged'):
continue
- if matchpkg(fn):
- found = True
- print("%s" % fn)
+ pkglist.append(fn)
+
+ for pkg in sorted(pkglist):
+ if matchpkg(pkg):
+ found = True
+ print("%s" % pkg)
+
if not found:
if args.pkgspec:
logger.error("Unable to find any package matching %s" % args.pkgspec)
@@ -443,7 +440,7 @@ def list_pkg_files(args):
for line in f:
if line.startswith('FILES_INFO:'):
found = True
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
if long:
width = max(map(len, dictval), default=0)
@@ -512,7 +509,7 @@ def find_path(args):
with open(os.path.join(root,fn)) as f:
for line in f:
if line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, args.targetpath):
@@ -582,7 +579,7 @@ def main():
parser_read_value = subparsers.add_parser('read-value',
help='Read any pkgdata value for one or more packages',
description='Reads the named value from the pkgdata files for the specified packages')
- parser_read_value.add_argument('valuename', help='Name of the value to look up')
+ parser_read_value.add_argument('valuenames', help='Name of the value(s) to look up (comma-separated, no spaces)')
parser_read_value.add_argument('pkg', nargs='*', help='Runtime package name to look up')
parser_read_value.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
parser_read_value.add_argument('-n', '--prefix-name', help='Prefix output with package name', action='store_true')
@@ -610,6 +607,9 @@ def main():
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
+ if not os.environ.get('BUILDDIR', ''):
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
tinfoil = tinfoil_init()
try:
args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
diff --git a/scripts/oe-publish-sdk b/scripts/oe-publish-sdk
index ee33acf902..b8a652e47f 100755
--- a/scripts/oe-publish-sdk
+++ b/scripts/oe-publish-sdk
@@ -1,21 +1,11 @@
#!/usr/bin/env python3
-
+#
# OpenEmbedded SDK publishing tool
-
-# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -104,7 +94,10 @@ def publish(args):
logger.error('Failed to unpack %s to %s' % (dest_sdk, destination))
return ret
else:
- cmd = "ssh %s 'sh %s -p -y -d %s && rm -f %s'" % (host, dest_sdk, destdir, dest_sdk)
+ rm_or_not = " && rm -f %s" % dest_sdk
+ if args.keep_orig:
+ rm_or_not = ""
+ cmd = "ssh %s 'sh %s -p -y -d %s%s'" % (host, dest_sdk, destdir, rm_or_not)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('Successfully unpacked %s to %s' % (dest_sdk, destdir))
@@ -114,9 +107,9 @@ def publish(args):
# Setting up the git repo
if not is_remote:
- cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
+ cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
else:
- cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
+ cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc' > .gitignore; echo '*.pyo' >> .gitignore; echo 'pyshtables.py' >> .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('SDK published successfully')
@@ -129,6 +122,7 @@ def main():
parser = argparse_oe.ArgumentParser(description="OpenEmbedded extensible SDK publishing tool - writes server-side data to support the extensible SDK update process to a specified location")
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ parser.add_argument('-k', '--keep-orig', help='Keep the original eSDK installer file on the remote host after publishing (deleted by default)', action='store_true')
parser.add_argument('sdk', help='Extensible SDK to publish (path to .sh installer file)')
parser.add_argument('dest', help='Destination to publish SDK to; can be local path or remote in the form of user@host:/path (in the latter case ssh/scp will be used).')
diff --git a/scripts/oe-pylint b/scripts/oe-pylint
new file mode 100755
index 0000000000..7cc1ccb010
--- /dev/null
+++ b/scripts/oe-pylint
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Run pylint3 against our common Python module spaces and print a report of potential issues
+#
+this_dir=$(dirname $(readlink -f $0))
+ERRORS="-E"
+IGNORELIST="$ERRORS -d logging-too-many-args -d missing-docstring -d line-too-long -d invalid-name"
+PYTHONPATH=$this_dir/../bitbake/lib/ pylint3 $IGNORELIST bb
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST -d undefined-variable oe
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST oeqa
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib:$this_dir/lib pylint3 $IGNORELIST -d undefined-variable argparse_oe buildstats devtool recipetool scriptpath testcasemgmt build_perf checklayer resulttool scriptutils wic
\ No newline at end of file
diff --git a/scripts/oe-run-native b/scripts/oe-run-native
index a29e99438a..22958d97e7 100755
--- a/scripts/oe-run-native
+++ b/scripts/oe-run-native
@@ -1,20 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2016, Intel Corporation.
-# All Rights Reserved
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -28,7 +16,7 @@ if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'OpenEmbedded run-native - runs native tools'
echo ''
echo 'arguments:'
- echo ' native-recipe The recipe which provoides tool'
+ echo ' native-recipe The recipe which provides the tool'
echo ' tool Native tool to run'
echo ''
exit 2
@@ -55,7 +43,7 @@ fi
OLD_PATH=$PATH
# look for a tool only in native sysroot
-PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin/*-native -maxdepth 1 -type d -printf ":%p")
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin -maxdepth 1 -name "*-native" -type d -printf ":%p")
tool_find=`/usr/bin/which $tool 2>/dev/null`
if [ -n "$tool_find" ] ; then
diff --git a/scripts/oe-selftest b/scripts/oe-selftest
index 1bf860a415..18ac0f5869 100755
--- a/scripts/oe-selftest
+++ b/scripts/oe-selftest
@@ -2,18 +2,8 @@
# Copyright (c) 2013-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script runs tests defined in meta/lib/oeqa/selftest/
@@ -43,7 +33,7 @@ scriptpath.add_bitbake_lib_path()
from oeqa.utils import load_test_components
from oeqa.core.exception import OEQAPreRun
-logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout)
+logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout, keepalive=True)
def main():
description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 55d73ca1e4..d4ac074ad9 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -4,19 +4,8 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if [ -z "$BUILDDIR" ]; then
echo >&2 "Error: The build directory (BUILDDIR) must be set!"
@@ -105,9 +94,9 @@ into your configuration please add entries to conf/bblayers.conf.
EOM
- # Put the abosolute path to the layers in bblayers.conf so we can run
- # bitbake without the init script after the first run
- # ##COREBASE## is deprecated as it's meaning was inconsistent, but continue
+ # Put the absolute path to the layers in bblayers.conf so we can run
+ # bitbake without the init script after the first run.
+ # ##COREBASE## is deprecated as its meaning was inconsistent, but continue
# to replace it for compatibility.
sed -e "s|##OEROOT##|$OEROOT|g" \
-e "s|##COREBASE##|$OEROOT|g" \
@@ -124,10 +113,10 @@ if [ ! -z "$SHOWYPDOC" ]; then
cat <<EOM
The Yocto Project has extensive documentation about OE including a reference
manual which can be found at:
- http://yoctoproject.org/documentation
+ https://docs.yoctoproject.org
-For more information about OpenEmbedded see their website:
- http://www.openembedded.org/
+For more information about OpenEmbedded see the website:
+ https://www.openembedded.org/
EOM
# unset SHOWYPDOC
diff --git a/scripts/oe-test b/scripts/oe-test
index 34d9012d14..55985b0b24 100755
--- a/scripts/oe-test
+++ b/scripts/oe-test
@@ -3,7 +3,9 @@
# OpenEmbedded test tool
#
# Copyright (C) 2016 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/scripts/oe-time-dd-test.sh b/scripts/oe-time-dd-test.sh
new file mode 100755
index 0000000000..386de83dce
--- /dev/null
+++ b/scripts/oe-time-dd-test.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+#
+# oe-time-dd-test records how much time it takes to
+# write <count> kilobytes to the filesystem.
+# It also records the number of processes that are in
+# running (R), uninterruptible sleep (D) and interruptible
+# sleep (S) states from the output of the "top" command.
+# The purpose of this script is to find which part of
+# the build system puts stress on the filesystem I/O and
+# to log all the processes.
+usage() {
+ echo "$0 is used to detect i/o latency and runs commands to display host information."
+ echo "The following commands are run in order:"
+ echo "1) top -c -b -n1 -w 512"
+ echo "2) iostat -y -z -x 5 1"
+ echo "3) tail -30 tmp*/log/cooker/*/console-latest.log to gather cooker log."
+ echo " "
+ echo "Options:"
+ echo "-c | --count <amount> dd (transfer) <amount> KiB of data within specified timeout to detect latency."
+ echo " Must enable -t option."
+ echo "-t | --timeout <time> timeout in seconds for the <count> amount of data to be transferred."
+ echo "-l | --log-only run the commands without performing the data transfer."
+ echo "-h | --help show help"
+
+}
+
+run_cmds() {
+ echo "start: top output"
+ top -c -b -n1 -w 512
+ echo "end: top output"
+ echo "start: iostat"
+ iostat -y -z -x 5 1
+ echo "end: iostat"
+ echo "start: cooker log"
+ tail -30 tmp*/log/cooker/*/console-latest.log
+ echo "end: cooker log"
+}
+
+if [ $# -lt 1 ]; then
+ usage
+ exit 1
+fi
+
+re_c='^[0-9]+$'
+#re_t='^[0-9]+([.][0-9]+)?$'
+
+while [[ $# -gt 0 ]]; do
+ key="$1"
+
+ case $key in
+ -c|--count)
+ COUNT=$2
+ shift
+ shift
+ if ! [[ $COUNT =~ $re_c ]] || [[ $COUNT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -t|--timeout)
+ TIMEOUT=$2
+ shift
+ shift
+ if ! [[ $TIMEOUT =~ $re_c ]] || [[ $TIMEOUT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -l|--log-only)
+ LOG_ONLY="true"
+ shift
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+
+if [ "$LOG_ONLY" = "true" ] ; then
+ uptime
+ run_cmds
+ exit
+fi
+
+if [ -z "${TIMEOUT+x}" ] || [ -z "${COUNT+x}" ] ; then
+ usage
+ exit 1
+fi
+
+uptime
+echo "Timeout used: ${TIMEOUT}"
+timeout ${TIMEOUT} dd if=/dev/zero of=oe-time-dd-test.dat bs=1024 count=${COUNT} conv=fsync
+if [ $? -ne 0 ]; then
+ run_cmds
+fi
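
For example, oe-time-dd-test.sh -c 1024 -t 2 tries to write 1024 KiB within a 2-second timeout and dumps the diagnostics above only if the transfer fails or times out.
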
diff --git a/scripts/oe-trim-schemas b/scripts/oe-trim-schemas
index 7c199ef1df..bf77c8cf64 100755
--- a/scripts/oe-trim-schemas
+++ b/scripts/oe-trim-schemas
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
try:
diff --git a/scripts/oepydevshell-internal.py b/scripts/oepydevshell-internal.py
index 2f7d5d433e..e3c35bbe2c 100755
--- a/scripts/oepydevshell-internal.py
+++ b/scripts/oepydevshell-internal.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
@@ -40,7 +43,7 @@ nonblockingfd(pty)
nonblockingfd(sys.stdin)
-histfile = os.path.expanduser("~/.oedevpyshell-history")
+histfile = os.path.expanduser("~/.oepydevshell-history")
readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(histfile)
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index ce89491f60..bc3ab43823 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -6,21 +6,8 @@
#
# Copyright 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#
-
import sys
import fileinput
diff --git a/scripts/postinst-intercepts/delay_to_first_boot b/scripts/postinst-intercepts/delay_to_first_boot
index ecdbef95dd..fa8e1caaf5 100644
--- a/scripts/postinst-intercepts/delay_to_first_boot
+++ b/scripts/postinst-intercepts/delay_to_first_boot
@@ -1,2 +1,6 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
exit 1
diff --git a/scripts/postinst-intercepts/postinst_intercept b/scripts/postinst-intercepts/postinst_intercept
index b18e806d43..b91974c885 100755
--- a/scripts/postinst-intercepts/postinst_intercept
+++ b/scripts/postinst-intercepts/postinst_intercept
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: MIT
+#
# This script is called from inside postinstall scriptlets at do_rootfs time. It
# actually adds, at the end, the list of packages for which the intercept script
# is valid. Also, if one wants to pass any variables to the intercept script from
diff --git a/scripts/postinst-intercepts/update_desktop_database b/scripts/postinst-intercepts/update_desktop_database
new file mode 100644
index 0000000000..8903b496f3
--- /dev/null
+++ b/scripts/postinst-intercepts/update_desktop_database
@@ -0,0 +1,8 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime-xdg.bbclass
+
+update-desktop-database $D${desktop_dir}
+
diff --git a/scripts/postinst-intercepts/update_font_cache b/scripts/postinst-intercepts/update_font_cache
index e0ec471964..900db042d6 100644
--- a/scripts/postinst-intercepts/update_font_cache
+++ b/scripts/postinst-intercepts/update_font_cache
@@ -1,6 +1,13 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
+rm -f $D${fontconfigcachedir}/CACHEDIR.TAG
+
PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+
chown -R root:root $D${fontconfigcachedir}
+find $D -type f -name .uuid -exec chown root:root '{}' +
diff --git a/scripts/postinst-intercepts/update_gio_module_cache b/scripts/postinst-intercepts/update_gio_module_cache
index d1f0140947..c87fa85db9 100644
--- a/scripts/postinst-intercepts/update_gio_module_cache
+++ b/scripts/postinst-intercepts/update_gio_module_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/scripts/postinst-intercepts/update_icon_cache b/scripts/postinst-intercepts/update_gtk_icon_cache
index 9cf2a72a0c..99367a2855 100644
--- a/scripts/postinst-intercepts/update_icon_cache
+++ b/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -1,8 +1,12 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for gtk-icon-cache.bbclass
set -e
-# update native pixbuf loaders
+# Update native pixbuf loaders
$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
for icondir in $D/usr/share/icons/*/ ; do
diff --git a/scripts/postinst-intercepts/update_gtk_immodules_cache b/scripts/postinst-intercepts/update_gtk_immodules_cache
index 395516971e..9f07ccca6b 100644
--- a/scripts/postinst-intercepts/update_gtk_immodules_cache
+++ b/scripts/postinst-intercepts/update_gtk_immodules_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/scripts/postinst-intercepts/update_mime_database b/scripts/postinst-intercepts/update_mime_database
new file mode 100644
index 0000000000..582d1e162c
--- /dev/null
+++ b/scripts/postinst-intercepts/update_mime_database
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime.bbclass
+
+echo "Updating MIME database... this may take a while."
+update-mime-database $D${mimedir}
+
diff --git a/scripts/postinst-intercepts/update_pixbuf_cache b/scripts/postinst-intercepts/update_pixbuf_cache
index ebea07c356..ea12814474 100644
--- a/scripts/postinst-intercepts/update_pixbuf_cache
+++ b/scripts/postinst-intercepts/update_pixbuf_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/scripts/postinst-intercepts/update_udev_hwdb b/scripts/postinst-intercepts/update_udev_hwdb
index b5cce0a09d..8076b8ae6f 100644
--- a/scripts/postinst-intercepts/update_udev_hwdb
+++ b/scripts/postinst-intercepts/update_udev_hwdb
@@ -1,6 +1,22 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D
-chown root:root $D${sysconfdir}/udev/hwdb.bin
+case "${PREFERRED_PROVIDER_udev}" in
+ systemd)
+ UDEV_EXTRA_ARGS="--usr"
+ UDEVLIBDIR="${rootlibexecdir}"
+ ;;
+
+ *)
+ UDEV_EXTRA_ARGS=""
+ UDEVLIBDIR="${sysconfdir}"
+ ;;
+esac
+
+rm -f $D${UDEVLIBDIR}/udev/hwdb.bin
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D ${UDEV_EXTRA_ARGS}
+chown root:root $D${UDEVLIBDIR}/udev/hwdb.bin
diff --git a/scripts/pybootchartgui/pybootchartgui.py b/scripts/pybootchartgui/pybootchartgui.py
index 7ce1a5be40..1c4062b42c 100755
--- a/scripts/pybootchartgui/pybootchartgui.py
+++ b/scripts/pybootchartgui/pybootchartgui.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# This file is part of pybootchartgui.
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 201ce4577f..fc708b55c3 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -19,22 +19,23 @@ import math
import re
import random
import colorsys
+import functools
from operator import itemgetter
class RenderOptions:
- def __init__(self, app_options):
- # should we render a cumulative CPU time chart
- self.cumulative = True
- self.charts = True
- self.kernel_only = False
- self.app_options = app_options
+ def __init__(self, app_options):
+ # should we render a cumulative CPU time chart
+ self.cumulative = True
+ self.charts = True
+ self.kernel_only = False
+ self.app_options = app_options
- def proc_tree (self, trace):
- if self.kernel_only:
- return trace.kernel_tree
- else:
- return trace.proc_tree
+ def proc_tree (self, trace):
+ if self.kernel_only:
+ return trace.kernel_tree
+ else:
+ return trace.proc_tree
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
@@ -136,11 +137,11 @@ TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
# Distinct colors used for different disk volumes.
# If we have more volumes, colors get re-used.
VOLUME_COLORS = [
- (1.0, 1.0, 0.00, 1.0),
- (0.0, 1.00, 0.00, 1.0),
- (1.0, 0.00, 1.00, 1.0),
- (0.0, 0.00, 1.00, 1.0),
- (0.0, 1.00, 1.00, 1.0),
+ (1.0, 1.0, 0.00, 1.0),
+ (0.0, 1.00, 0.00, 1.0),
+ (1.0, 0.00, 1.00, 1.0),
+ (0.0, 0.00, 1.00, 1.0),
+ (0.0, 1.00, 1.00, 1.0),
]
# Process states
@@ -152,7 +153,7 @@ STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
- PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+ PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# CumulativeStats Types
STAT_TYPE_CPU = 0
@@ -160,80 +161,80 @@ STAT_TYPE_IO = 1
# Convert ps process state to an int
def get_proc_state(flag):
- return "RSDTZXW".find(flag) + 1
+ return "RSDTZXW".find(flag) + 1
def draw_text(ctx, text, color, x, y):
- ctx.set_source_rgba(*color)
- ctx.move_to(x, y)
- ctx.show_text(text)
+ ctx.set_source_rgba(*color)
+ ctx.move_to(x, y)
+ ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.fill()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.fill()
def draw_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.stroke()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
- ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
- ctx.fill()
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
+ ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
+ ctx.fill()
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
- label_w = ctx.text_extents(label)[2]
- label_x = x + w / 2 - label_w / 2
- if label_w + 10 > w:
- label_x = x + w + 5
- if label_x + label_w > maxx:
- label_x = x - label_w - 5
- draw_text(ctx, label, color, label_x, y)
+ label_w = ctx.text_extents(label)[2]
+ label_x = x + w / 2 - label_w / 2
+ if label_w + 10 > w:
+ label_x = x + w + 5
+ if label_x + label_w > maxx:
+ label_x = x - label_w - 5
+ draw_text(ctx, label, color, label_x, y)
def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
- ctx.set_font_size(AXIS_FONT_SIZE)
- prev_x = 0
- for i in range(0, rect[2] + 1, sec_w):
- if ((i / sec_w) % nsecs == 0) :
- if options.app_options.as_minutes :
- label = "%.1f" % (i / sec_w / 60.0)
- else :
- label = "%d" % (i / sec_w)
- label_w = ctx.text_extents(label)[2]
- x = rect[0] + i - label_w/2
- if x >= prev_x:
- draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
- prev_x = x + label_w
+ ctx.set_font_size(AXIS_FONT_SIZE)
+ prev_x = 0
+ for i in range(0, rect[2] + 1, sec_w):
+ if ((i / sec_w) % nsecs == 0) :
+ if options.app_options.as_minutes :
+ label = "%.1f" % (i / sec_w / 60.0)
+ else :
+ label = "%d" % (i / sec_w)
+ label_w = ctx.text_extents(label)[2]
+ x = rect[0] + i - label_w/2
+ if x >= prev_x:
+ draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ prev_x = x + label_w
def draw_box_ticks(ctx, rect, sec_w):
- draw_rect(ctx, BORDER_COLOR, tuple(rect))
-
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
-
- for i in range(sec_w, rect[2] + 1, sec_w):
- if ((i / sec_w) % 10 == 0) :
- ctx.set_line_width(1.5)
- elif sec_w < 5 :
- continue
- else :
- ctx.set_line_width(1.0)
- if ((i / sec_w) % 30 == 0) :
- ctx.set_source_rgba(*TICK_COLOR_BOLD)
- else :
- ctx.set_source_rgba(*TICK_COLOR)
- ctx.move_to(rect[0] + i, rect[1] + 1)
- ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
- ctx.stroke()
- ctx.set_line_width(1.0)
-
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+ draw_rect(ctx, BORDER_COLOR, tuple(rect))
+
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+
+ for i in range(sec_w, rect[2] + 1, sec_w):
+ if ((i / sec_w) % 10 == 0) :
+ ctx.set_line_width(1.5)
+ elif sec_w < 5 :
+ continue
+ else :
+ ctx.set_line_width(1.0)
+ if ((i / sec_w) % 30 == 0) :
+ ctx.set_source_rgba(*TICK_COLOR_BOLD)
+ else :
+ ctx.set_source_rgba(*TICK_COLOR)
+ ctx.move_to(rect[0] + i, rect[1] + 1)
+ ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
@@ -252,51 +253,54 @@ def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_dash([])
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
- ctx.set_line_width(0.5)
- x_shift = proc_tree.start_time
-
- def transform_point_coords(point, x_base, y_base, \
- xscale, yscale, x_trans, y_trans):
- x = (point[0] - x_base) * xscale + x_trans
- y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
- return x, y
-
- max_x = max (x for (x, y) in data)
- max_y = max (y for (x, y) in data)
- # avoid divide by zero
- if max_y == 0:
- max_y = 1.0
- xscale = float (chart_bounds[2]) / (max_x - x_shift)
- # If data_range is given, scale the chart so that the value range in
- # data_range matches the chart bounds exactly.
- # Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
- yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
- ybase = data_range[0]
- else:
- yscale = float(chart_bounds[3]) / max_y
- ybase = 0
-
- first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
-
- ctx.set_source_rgba(*color)
- ctx.move_to(*first)
- for point in data:
- x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- ctx.line_to(x, y)
- if fill:
- ctx.stroke_preserve()
- ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], first[1])
- ctx.fill()
- else:
- ctx.stroke()
- ctx.set_line_width(1.0)
+ ctx.set_line_width(0.5)
+ x_shift = proc_tree.start_time
+
+ def transform_point_coords(point, x_base, y_base, \
+ xscale, yscale, x_trans, y_trans):
+ x = (point[0] - x_base) * xscale + x_trans
+ y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+ return x, y
+
+ max_x = max (x for (x, y) in data)
+ max_y = max (y for (x, y) in data)
+ # avoid divide by zero
+ if max_y == 0:
+ max_y = 1.0
+ if (max_x - x_shift):
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ else:
+ xscale = float (chart_bounds[2])
+ # If data_range is given, scale the chart so that the value range in
+ # data_range matches the chart bounds exactly.
+ # Otherwise, scale so that the actual data matches the chart bounds.
+ if data_range and (data_range[1] - data_range[0]):
+ yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
+ ybase = data_range[0]
+ else:
+ yscale = float(chart_bounds[3]) / max_y
+ ybase = 0
+
+ first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+
+ ctx.set_source_rgba(*color)
+ ctx.move_to(*first)
+ for point in data:
+ x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ ctx.line_to(x, y)
+ if fill:
+ ctx.stroke_preserve()
+ ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], first[1])
+ ctx.fill()
+ else:
+ ctx.stroke()
+ ctx.set_line_width(1.0)
bar_h = 55
meminfo_bar_h = 2 * bar_h
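Aside from the reindent, draw_chart() above also gains two guards against degenerate traces: if every sample shares a single timestamp, max_x - x_shift is zero, and if data_range arrives with equal bounds, the y divisor is zero; both cases now fall back to a usable scale instead of raising ZeroDivisionError. A minimal sketch of the same guard pattern, with invented values:

    # Guard a scale factor against a zero-width input range (values are made up).
    chart_w = 720.0
    max_x, x_shift = 5.0, 5.0        # degenerate trace: all samples at one instant
    if (max_x - x_shift):
        xscale = chart_w / (max_x - x_shift)
    else:
        xscale = chart_w             # same fallback draw_chart() uses
    print(xscale)                    # 720.0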
@@ -307,342 +311,348 @@ sec_w_base = 1 # the width of a second
proc_h = 16 # the height of a process
leg_s = 10
MIN_IMG_W = 800
-CUML_HEIGHT = 2000 # Increased value to accomodate CPU and I/O Graphs
+CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O Graphs
OPTIONS = None
def extents(options, xscale, trace):
- start = min(trace.start.keys())
- end = start
-
- processes = 0
- for proc in trace.processes:
- if not options.app_options.show_all and \
- trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
- continue
-
- if trace.processes[proc][1] > end:
- end = trace.processes[proc][1]
- processes += 1
-
- if trace.min is not None and trace.max is not None:
- start = trace.min
- end = trace.max
-
- w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
- h = proc_h * processes + header_h + 2 * off_y
-
- if options.charts:
- if trace.cpu_stats:
- h += 30 + bar_h
- if trace.disk_stats:
- h += 30 + bar_h
- if trace.monitor_disk:
- h += 30 + bar_h
- if trace.mem_stats:
- h += meminfo_bar_h
-
- return (w, h)
+ start = min(trace.start.keys())
+ end = start
+
+ processes = 0
+ for proc in trace.processes:
+ if not options.app_options.show_all and \
+ trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
+ continue
+
+ if trace.processes[proc][1] > end:
+ end = trace.processes[proc][1]
+ processes += 1
+
+ if trace.min is not None and trace.max is not None:
+ start = trace.min
+ end = trace.max
+
+ w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
+ h = proc_h * processes + header_h + 2 * off_y
+
+ if options.charts:
+ if trace.cpu_stats:
+ h += 30 + bar_h
+ if trace.disk_stats:
+ h += 30 + bar_h
+ if trace.monitor_disk:
+ h += 30 + bar_h
+ if trace.mem_stats:
+ h += meminfo_bar_h
+
+ # Allow for width of process legend and offset
+ if w < (720 + off_x):
+ w = 720 + off_x
+
+ return (w, h)
def clip_visible(clip, rect):
- xmax = max (clip[0], rect[0])
- ymax = max (clip[1], rect[1])
- xmin = min (clip[0] + clip[2], rect[0] + rect[2])
- ymin = min (clip[1] + clip[3], rect[1] + rect[3])
- return (xmin > xmax and ymin > ymax)
+ xmax = max (clip[0], rect[0])
+ ymax = max (clip[1], rect[1])
+ xmin = min (clip[0] + clip[2], rect[0] + rect[2])
+ ymin = min (clip[1] + clip[3], rect[1] + rect[3])
+ return (xmin > xmax and ymin > ymax)
def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
- proc_tree = options.proc_tree(trace)
-
- # render bar legend
- if trace.cpu_stats:
- ctx.set_font_size(LEGEND_FONT_SIZE)
-
- draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O wait
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
- proc_tree, None)
- # render CPU load
- draw_chart (ctx, CPU_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
- proc_tree, None)
-
- curr_y = curr_y + 30 + bar_h
-
- # render second chart
- if trace.disk_stats:
- draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O utilization
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.util) for sample in trace.disk_stats], \
- proc_tree, None)
-
- # render disk throughput
- max_sample = max (trace.disk_stats, key = lambda s: s.tput)
- if clip_visible (clip, chart_rect):
- draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
- [(sample.time, sample.tput) for sample in trace.disk_stats], \
- proc_tree, None)
-
- pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
-
- shift_x, shift_y = -20, 20
- if (pos_x < off_x + 245):
- shift_x, shift_y = 5, 40
-
- label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
- draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
-
- curr_y = curr_y + 30 + bar_h
-
- # render disk space usage
- #
- # Draws the amount of disk space used on each volume relative to the
- # lowest recorded amount. The graphs for each volume are stacked above
- # each other so that total disk usage is visible.
- if trace.monitor_disk:
- ctx.set_font_size(LEGEND_FONT_SIZE)
- # Determine set of volumes for which we have
- # information and the minimal amount of used disk
- # space for each. Currently samples are allowed to
- # not have values for all volumes; drawing could be
- # made more efficient if that wasn't the case.
- volumes = set()
- min_used = {}
- for sample in trace.monitor_disk:
- for volume, used in sample.records.items():
- volumes.add(volume)
- if volume not in min_used or min_used[volume] > used:
- min_used[volume] = used
- volumes = sorted(list(volumes))
- disk_scale = 0
- for i, volume in enumerate(volumes):
- volume_scale = max([sample.records[volume] - min_used[volume]
- for sample in trace.monitor_disk
- if volume in sample.records])
- # Does not take length of volume name into account, but fixed offset
- # works okay in practice.
- draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
- VOLUME_COLORS[i % len(VOLUME_COLORS)],
- off_x + i * 250, curr_y+20, leg_s)
- disk_scale += volume_scale
-
- # render used amount of disk space
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- for i in range(len(volumes), 0, -1):
- draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
- [(sample.time,
- # Sum up used space of all volumes including the current one
- # so that the graphs appear as stacked on top of each other.
- reduce(lambda x,y: x+y,
- [sample.records[volume] - min_used[volume]
- for volume in volumes[0:i]
- if volume in sample.records],
- 0))
- for sample in trace.monitor_disk], \
- proc_tree, [0, disk_scale])
-
- curr_y = curr_y + 30 + bar_h
-
- # render mem usage
- chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
- mem_stats = trace.mem_stats
- if mem_stats and clip_visible (clip, chart_rect):
- mem_scale = max(sample.buffers for sample in mem_stats)
- draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
- draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
- draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
- MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_annotations(ctx, proc_tree, trace.times, chart_rect)
- draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
- [(sample.time, sample.buffers) for sample in trace.mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
- [(sample.time, sample.used) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
- [(sample.time, sample.cached) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
- [(sample.time, float(sample.swap)) for sample in mem_stats], \
- proc_tree, None)
-
- curr_y = curr_y + meminfo_bar_h
-
- return curr_y
+ proc_tree = options.proc_tree(trace)
+
+ # render bar legend
+ if trace.cpu_stats:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+
+ draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O wait
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree, None)
+ # render CPU load
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render second chart
+ if trace.disk_stats:
+ draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O utilization
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.util) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ # render disk throughput
+ max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+ [(sample.time, sample.tput) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+ draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render disk space usage
+ #
+ # Draws the amount of disk space used on each volume relative to the
+ # lowest recorded amount. The graphs for each volume are stacked above
+ # each other so that total disk usage is visible.
+ if trace.monitor_disk:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+ # Determine set of volumes for which we have
+ # information and the minimal amount of used disk
+ # space for each. Currently samples are allowed to
+ # not have values for all volumes; drawing could be
+ # made more efficient if that wasn't the case.
+ volumes = set()
+ min_used = {}
+ for sample in trace.monitor_disk:
+ for volume, used in sample.records.items():
+ volumes.add(volume)
+ if volume not in min_used or min_used[volume] > used:
+ min_used[volume] = used
+ volumes = sorted(list(volumes))
+ disk_scale = 0
+ for i, volume in enumerate(volumes):
+ volume_scale = max([sample.records[volume] - min_used[volume]
+ for sample in trace.monitor_disk
+ if volume in sample.records])
+ # Does not take length of volume name into account, but fixed offset
+ # works okay in practice.
+ draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+ VOLUME_COLORS[i % len(VOLUME_COLORS)],
+ off_x + i * 250, curr_y+20, leg_s)
+ disk_scale += volume_scale
+
+ # render used amount of disk space
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ for i in range(len(volumes), 0, -1):
+ draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+ [(sample.time,
+ # Sum up used space of all volumes including the current one
+ # so that the graphs appear as stacked on top of each other.
+ functools.reduce(lambda x,y: x+y,
+ [sample.records[volume] - min_used[volume]
+ for volume in volumes[0:i]
+ if volume in sample.records],
+ 0))
+ for sample in trace.monitor_disk], \
+ proc_tree, [0, disk_scale])
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render mem usage
+ chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ mem_stats = trace.mem_stats
+ if mem_stats and clip_visible (clip, chart_rect):
+ mem_scale = max(sample.buffers for sample in mem_stats)
+ draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
+ draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
+ [(sample.time, sample.buffers) for sample in trace.mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
+ [(sample.time, sample.used) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
+ [(sample.time, sample.cached) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
+ [(sample.time, float(sample.swap)) for sample in mem_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + meminfo_bar_h
+
+ return curr_y
def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - header_h - leg_s + proc_h]
-
- draw_legend_box (ctx, "Configure", \
- TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Compile", \
- TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Install", \
- TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Populate Sysroot", \
- TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package", \
- TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package Write",
- TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
-
- ctx.set_font_size(PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
-
- y = curr_y+header_h
-
- offset = trace.min or min(trace.start.keys())
- for s in sorted(trace.start.keys()):
- for val in sorted(trace.start[s]):
- if not options.app_options.show_all and \
- trace.processes[val][1] - s < options.app_options.mintime:
- continue
- task = val.split(":")[1]
- #print val
- #print trace.processes[val][1]
- #print s
- x = chart_rect[0] + (s - offset) * sec_w
- w = ((trace.processes[val][1] - s) * sec_w)
-
- #print "proc at %s %s %s %s" % (x, y, w, proc_h)
- col = None
- if task == "do_compile":
- col = TASK_COLOR_COMPILE
- elif task == "do_configure":
- col = TASK_COLOR_CONFIGURE
- elif task == "do_install":
- col = TASK_COLOR_INSTALL
- elif task == "do_populate_sysroot":
- col = TASK_COLOR_SYSROOT
- elif task == "do_package":
- col = TASK_COLOR_PACKAGE
- elif task == "do_package_write_rpm" or \
+ chart_rect = [off_x, curr_y+header_h, w, h - curr_y - off_y - header_h]
+
+ draw_legend_box (ctx, "Configure", \
+ TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Compile", \
+ TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Install", \
+ TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Populate Sysroot", \
+ TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package", \
+ TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package Write", \
+ TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
+
+ ctx.set_font_size(PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
+
+ y = curr_y+header_h
+
+ offset = trace.min or min(trace.start.keys())
+ for start in sorted(trace.start.keys()):
+ for process in sorted(trace.start[start]):
+ if not options.app_options.show_all and \
+ trace.processes[process][1] - start < options.app_options.mintime:
+ continue
+ task = process.split(":")[1]
+
+ #print(process)
+ #print(trace.processes[process][1])
+ #print(start)
+
+ x = chart_rect[0] + (start - offset) * sec_w
+ w = ((trace.processes[process][1] - start) * sec_w)
+
+ #print("proc at %s %s %s %s" % (x, y, w, proc_h))
+ col = None
+ if task == "do_compile":
+ col = TASK_COLOR_COMPILE
+ elif task == "do_configure":
+ col = TASK_COLOR_CONFIGURE
+ elif task == "do_install":
+ col = TASK_COLOR_INSTALL
+ elif task == "do_populate_sysroot":
+ col = TASK_COLOR_SYSROOT
+ elif task == "do_package":
+ col = TASK_COLOR_PACKAGE
+ elif task == "do_package_write_rpm" or \
task == "do_package_write_deb" or \
task == "do_package_write_ipk":
- col = TASK_COLOR_PACKAGE_WRITE
- else:
- col = WHITE
+ col = TASK_COLOR_PACKAGE_WRITE
+ else:
+ col = WHITE
- if col:
- draw_fill_rect(ctx, col, (x, y, w, proc_h))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ if col:
+ draw_fill_rect(ctx, col, (x, y, w, proc_h))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
- y = y + proc_h
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+ y = y + proc_h
- return curr_y
+ return curr_y
#
# Render the chart.
#
def render(ctx, options, xscale, trace):
- (w, h) = extents (options, xscale, trace)
- global OPTIONS
- OPTIONS = options.app_options
+ (w, h) = extents (options, xscale, trace)
+ global OPTIONS
+ OPTIONS = options.app_options
- # x, y, w, h
- clip = ctx.clip_extents()
+ # x, y, w, h
+ clip = ctx.clip_extents()
- sec_w = int (xscale * sec_w_base)
- ctx.set_line_width(1.0)
- ctx.select_font_face(FONT_NAME)
- draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
- w -= 2*off_x
- curr_y = off_y;
+ sec_w = int (xscale * sec_w_base)
+ ctx.set_line_width(1.0)
+ ctx.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
+ w -= 2*off_x
+ curr_y = off_y
- if options.charts:
- curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+ if options.charts:
+ curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
- curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
+ curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
- return
+ return
- proc_tree = options.proc_tree (trace)
+ proc_tree = options.proc_tree (trace)
- # draw the title and headers
- if proc_tree.idle:
- duration = proc_tree.idle
- else:
- duration = proc_tree.duration
+ # draw the title and headers
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
- if not options.kernel_only:
- curr_y = draw_header (ctx, trace.headers, duration)
- else:
- curr_y = off_y;
+ if not options.kernel_only:
+ curr_y = draw_header (ctx, trace.headers, duration)
+ else:
+ curr_y = off_y
- # draw process boxes
- proc_height = h
- if proc_tree.taskstats and options.cumulative:
- proc_height -= CUML_HEIGHT
+ # draw process boxes
+ proc_height = h
+ if proc_tree.taskstats and options.cumulative:
+ proc_height -= CUML_HEIGHT
- draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
- curr_y, w, proc_height, sec_w)
+ draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
+ curr_y, w, proc_height, sec_w)
- curr_y = proc_height
- ctx.set_font_size(SIG_FONT_SIZE)
- draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
+ curr_y = proc_height
+ ctx.set_font_size(SIG_FONT_SIZE)
+ draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
- # draw a cumulative CPU-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
+ # draw a cumulative CPU-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
- # draw a cumulative I/O-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
+ # draw a cumulative I/O-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
- header_size = 0
- if not options.kernel_only:
- draw_legend_box (ctx, "Running (%cpu)",
- PROC_COLOR_R, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Unint.sleep (I/O)",
- PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Sleeping",
- PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Zombie",
- PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
- header_size = 45
-
- chart_rect = [off_x, curr_y + header_size + 15,
- w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
- ctx.set_font_size (PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks (ctx, chart_rect, sec_w)
- if sec_w > 100:
- nsec = 1
- else:
- nsec = 5
- draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
- draw_annotations (ctx, proc_tree, times, chart_rect)
-
- y = curr_y + 60
- for root in proc_tree.process_tree:
- draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
- y = y + proc_h * proc_tree.num_nodes([root])
+ header_size = 0
+ if not options.kernel_only:
+ draw_legend_box (ctx, "Running (%cpu)",
+ PROC_COLOR_R, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Unint.sleep (I/O)",
+ PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Sleeping",
+ PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Zombie",
+ PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
+ header_size = 45
+
+ chart_rect = [off_x, curr_y + header_size + 15,
+ w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
+ ctx.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ if sec_w > 100:
+ nsec = 1
+ else:
+ nsec = 5
+ draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
+ draw_annotations (ctx, proc_tree, times, chart_rect)
+
+ y = curr_y + 60
+ for root in proc_tree.process_tree:
+ draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
+ y = y + proc_h * proc_tree.num_nodes([root])
def draw_header (ctx, headers, duration):
@@ -678,291 +688,291 @@ def draw_header (ctx, headers, duration):
return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
- x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
- w = ((proc.duration) * rect[2] / proc_tree.duration)
-
- draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- ipid = int(proc.pid)
- if not OPTIONS.show_all:
- cmdString = proc.cmd
- else:
- cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
- cmdString = cmdString + " [" + str(ipid // 1000) + "]"
- if OPTIONS.show_all:
- if proc.args:
- cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
- else:
- cmdString = cmdString + " " + proc.exe
-
- draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
-
- next_y = y + proc_h
- for child in proc.child_list:
- if next_y > clip[1] + clip[3]:
- break
- child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
- draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
- next_y = next_y + proc_h * proc_tree.num_nodes([child])
-
- return x, y
+ x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ w = ((proc.duration) * rect[2] / proc_tree.duration)
+
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ ipid = int(proc.pid)
+ if not OPTIONS.show_all:
+ cmdString = proc.cmd
+ else:
+ cmdString = ''
+ if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
+ cmdString = cmdString + " [" + str(ipid // 1000) + "]"
+ if OPTIONS.show_all:
+ if proc.args:
+ cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = cmdString + " " + proc.exe
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+
+ next_y = y + proc_h
+ for child in proc.child_list:
+ if next_y > clip[1] + clip[3]:
+ break
+ child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
+ draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
+ next_y = next_y + proc_h * proc_tree.num_nodes([child])
+
+ return x, y
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
- if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
- return
+ if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
+ return
- draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+ draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
- last_tx = -1
- for sample in proc.samples :
- tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
+ last_tx = -1
+ for sample in proc.samples :
+ tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
- # samples are sorted chronologically
- if tx < clip[0]:
- continue
- if tx > clip[0] + clip[2]:
- break
+ # samples are sorted chronologically
+ if tx < clip[0]:
+ continue
+ if tx > clip[0] + clip[2]:
+ break
- tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
- if last_tx != -1 and abs(last_tx - tx) <= tw:
- tw -= last_tx - tx
- tx = last_tx
- tw = max (tw, 1) # nice to see at least something
+ tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
+ if last_tx != -1 and abs(last_tx - tx) <= tw:
+ tw -= last_tx - tx
+ tx = last_tx
+ tw = max (tw, 1) # nice to see at least something
- last_tx = tx + tw
- state = get_proc_state( sample.state )
+ last_tx = tx + tw
+ state = get_proc_state( sample.state )
- color = STATE_COLORS[state]
- if state == STATE_RUNNING:
- alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
- color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
-# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
- elif state == STATE_SLEEPING:
- continue
+ color = STATE_COLORS[state]
+ if state == STATE_RUNNING:
+ alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
+ color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
+# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
+ elif state == STATE_SLEEPING:
+ continue
- draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
+ draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
- ctx.set_source_rgba(*DEP_COLOR)
- ctx.set_dash([2, 2])
- if abs(px - x) < 3:
- dep_off_x = 3
- dep_off_y = proc_h / 4
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, py - dep_off_y)
- ctx.line_to(px, py - dep_off_y)
- else:
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px, y + proc_h / 2)
- ctx.line_to(px, py)
- ctx.stroke()
- ctx.set_dash([])
+ ctx.set_source_rgba(*DEP_COLOR)
+ ctx.set_dash([2, 2])
+ if abs(px - x) < 3:
+ dep_off_x = 3
+ dep_off_y = proc_h / 4
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, py - dep_off_y)
+ ctx.line_to(px, py - dep_off_y)
+ else:
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px, y + proc_h / 2)
+ ctx.line_to(px, py)
+ ctx.stroke()
+ ctx.set_dash([])
# elide the bootchart collector - it is quite distorting
def elide_bootchart(proc):
- return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+ return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
class CumlSample:
- def __init__(self, proc):
- self.cmd = proc.cmd
- self.samples = []
- self.merge_samples (proc)
- self.color = None
-
- def merge_samples(self, proc):
- self.samples.extend (proc.samples)
- self.samples.sort (key = lambda p: p.time)
-
- def next(self):
- global palette_idx
- palette_idx += HSV_STEP
- return palette_idx
-
- def get_color(self):
- if self.color is None:
- i = self.next() % HSV_MAX_MOD
- h = 0.0
- if i is not 0:
- h = (1.0 * i) / HSV_MAX_MOD
- s = 0.5
- v = 1.0
- c = colorsys.hsv_to_rgb (h, s, v)
- self.color = (c[0], c[1], c[2], 1.0)
- return self.color
+ def __init__(self, proc):
+ self.cmd = proc.cmd
+ self.samples = []
+ self.merge_samples (proc)
+ self.color = None
+
+ def merge_samples(self, proc):
+ self.samples.extend (proc.samples)
+ self.samples.sort (key = lambda p: p.time)
+
+ def next(self):
+ global palette_idx
+ palette_idx += HSV_STEP
+ return palette_idx
+
+ def get_color(self):
+ if self.color is None:
+ i = self.next() % HSV_MAX_MOD
+ h = 0.0
+ if i != 0:
+ h = (1.0 * i) / HSV_MAX_MOD
+ s = 0.5
+ v = 1.0
+ c = colorsys.hsv_to_rgb (h, s, v)
+ self.color = (c[0], c[1], c[2], 1.0)
+ return self.color
def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
- global palette_idx
- palette_idx = 0
-
- time_hash = {}
- total_time = 0.0
- m_proc_list = {}
-
- if stat_type is STAT_TYPE_CPU:
- sample_value = 'cpu'
- else:
- sample_value = 'io'
- for proc in proc_tree.process_list:
- if elide_bootchart(proc):
- continue
-
- for sample in proc.samples:
- total_time += getattr(sample.cpu_sample, sample_value)
- if not sample.time in time_hash:
- time_hash[sample.time] = 1
-
- # merge pids with the same cmd
- if not proc.cmd in m_proc_list:
- m_proc_list[proc.cmd] = CumlSample (proc)
- continue
- s = m_proc_list[proc.cmd]
- s.merge_samples (proc)
-
- # all the sample times
- times = sorted(time_hash)
- if len (times) < 2:
- print("degenerate boot chart")
- return
-
- pix_per_ns = chart_bounds[3] / total_time
-# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
-
- # FIXME: we have duplicates in the process list too [!] - why !?
-
- # Render bottom up, left to right
- below = {}
- for time in times:
- below[time] = chart_bounds[1] + chart_bounds[3]
-
- # same colors each time we render
- random.seed (0)
-
- ctx.set_line_width(1)
-
- legends = []
- labels = []
-
- # render each pid in order
- for cs in m_proc_list.values():
- row = {}
- cuml = 0.0
-
- # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
- for sample in cs.samples:
- cuml += getattr(sample.cpu_sample, sample_value)
- row[sample.time] = cuml
-
- process_total_time = cuml
-
- # hide really tiny processes
- if cuml * pix_per_ns <= 2:
- continue
-
- last_time = times[0]
- y = last_below = below[last_time]
- last_cuml = cuml = 0.0
-
- ctx.set_source_rgba(*cs.get_color())
- for time in times:
- render_seg = False
-
- # did the underlying trend increase ?
- if below[time] != last_below:
- last_below = below[last_time]
- last_cuml = cuml
- render_seg = True
-
- # did we move up a pixel increase ?
- if time in row:
- nc = round (row[time] * pix_per_ns)
- if nc != cuml:
- last_cuml = cuml
- cuml = nc
- render_seg = True
-
-# if last_cuml > cuml:
-# assert fail ... - un-sorted process samples
-
- # draw the trailing rectangle from the last time to
- # before now, at the height of the last segment.
- if render_seg:
- w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
- ctx.fill()
-# ctx.stroke()
- last_time = time
- y = below [time] - cuml
-
- row[time] = y
-
- # render the last segment
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- y = below[last_time] - cuml
- ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
- ctx.fill()
-# ctx.stroke()
-
- # render legend if it will fit
- if cuml > 8:
- label = cs.cmd
- extnts = ctx.text_extents(label)
- label_w = extnts[2]
- label_h = extnts[3]
-# print "Text extents %g by %g" % (label_w, label_h)
- labels.append((label,
- chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
- y + (cuml + label_h) / 2))
- if cs in legends:
- print("ARGH - duplicate process in list !")
-
- legends.append ((cs, process_total_time))
-
- below = row
-
- # render grid-lines over the top
- draw_box_ticks(ctx, chart_bounds, sec_w)
-
- # render labels
- for l in labels:
- draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
-
- # Render legends
- font_height = 20
- label_width = 300
- LEGENDS_PER_COL = 15
- LEGENDS_TOTAL = 45
- ctx.set_font_size (TITLE_FONT_SIZE)
- dur_secs = duration / 100
- cpu_secs = total_time / 1000000000
-
- # misleading - with multiple CPUs ...
-# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
- if stat_type is STAT_TYPE_CPU:
- label = "Cumulative CPU usage, by process; total CPU: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
- else:
- label = "Cumulative I/O usage, by process; total I/O: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
-
- draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
- chart_bounds[1] + font_height)
-
- i = 0
- legends = sorted(legends, key=itemgetter(1), reverse=True)
- ctx.set_font_size(TEXT_FONT_SIZE)
- for t in legends:
- cs = t[0]
- time = t[1]
- x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
- y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
- str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
- draw_legend_box(ctx, str, cs.color, x, y, leg_s)
- i = i + 1
- if i >= LEGENDS_TOTAL:
- break
+ global palette_idx
+ palette_idx = 0
+
+ time_hash = {}
+ total_time = 0.0
+ m_proc_list = {}
+
+ if stat_type == STAT_TYPE_CPU:
+ sample_value = 'cpu'
+ else:
+ sample_value = 'io'
+ for proc in proc_tree.process_list:
+ if elide_bootchart(proc):
+ continue
+
+ for sample in proc.samples:
+ total_time += getattr(sample.cpu_sample, sample_value)
+ if sample.time not in time_hash:
+ time_hash[sample.time] = 1
+
+ # merge pids with the same cmd
+ if proc.cmd not in m_proc_list:
+ m_proc_list[proc.cmd] = CumlSample (proc)
+ continue
+ s = m_proc_list[proc.cmd]
+ s.merge_samples (proc)
+
+ # all the sample times
+ times = sorted(time_hash)
+ if len (times) < 2:
+ print("degenerate boot chart")
+ return
+
+ pix_per_ns = chart_bounds[3] / total_time
+# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
+
+ # FIXME: we have duplicates in the process list too [!] - why !?
+
+ # Render bottom up, left to right
+ below = {}
+ for time in times:
+ below[time] = chart_bounds[1] + chart_bounds[3]
+
+ # same colors each time we render
+ random.seed (0)
+
+ ctx.set_line_width(1)
+
+ legends = []
+ labels = []
+
+ # render each pid in order
+ for cs in m_proc_list.values():
+ row = {}
+ cuml = 0.0
+
+ # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
+ for sample in cs.samples:
+ cuml += getattr(sample.cpu_sample, sample_value)
+ row[sample.time] = cuml
+
+ process_total_time = cuml
+
+ # hide really tiny processes
+ if cuml * pix_per_ns <= 2:
+ continue
+
+ last_time = times[0]
+ y = last_below = below[last_time]
+ last_cuml = cuml = 0.0
+
+ ctx.set_source_rgba(*cs.get_color())
+ for time in times:
+ render_seg = False
+
+ # did the underlying trend increase?
+ if below[time] != last_below:
+ last_below = below[last_time]
+ last_cuml = cuml
+ render_seg = True
+
+ # did we move up by at least a pixel?
+ if time in row:
+ nc = round (row[time] * pix_per_ns)
+ if nc != cuml:
+ last_cuml = cuml
+ cuml = nc
+ render_seg = True
+
+# if last_cuml > cuml:
+# assert fail ... - un-sorted process samples
+
+ # draw the trailing rectangle from the last time to
+ # before now, at the height of the last segment.
+ if render_seg:
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.fill()
+# ctx.stroke()
+ last_time = time
+ y = below [time] - cuml
+
+ row[time] = y
+
+ # render the last segment
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ y = below[last_time] - cuml
+ ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.fill()
+# ctx.stroke()
+
+ # render legend if it will fit
+ if cuml > 8:
+ label = cs.cmd
+ extnts = ctx.text_extents(label)
+ label_w = extnts[2]
+ label_h = extnts[3]
+# print "Text extents %g by %g" % (label_w, label_h)
+ labels.append((label,
+ chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ y + (cuml + label_h) / 2))
+ if cs in legends:
+ print("ARGH - duplicate process in list !")
+
+ legends.append ((cs, process_total_time))
+
+ below = row
+
+ # render grid-lines over the top
+ draw_box_ticks(ctx, chart_bounds, sec_w)
+
+ # render labels
+ for l in labels:
+ draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+
+ # Render legends
+ font_height = 20
+ label_width = 300
+ LEGENDS_PER_COL = 15
+ LEGENDS_TOTAL = 45
+ ctx.set_font_size (TITLE_FONT_SIZE)
+ dur_secs = duration / 100
+ cpu_secs = total_time / 1000000000
+
+ # misleading - with multiple CPUs ...
+# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
+ if stat_type == STAT_TYPE_CPU:
+ label = "Cumulative CPU usage, by process; total CPU: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+ else:
+ label = "Cumulative I/O usage, by process; total I/O: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+
+ draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ chart_bounds[1] + font_height)
+
+ i = 0
+ legends = sorted(legends, key=itemgetter(1), reverse=True)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+ for t in legends:
+ cs = t[0]
+ time = t[1]
+ x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
+ str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
+ draw_legend_box(ctx, str, cs.color, x, y, leg_s)
+ i = i + 1
+ if i >= LEGENDS_TOTAL:
+ break
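Most of the draw.py churn above is whitespace, but the stacked disk-usage rendering also moves from the bare reduce() builtin, which Python 3 removed, to functools.reduce(). A self-contained sketch of that stacking, assuming, as the comments above note, that a sample's records dict may omit volumes; the volume names and numbers here are invented:

    import functools

    volumes = ["/", "/home"]                      # hypothetical volume list
    samples = [{"/": 1000, "/home": 500},
               {"/": 1400, "/home": 650},
               {"/": 1900}]                       # a sample may omit a volume
    min_used = {v: min(s[v] for s in samples if v in s) for v in volumes}

    # Sum the used space of volumes[0:i] so each curve sits on top of the
    # ones below it, mirroring the functools.reduce() call in render_charts().
    def stacked(sample, i):
        return functools.reduce(lambda x, y: x + y,
                                [sample[v] - min_used[v]
                                 for v in volumes[0:i] if v in sample], 0)

    print([stacked(s, len(volumes)) for s in samples])   # [0, 550, 900]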
diff --git a/scripts/pybootchartgui/pybootchartgui/gui.py b/scripts/pybootchartgui/pybootchartgui/gui.py
index 7fedd232df..e1fe915563 100644
--- a/scripts/pybootchartgui/pybootchartgui/gui.py
+++ b/scripts/pybootchartgui/pybootchartgui/gui.py
@@ -13,64 +13,83 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-import gobject
-import gtk
-import gtk.gdk
-import gtk.keysyms
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk as gtk
+from gi.repository import Gtk
+from gi.repository import Gdk
+from gi.repository import GObject as gobject
+from gi.repository import GObject
+
from . import draw
from .draw import RenderOptions
-class PyBootchartWidget(gtk.DrawingArea):
+class PyBootchartWidget(gtk.DrawingArea, gtk.Scrollable):
__gsignals__ = {
- 'expose-event': 'override',
- 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
+ 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, Gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
+ hadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ hscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+ vadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ vscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
- self.set_flags(gtk.CAN_FOCUS)
+ self.set_can_focus(True)
- self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
- self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
- self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
+ self.connect("draw", self.on_draw)
+
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self.hadj = None
- self.vadj = None
- self.hadj_changed_signal_id = None
- self.vadj_changed_signal_id = None
-
- def do_expose_event(self, event):
- cr = self.window.cairo_create()
-
- # set a clip region for the expose event
- cr.rectangle(
- event.area.x, event.area.y,
- event.area.width, event.area.height
- )
- cr.clip()
- self.draw(cr, self.get_allocation())
- return False
-
- def draw(self, cr, rect):
+ self.our_width, self.our_height = self.chart_width, self.chart_height
+
+ self.hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj.connect('value-changed', self.on_adjustments_changed)
+ self.hadj.connect('value-changed', self.on_adjustments_changed)
+
+ def bound_vals(self):
+ self.x = max(0, self.x)
+ self.y = max(0, self.y)
+ self.x = min(self.chart_width - self.our_width, self.x)
+ self.y = min(self.chart_height - self.our_height, self.y)
+
+ def on_draw(self, darea, cr):
+ # set a clip region
+ #cr.rectangle(
+ # self.x, self.y,
+ # self.chart_width, self.chart_height
+ #)
+ #cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
@@ -84,7 +103,7 @@ class PyBootchartWidget(gtk.DrawingArea):
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
- self._set_scroll_adjustments (self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
def zoom_to_rect (self, rect):
@@ -122,126 +141,101 @@ class PyBootchartWidget(gtk.DrawingArea):
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self._set_scroll_adjustments(self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
- if event.keyval == gtk.keysyms.Left:
+ if event.keyval == Gdk.keyval_from_name("Left"):
self.x -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Right:
+ elif event.keyval == Gdk.keyval_from_name("Right"):
self.x += self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Up:
+ elif event.keyval == Gdk.keyval_from_name("Up"):
self.y -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Down:
+ elif event.keyval == Gdk.keyval_from_name("Down"):
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
+ self.bound_vals()
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
- if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
+ if event.type not in (Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
- if event.state & gtk.gdk.CONTROL_MASK:
- if event.direction == gtk.gdk.SCROLL_UP:
+ if event.state & Gdk.CONTROL_MASK:
+ if event.direction == Gdk.ScrollDirection.UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
- if event.direction == gtk.gdk.SCROLL_DOWN:
+ if event.direction == Gdk.ScrollDirection.DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
- if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ if state & Gdk.ModifierType.BUTTON2_MASK or state & Gdk.ModifierType.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
+ self.bound_vals()
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
- def on_set_scroll_adjustments(self, area, hadj, vadj):
- self._set_scroll_adjustments (hadj, vadj)
-
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
+ self.our_width = allocation.width
+ if self.chart_width < self.our_width:
+ self.our_width = self.chart_width
+ self.our_height = allocation.height
+ if self.chart_height < self.our_height:
+ self.our_height = self.chart_height
+ self._set_scroll_adjustments()
def _set_adj_upper(self, adj, upper):
- changed = False
- value_changed = False
-
- if adj.upper != upper:
- adj.upper = upper
- changed = True
-
- max_value = max(0.0, upper - adj.page_size)
- if adj.value > max_value:
- adj.value = max_value
- value_changed = True
-
- if changed:
- adj.changed()
- if value_changed:
- adj.value_changed()
-
- def _set_scroll_adjustments(self, hadj, vadj):
- if hadj == None:
- hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
- if vadj == None:
- vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
-
- if self.hadj_changed_signal_id != None and \
- self.hadj != None and hadj != self.hadj:
- self.hadj.disconnect (self.hadj_changed_signal_id)
- if self.vadj_changed_signal_id != None and \
- self.vadj != None and vadj != self.vadj:
- self.vadj.disconnect (self.vadj_changed_signal_id)
-
- if hadj != None:
- self.hadj = hadj
- self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
- self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
-
- if vadj != None:
- self.vadj = vadj
- self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
- self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
+
+ if adj.get_upper() != upper:
+ adj.set_upper(upper)
+
+ def _set_scroll_adjustments(self):
+ self._set_adj_upper (self.hadj, self.zoom_ratio * (self.chart_width - self.our_width))
+ self._set_adj_upper (self.vadj, self.zoom_ratio * (self.chart_height - self.our_height))
def on_adjustments_changed(self, adj):
- self.x = self.hadj.value / self.zoom_ratio
- self.y = self.vadj.value / self.zoom_ratio
+ self.x = self.hadj.get_value() / self.zoom_ratio
+ self.y = self.vadj.get_value() / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
- self.hadj.value = x * self.zoom_ratio
- self.vadj.value = y * self.zoom_ratio
-
-PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
+ self.hadj.set_value(x * self.zoom_ratio)
+ #self.hadj.value_changed()
+ self.vadj.set_value(y * self.zoom_ratio)
class PyBootchartShell(gtk.VBox):
ui = '''
@@ -260,7 +254,7 @@ class PyBootchartShell(gtk.VBox):
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
- self.widget = PyBootchartWidget(trace, options, xscale)
+ self.widget2 = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
@@ -275,12 +269,12 @@ class PyBootchartShell(gtk.VBox):
# Create actions
actiongroup.add_actions((
- ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
- ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
- ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
- ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
- ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
- ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ('Expand', gtk.STOCK_ADD, None, None, None, self.widget2.on_expand),
+ ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget2.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget2.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget2.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget2.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget2.on_zoom_100),
))
# Add the actiongroup to the uimanager
@@ -290,29 +284,33 @@ class PyBootchartShell(gtk.VBox):
uimanager.add_ui_from_string(self.ui)
# Scrolled window
- scrolled = gtk.ScrolledWindow()
- scrolled.add(self.widget)
+ scrolled = gtk.ScrolledWindow(self.widget2.hadj, self.widget2.vadj)
+ scrolled.add(self.widget2)
+
+ #scrolled.set_hadjustment()
+ #scrolled.set_vadjustment(self.widget2.vadj)
+ scrolled.set_policy(gtk.PolicyType.ALWAYS, gtk.PolicyType.ALWAYS)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
- hbox.pack_start(toolbar, True, True)
+ hbox.pack_start(toolbar, True, True, 0)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
- button.connect ('toggled', self.widget.show_toggled)
+ button.connect ('toggled', self.widget2.show_toggled)
button.set_active(options.app_options.show_all)
- hbox.pack_start (button, False, True)
+ hbox.pack_start (button, False, True, 0)
- self.pack_start(hbox, False)
- self.pack_start(scrolled)
+ self.pack_start(hbox, False, True, 0)
+ self.pack_start(scrolled, True, True, 0)
self.show_all()
def grab_focus(self, window):
- window.set_focus(self.widget)
+ window.set_focus(self.widget2)
class PyBootchartWindow(gtk.Window):
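The gui.py port above replaces GTK2's removed 'set-scroll-adjustments' machinery with GTK3's Gtk.Scrollable interface: the widget publishes hadjustment and vadjustment, plus the two scroll-policy values, as GObject properties, and the enclosing Gtk.ScrolledWindow reads and drives them. A minimal standalone sketch of that pattern; the class name and defaults are illustrative, not pybootchartgui's:

    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk, GObject

    class ScrollableArea(Gtk.DrawingArea, Gtk.Scrollable):
        # Gtk.Scrollable is implemented entirely through these four properties;
        # the enclosing Gtk.ScrolledWindow gets and sets them as needed.
        hadjustment = GObject.Property(type=Gtk.Adjustment)
        vadjustment = GObject.Property(type=Gtk.Adjustment)
        hscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                          default=Gtk.ScrollablePolicy.MINIMUM)
        vscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                          default=Gtk.ScrollablePolicy.MINIMUM)

    window = Gtk.Window()
    scrolled = Gtk.ScrolledWindow()
    scrolled.add(ScrollableArea())    # ScrolledWindow wires up the adjustments
    window.add(scrolled)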
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index bcfb2da569..b42dac6b88 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -18,7 +18,7 @@ import string
import re
import sys
import tarfile
-from time import clock
+import time
from collections import defaultdict
from functools import reduce
@@ -267,7 +267,7 @@ def _parse_headers(file):
value = line.strip()
headers[last] += value
return headers, last
- return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
+ return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0]
def _parse_timed_blocks(file):
"""Parses (ie., splits) a file into so-called timed-blocks. A
@@ -281,7 +281,7 @@ def _parse_timed_blocks(file):
return (int(lines[0]), lines[1:])
except ValueError:
raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
- blocks = file.read().decode('utf-8').split('\n\n')
+ blocks = file.read().split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
def _parse_proc_ps_log(writer, file):
@@ -577,7 +577,7 @@ def _parse_dmesg(writer, file):
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
t = timestamp_re.match (line)
if t is None:
# print "duff timestamp " + line
@@ -665,7 +665,7 @@ def _parse_pacct(writer, file):
def _parse_paternity_log(writer, file):
parent_map = {}
parent_map[0] = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
if not line:
continue
elems = line.split(' ') # <Child> <Parent>
@@ -678,7 +678,7 @@ def _parse_paternity_log(writer, file):
def _parse_cmdline_log(writer, file):
cmdLines = {}
- for block in file.read().decode('utf-8').split('\n\n'):
+ for block in file.read().split('\n\n'):
lines = block.split('\n')
if len (lines) >= 3:
# print "Lines '%s'" % (lines[0])
@@ -723,7 +723,7 @@ def get_num_cpus(headers):
def _do_parse(writer, state, filename, file):
writer.info("parsing '%s'" % filename)
- t1 = clock()
+ t1 = time.process_time()
name = os.path.basename(filename)
if name == "proc_diskstats.log":
state.disk_stats = _parse_proc_disk_stat_log(file)
@@ -743,7 +743,7 @@ def _do_parse(writer, state, filename, file):
state.monitor_disk = _parse_monitor_disk_log(file)
elif not filename.endswith('.log'):
_parse_bitbake_buildstats(writer, state, filename, file)
- t2 = clock()
+ t2 = time.process_time()
writer.info(" %s seconds" % str(t2-t1))
return state
@@ -751,7 +751,7 @@ def parse_file(writer, state, filename):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
- with open(filename, "rb") as file:
+ with open(filename, "r") as file:
return _do_parse(writer, state, filename, file)
def parse_paths(writer, state, paths):
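
time.clock() was removed in Python 3.8; the hunks above switch the parser to time.process_time(), which measures CPU time of the current process. A minimal sketch of the replacement pattern (the parse step is a stand-in):

    import time

    def timed_parse(parse, data):
        # time.process_time() counts CPU time, much like the old time.clock()
        # on Unix; use time.perf_counter() if wall-clock duration is wanted
        t1 = time.process_time()
        result = parse(data)
        t2 = time.process_time()
        print(" %s seconds" % str(t2 - t1))
        return result

    timed_parse(lambda d: d.split('\n'), "a\nb\nc")
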
diff --git a/scripts/pythondeps b/scripts/pythondeps
index 3e13a587ee..be21dd84eb 100755
--- a/scripts/pythondeps
+++ b/scripts/pythondeps
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Determine dependencies of python scripts or available python modules in a search path.
#
# Given the -d argument and a filename/filenames, returns the modules imported by those files.
diff --git a/scripts/recipetool b/scripts/recipetool
index 3a3c9b7445..e2d585d2c5 100755
--- a/scripts/recipetool
+++ b/scripts/recipetool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index c752fa2c61..8c0fdb986a 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -2,18 +2,7 @@
#
# Copyright (c) 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This script is called by the SDK installer script. It replaces the dynamic
diff --git a/scripts/resulttool b/scripts/resulttool
new file mode 100755
index 0000000000..fc282bda6c
--- /dev/null
+++ b/scripts/resulttool
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# test results tool - tool for manipulating OEQA test result json files
+# (merge results, summarise results, regression analysis, generate manual test results file)
+#
+# To see help information, run:
+# $ resulttool
+#
+# To store test results from oeqa automated tests, run:
+# $ resulttool store <source_dir> <git_branch>
+#
+# To merge test results, run:
+# $ resulttool merge <base_result_file> <target_result_file>
+#
+# To generate a test report, run:
+# $ resulttool report <source_dir>
+#
+# To perform regression analysis between two result files, run:
+# $ resulttool regression-file <base_result_file> <target_result_file>
+#
+# To execute manual test cases, run:
+# $ resulttool manualexecution <manualjsonfile>
+#
+# By default, testresults.json for manualexecution is stored in <build>/tmp/log/manual/
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import argparse
+import logging
+script_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = script_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+import resulttool.merge
+import resulttool.store
+import resulttool.regression
+import resulttool.report
+import resulttool.manualexecution
+import resulttool.log
+logger = scriptutils.logger_create('resulttool')
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+ subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
+ resulttool.manualexecution.register_commands(subparsers)
+ subparsers.add_subparser_group('setup', 'setup', 200)
+ resulttool.merge.register_commands(subparsers)
+ resulttool.store.register_commands(subparsers)
+ subparsers.add_subparser_group('analysis', 'analysis', 100)
+ resulttool.regression.register_commands(subparsers)
+ resulttool.report.register_commands(subparsers)
+ resulttool.log.register_commands(subparsers)
+
+ args = parser.parse_args()
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ try:
+ ret = args.func(args, logger)
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+ return ret
+
+if __name__ == "__main__":
+ sys.exit(main())
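
Each resulttool submodule exposes a register_commands() hook that attaches its own subparser, which main() later dispatches via args.func(args, logger). A minimal sketch of what such a module might look like (the 'example' subcommand and its option are illustrative, not the actual resulttool.merge implementation):

    # hypothetical resulttool submodule following the register_commands convention
    def run_example(args, logger):
        logger.info("would process %s" % args.source_dir)
        return 0

    def register_commands(subparsers):
        parser = subparsers.add_parser('example',
                                       help='example subcommand',
                                       description='Demonstrates subcommand registration.')
        parser.add_argument('source_dir', help='source directory to process')
        # main() dispatches through args.func(args, logger)
        parser.set_defaults(func=run_example)
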
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index cf23472ba9..7cd771bbe7 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -1,5 +1,4 @@
#!/bin/sh -efu
-
# This file comes from rpm 4.x distribution
fatal() {
@@ -23,7 +22,8 @@ calcsize() {
i=0
while [ $i -lt 8 ]; do
- b="$(_dd $(($offset + $i)) bs=1 count=1)"
+ b=$(_dd $(($offset + $i)) bs=1 count=1; echo X)
+ b=${b%X}
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
diff --git a/scripts/runqemu b/scripts/runqemu
index c4a0ca811d..2f77a7bd0f 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -5,18 +5,8 @@
# Copyright (C) 2006-2011 Linux Foundation
# Copyright (c) 2016 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
@@ -28,6 +18,7 @@ import shutil
import glob
import configparser
import signal
+import time
class RunQemuError(Exception):
"""Custom exception to raise on known errors."""
@@ -69,27 +60,35 @@ def print_usage():
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
KERNEL - the kernel image file to use
+ BIOS - the bios image file to use
ROOTFS - the rootfs image file or nfsroot directory to use
DEVICE_TREE - the device tree blob to use
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+    novga - disable VGA emulation completely
+ sdl - choose the SDL UI frontend
+ gtk - choose the Gtk UI frontend
+ gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
+ gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
+ egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
+ (hint: if /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create
+ one suitable for mesa llvmpipe software renderer)
serial - enable a serial console on /dev/ttyS0
serialstdio - enable a serial console on the console (regardless of graphics mode)
- slirp - enable user networking, no root privileges is required
+ slirp - enable user networking, no root privilege is required
+ snapshot - don't write changes back to images
kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
- biosdir=<dir> - specify custom bios dir
- biosfilename=<filename> - specify bios filename
qemuparams=<xyz> - specify custom parameters to QEMU
bootparams=<xyz> - specify custom kernel parameters during boot
help, -h, --help: print this text
-d, --debug: Enable debug output
- -q, --quite: Hide most output except error messages
+ -q, --quiet: Hide most output except error messages
Examples:
runqemu
@@ -99,11 +98,13 @@ Examples:
runqemu qemux86-64 core-image-sato ext4
runqemu qemux86-64 wic-image-minimal wic
runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
- runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz...
+ runqemu qemux86 iso/hddimg/wic.vmdk/wic.vhd/wic.vhdx/wic.qcow2/wic.vdi/ramfs/cpio.gz...
runqemu qemux86 qemuparams="-m 256"
runqemu qemux86 bootparams="psplash=false"
runqemu path/to/<image>-<machine>.wic
runqemu path/to/<image>-<machine>.wic.vmdk
+ runqemu path/to/<image>-<machine>.wic.vhdx
+ runqemu path/to/<image>-<machine>.wic.vhd
""")
def check_tun():
@@ -115,39 +116,6 @@ def check_tun():
if not os.access(dev_tun, os.W_OK):
raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-def check_libgl(qemu_bin):
- cmd = ('ldd', qemu_bin)
- logger.debug('Running %s...' % str(cmd))
- need_gl = subprocess.check_output(cmd).decode('utf-8')
- if re.search('libGLU', need_gl):
- # We can't run without a libGL.so
- libgl = False
- check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
- ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
- ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
-
- for (f1, f2) in check_files:
- if re.search('\*', f1):
- for g1 in glob.glob(f1):
- if libgl:
- break
- if os.path.exists(g1):
- for g2 in glob.glob(f2):
- if os.path.exists(g2):
- libgl = True
- break
- if libgl:
- break
- else:
- if os.path.exists(f1) and os.path.exists(f2):
- libgl = True
- break
- if not libgl:
- logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
- logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
- logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
- raise RunQemuError('%s requires libGLU, but not found' % qemu_bin)
-
def get_first_file(cmds):
"""Return first file found in wildcard cmds"""
for cmd in cmds:
@@ -158,19 +126,6 @@ def get_first_file(cmds):
return f
return ''
-def check_free_port(host, port):
- """ Check whether the port is free or not """
- import socket
- from contextlib import closing
-
- with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
- if sock.connect_ex((host, port)) == 0:
- # Port is open, so not free
- return False
- else:
- # Port is not open, so free
- return True
-
class BaseConfig(object):
def __init__(self):
# The self.d saved vars from self.set(), part of them are from qemuboot.conf
@@ -181,27 +136,33 @@ class BaseConfig(object):
self.env_vars = ('MACHINE',
'ROOTFS',
'KERNEL',
+ 'BIOS',
'DEVICE_TREE',
'DEPLOY_DIR_IMAGE',
'OE_TMPDIR',
'OECORE_NATIVE_SYSROOT',
+ 'MULTICONFIG',
+ 'SERIAL_CONSOLES',
)
self.qemu_opt = ''
self.qemu_opt_script = ''
- self.clean_nfs_dir = False
+ self.qemuparams = ''
self.nfs_server = ''
self.rootfs = ''
# File name(s) of a OVMF firmware file or variable store,
# to be added with -drive if=pflash.
# Found in the same places as the rootfs, with or without one of
# these suffices: qcow2, bin.
- # Setting one also adds "-vga std" because that is all that
- # OVMF supports.
self.ovmf_bios = []
+ # When enrolling default Secure Boot keys, the hypervisor
+ # must provide the Platform Key and the first Key Exchange Key
+ # certificate in the Type 11 SMBIOS table.
+ self.ovmf_secboot_pkkek1 = ''
self.qemuboot = ''
self.qbconfload = False
self.kernel = ''
+ self.bios = ''
self.kernel_cmdline = ''
self.kernel_cmdline_script = ''
self.bootparams = ''
@@ -210,24 +171,35 @@ class BaseConfig(object):
self.kvm_enabled = False
self.vhost_enabled = False
self.slirp_enabled = False
+ self.net_bridge = None
self.nfs_instance = 0
self.nfs_running = False
self.serialconsole = False
self.serialstdio = False
+ self.nographic = False
+ self.sdl = False
+ self.gtk = False
+ self.gl = False
+ self.gl_es = False
+ self.egl_headless = False
+ self.novga = False
self.cleantap = False
self.saved_stty = ''
self.audio_enabled = False
self.tcpserial_portnum = ''
- self.custombiosdir = ''
- self.lock = ''
- self.lock_descriptor = None
+ self.taplock = ''
+ self.taplock_descriptor = None
+ self.portlocks = {}
self.bitbake_e = ''
self.snapshot = False
+ self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi', "wic.vhd", "wic.vhdx")
self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
- self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'wic.vmdk',
- 'wic.qcow2', 'wic.vdi', 'iso')
+ self.vmtypes = ('hddimg', 'iso')
+ self.fsinfo = {}
self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+ self.cmdline_ip_slirp = "ip=dhcp"
+ self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
# Use different mac section for tap and slirp to avoid
# conflicts, e.g., when one is running with tap, the other is
# running with slirp.
@@ -240,31 +212,90 @@ class BaseConfig(object):
self.qemupid = None
# avoid cleanup twice
self.cleaned = False
+ # Files to cleanup after run
+ self.cleanup_files = []
- def acquire_lock(self, error=True):
- logger.debug("Acquiring lockfile %s..." % self.lock)
+ def acquire_taplock(self, error=True):
+ logger.debug("Acquiring lockfile %s..." % self.taplock)
try:
- self.lock_descriptor = open(self.lock, 'w')
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ self.taplock_descriptor = open(self.taplock, 'w')
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
except Exception as e:
- msg = "Acquiring lockfile %s failed: %s" % (self.lock, e)
+ msg = "Acquiring lockfile %s failed: %s" % (self.taplock, e)
if error:
logger.error(msg)
else:
logger.info(msg)
- if self.lock_descriptor:
- self.lock_descriptor.close()
- self.lock_descriptor = None
+ if self.taplock_descriptor:
+ self.taplock_descriptor.close()
+ self.taplock_descriptor = None
return False
return True
- def release_lock(self):
- if self.lock_descriptor:
+ def release_taplock(self):
+ if self.taplock_descriptor:
logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
- self.lock_descriptor.close()
- os.remove(self.lock)
- self.lock_descriptor = None
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
+ self.taplock_descriptor.close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(self.taplock)
+ self.taplock_descriptor = None
+
+ def check_free_port(self, host, port, lockdir):
+ """ Check whether the port is free or not """
+ import socket
+ from contextlib import closing
+
+ lockfile = os.path.join(lockdir, str(port) + '.lock')
+ if self.acquire_portlock(lockfile):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ if sock.connect_ex((host, port)) == 0:
+ # Port is open, so not free
+ self.release_portlock(lockfile)
+ return False
+ else:
+ # Port is not open, so free
+ return True
+ else:
+ return False
+
+ def acquire_portlock(self, lockfile):
+ logger.debug("Acquiring lockfile %s..." % lockfile)
+ try:
+ portlock_descriptor = open(lockfile, 'w')
+ self.portlocks.update({lockfile: portlock_descriptor})
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ msg = "Acquiring lockfile %s failed: %s" % (lockfile, e)
+ logger.info(msg)
+ if lockfile in self.portlocks.keys() and self.portlocks[lockfile]:
+ self.portlocks[lockfile].close()
+ del self.portlocks[lockfile]
+ return False
+ return True
+
+ def release_portlock(self, lockfile=None):
+        if lockfile is not None:
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
+ self.portlocks[lockfile].close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
+ del self.portlocks[lockfile]
+ elif len(self.portlocks):
+ for lockfile, descriptor in self.portlocks.items():
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(descriptor, fcntl.LOCK_UN)
+ descriptor.close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
+ self.portlocks = {}
def get(self, key):
if key in self.d:
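
The port locks above rely on non-blocking advisory locks: flock(LOCK_EX|LOCK_NB) fails immediately when another runqemu holds the file, and the descriptor is later handed to the qemu process via pass_fds so the lock outlives runqemu itself, which is why release only closes and never unlocks or unlinks. A minimal sketch of the acquire pattern (the lockfile path is illustrative):

    import fcntl
    import os
    import tempfile

    def try_lock(path):
        # Return an open descriptor holding an exclusive advisory lock,
        # or None if another process already holds it.
        fd = open(path, 'w')
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            fd.close()
            return None
        # Do not flock(LOCK_UN) or unlink on release if the descriptor is
        # passed to a child (e.g. via subprocess pass_fds); closing our
        # copy is enough and avoids races with other lockers.
        return fd

    path = os.path.join(tempfile.gettempdir(), 'example-2222.lock')
    lock = try_lock(path)
    print("acquired" if lock else "busy")
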
@@ -291,7 +322,7 @@ class BaseConfig(object):
def check_arg_fstype(self, fst):
"""Check and set FSTYPE"""
- if fst not in self.fstypes + self.vmtypes:
+ if fst not in self.fstypes + self.vmtypes + self.wictypes:
logger.warning("Maybe unsupported FSTYPE: %s" % fst)
if not self.fstype or self.fstype == fst:
if fst == 'ramfs':
@@ -321,10 +352,10 @@ class BaseConfig(object):
def check_arg_path(self, p):
"""
- Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
- - Check whether is a kernel file
- - Check whether is a image file
- - Check whether it is a nfs dir
- - Check whether it is a OVMF flash file
+ - Check whether it is a kernel file
+ - Check whether it is an image file
+ - Check whether it is an NFS dir
+ - Check whether it is an OVMF flash file
"""
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
@@ -335,7 +366,7 @@ class BaseConfig(object):
self.kernel = p
elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
self.rootfs = p
- # Check filename against self.fstypes can hanlde <file>.cpio.gz,
+ # Check filename against self.fstypes can handle <file>.cpio.gz,
# otherwise, its type would be "gz", which is incorrect.
fst = ""
for t in self.fstypes:
@@ -398,9 +429,7 @@ class BaseConfig(object):
self.set("MACHINE", arg)
return
- cmd = 'MACHINE=%s bitbake -e' % arg
- logger.info('Running %s...' % cmd)
- self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ self.bitbake_e = self.run_bitbake_env(arg)
# bitbake -e doesn't report invalid MACHINE as an error, so
# let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
# MACHINE.
@@ -415,6 +444,31 @@ class BaseConfig(object):
logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
self.set("MACHINE", arg)
+ def set_dri_path(self):
+ # As runqemu can be run within bitbake (when using testimage, for example),
+ # we need to ensure that we run host pkg-config, and that it does not
+ # get mis-directed to native build paths set by bitbake.
+ try:
+ del os.environ['PKG_CONFIG_PATH']
+ del os.environ['PKG_CONFIG_DIR']
+ del os.environ['PKG_CONFIG_LIBDIR']
+ del os.environ['PKG_CONFIG_SYSROOT_DIR']
+ except KeyError:
+ pass
+ try:
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
+ except subprocess.CalledProcessError as e:
+ raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
+ os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
+
+ # This preloads uninative libc pieces and therefore ensures that RPATH/RUNPATH
+ # in host mesa drivers doesn't trick uninative into loading host libc.
+ preload_items = ['libdl.so.2', 'librt.so.1', 'libpthread.so.0']
+ uninative_path = os.path.dirname(self.get("UNINATIVE_LOADER"))
+ if os.path.exists(uninative_path):
+ preload_paths = [os.path.join(uninative_path, i) for i in preload_items]
+ os.environ['LD_PRELOAD'] = " ".join(preload_paths)
+
def check_args(self):
for debug in ("-d", "--debug"):
if debug in sys.argv:
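
set_dri_path() has to undo bitbake's pkg-config redirection before asking the host where its dri drivers live. A minimal sketch of that scrub-then-query pattern, assuming a host with Mesa's dri.pc installed:

    import os
    import subprocess

    def host_dri_path():
        # Drop cross/native pkg-config redirection so the host pkg-config answers
        for var in ('PKG_CONFIG_PATH', 'PKG_CONFIG_DIR',
                    'PKG_CONFIG_LIBDIR', 'PKG_CONFIG_SYSROOT_DIR'):
            os.environ.pop(var, None)
        out = subprocess.check_output(
            "PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri",
            shell=True)
        return out.decode('utf-8').strip()
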
@@ -426,18 +480,31 @@ class BaseConfig(object):
logger.setLevel(logging.ERROR)
sys.argv.remove(quiet)
+ if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
+ os.environ['SDL_RENDER_DRIVER'] = 'software'
+ os.environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
+
unknown_arg = ""
for arg in sys.argv[1:]:
- if arg in self.fstypes + self.vmtypes:
+ if arg in self.fstypes + self.vmtypes + self.wictypes:
self.check_arg_fstype(arg)
elif arg == 'nographic':
- self.qemu_opt_script += ' -nographic'
- self.kernel_cmdline_script += ' console=ttyS0'
+ self.nographic = True
+ elif arg == 'sdl':
+ self.sdl = True
+ elif arg == 'gtk':
+ self.gtk = True
+ elif arg == 'gl':
+ self.gl = True
+ elif 'gl-es' in sys.argv[1:]:
+ self.gl_es = True
+ elif arg == 'egl-headless':
+ self.egl_headless = True
+ elif arg == 'novga':
+ self.novga = True
elif arg == 'serial':
- self.kernel_cmdline_script += ' console=ttyS0'
self.serialconsole = True
elif arg == "serialstdio":
- self.kernel_cmdline_script += ' console=ttyS0'
self.serialstdio = True
elif arg == 'audio':
logger.info("Enabling audio in qemu")
@@ -449,18 +516,16 @@ class BaseConfig(object):
self.vhost_enabled = True
elif arg == 'slirp':
self.slirp_enabled = True
+ elif arg.startswith('bridge='):
+ self.net_bridge = '%s' % arg[len('bridge='):]
elif arg == 'snapshot':
self.snapshot = True
elif arg == 'publicvnc':
self.qemu_opt_script += ' -vnc :0'
elif arg.startswith('tcpserial='):
- self.tcpserial_portnum = arg[len('tcpserial='):]
- elif arg.startswith('biosdir='):
- self.custombiosdir = arg[len('biosdir='):]
- elif arg.startswith('biosfilename='):
- self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
- self.qemu_opt_script += ' %s' % arg[len('qemuparams='):]
+ self.qemuparams = ' %s' % arg[len('qemuparams='):]
elif arg.startswith('bootparams='):
self.bootparams = arg[len('bootparams='):]
elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
@@ -496,23 +561,24 @@ class BaseConfig(object):
def check_kvm(self):
"""Check kvm and kvm-host"""
if not (self.kvm_enabled or self.vhost_enabled):
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'), self.get('QB_SMP'))
return
if not self.get('QB_CPU_KVM'):
raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'), self.get('QB_SMP'))
yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
dev_kvm = '/dev/kvm'
dev_vhost = '/dev/vhost-net'
- with open('/proc/cpuinfo', 'r') as f:
- kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
- if not kvm_cap:
- logger.error("You are trying to enable KVM on a cpu without VT support.")
- logger.error("Remove kvm from the command-line, or refer:")
- raise RunQemuError(yocto_kvm_wiki)
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise RunQemuError(yocto_kvm_wiki)
if not os.path.exists(dev_kvm):
logger.error("Missing KVM device. Have you inserted kvm modules?")
@@ -537,10 +603,10 @@ class BaseConfig(object):
logger.error("For further help see:")
raise RunQemuError(yocto_paravirt_kvm_wiki)
- if not os.access(dev_kvm, os.W_OK|os.R_OK):
+ if not os.access(dev_vhost, os.W_OK|os.R_OK):
logger.error("You have no read or write permission on /dev/vhost-net.")
logger.error("Please change the ownership of this file as described at:")
- raise RunQemuError(yocto_kvm_wiki)
+ raise RunQemuError(yocto_paravirt_kvm_wiki)
def check_fstype(self):
"""Check and setup FSTYPE"""
@@ -551,6 +617,40 @@ class BaseConfig(object):
else:
raise RunQemuError("FSTYPE is NULL!")
+ # parse QB_FSINFO into dict, e.g. { 'wic': ['no-kernel-in-fs', 'a-flag'], 'ext4': ['another-flag']}
+ wic_fs = False
+ qb_fsinfo = self.get('QB_FSINFO')
+ if qb_fsinfo:
+ qb_fsinfo = qb_fsinfo.split()
+ for fsinfo in qb_fsinfo:
+ try:
+ fstype, fsflag = fsinfo.split(':')
+
+ if fstype == 'wic':
+ if fsflag == 'no-kernel-in-fs':
+ wic_fs = True
+ elif fsflag == 'kernel-in-fs':
+ wic_fs = False
+ else:
+ logger.warn('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ continue
+ else:
+ logger.warn('QB_FSINFO is not supported for image type "%s"', fstype)
+ continue
+
+ if fstype in self.fsinfo:
+ self.fsinfo[fstype].append(fsflag)
+ else:
+ self.fsinfo[fstype] = [fsflag]
+ except Exception:
+ logger.error('Invalid parameter "%s" in QB_FSINFO', fsinfo)
+
+ # treat wic images as vmimages (with kernel) or as fsimages (rootfs only)
+ if wic_fs:
+ self.fstypes = self.fstypes + self.wictypes
+ else:
+ self.vmtypes = self.vmtypes + self.wictypes
+
def check_rootfs(self):
"""Check and set rootfs"""
@@ -582,6 +682,23 @@ class BaseConfig(object):
if not os.path.exists(self.rootfs):
raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
+ def setup_pkkek1(self):
+ """
+ Extract from PEM certificate the Platform Key and first Key
+ Exchange Key certificate string. The hypervisor needs to provide
+ it in the Type 11 SMBIOS table
+ """
+ pemcert = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), 'OvmfPkKek1.pem')
+ try:
+ with open(pemcert, 'r') as pemfile:
+ key = pemfile.read().replace('\n', ''). \
+ replace('-----BEGIN CERTIFICATE-----', ''). \
+ replace('-----END CERTIFICATE-----', '')
+ self.ovmf_secboot_pkkek1 = key
+
+ except FileNotFoundError:
+ raise RunQemuError("Can't open PEM certificate %s " % pemcert)
+
def check_ovmf(self):
"""Check and set full path for OVMF firmware and variable file(s)."""
@@ -592,6 +709,8 @@ class BaseConfig(object):
path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
if os.path.exists(path):
self.ovmf_bios[index] = path
+ if ovmf.endswith('secboot'):
+ self.setup_pkkek1()
break
else:
raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
@@ -646,51 +765,77 @@ class BaseConfig(object):
if not os.path.exists(self.dtb):
raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
- def check_biosdir(self):
- """Check custombiosdir"""
- if not self.custombiosdir:
+ def check_bios(self):
+ """Check and set bios"""
+
+ # See if the user supplied a BIOS option
+ if self.get('BIOS'):
+ self.bios = self.get('BIOS')
+
+ # QB_DEFAULT_BIOS is always a full file path
+ bios_name = os.path.basename(self.get('QB_DEFAULT_BIOS'))
+
+ # The user didn't want a bios to be loaded
+ if (bios_name == "" or bios_name == "none") and not self.bios:
return
- biosdir = ""
- biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
- biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
- for i in (self.custombiosdir, biosdir_native, biosdir_host):
- if os.path.isdir(i):
- biosdir = i
- break
+ if not self.bios:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ self.bios = "%s/%s" % (deploy_dir_image, bios_name)
+
+ if not self.bios:
+            raise RunQemuError('BIOS not found: %s' % bios_name)
+
+ if not os.path.exists(self.bios):
+ raise RunQemuError("BIOS %s not found" % self.bios)
- if biosdir:
- logger.debug("Assuming biosdir is: %s" % biosdir)
- self.qemu_opt_script += ' -L %s' % biosdir
- else:
- logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
- raise RunQemuError("Invalid custombiosdir: %s" % self.custombiosdir)
def check_mem(self):
- s = re.search('-m +([0-9]+)', self.qemu_opt_script)
+ """
+        Both qemu and the kernel need memory settings, so check QB_MEM and set it
+        for both.
+ """
+ s = re.search('-m +([0-9]+)', self.qemuparams)
if s:
self.set('QB_MEM', '-m %s' % s.group(1))
elif not self.get('QB_MEM'):
- logger.info('QB_MEM is not set, use 512M by default')
- self.set('QB_MEM', '-m 512')
+ logger.info('QB_MEM is not set, use 256M by default')
+ self.set('QB_MEM', '-m 256')
+
+ # Check and remove M or m suffix
+ qb_mem = self.get('QB_MEM')
+ if qb_mem.endswith('M') or qb_mem.endswith('m'):
+ qb_mem = qb_mem[:-1]
+
+        # Add -m prefix if not present
+ if not qb_mem.startswith('-m'):
+ qb_mem = '-m %s' % qb_mem
+
+ self.set('QB_MEM', qb_mem)
mach = self.get('MACHINE')
- if not mach.startswith('qemumips'):
+ if not mach.startswith(('qemumips', 'qemux86')):
self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
self.qemu_opt_script += ' %s' % self.get('QB_MEM')
def check_tcpserial(self):
if self.tcpserial_portnum:
+ ports = self.tcpserial_portnum.split(':')
+ port = ports[0]
if self.get('QB_TCPSERIAL_OPT'):
- self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+
+ if len(ports) > 1:
+ for port in ports[1:]:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
def check_and_set(self):
"""Check configs sanity and set when needed"""
self.validate_paths()
- if not self.slirp_enabled:
+ if not self.slirp_enabled and not self.net_bridge:
check_tun()
# Check audio
if self.audio_enabled:
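
check_mem() now accepts QB_MEM with or without the -m prefix and with an optional M/m suffix. A minimal sketch of that normalization:

    def normalize_qb_mem(qb_mem):
        # '512M', '512m', '512' and '-m 512' all normalize to '-m 512'
        if qb_mem.endswith(('M', 'm')):
            qb_mem = qb_mem[:-1]
        if not qb_mem.startswith('-m'):
            qb_mem = '-m %s' % qb_mem
        return qb_mem

    assert normalize_qb_mem('512M') == '-m 512'
    assert normalize_qb_mem('-m 256') == '-m 256'
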
@@ -704,13 +849,14 @@ class BaseConfig(object):
else:
os.putenv('QEMU_AUDIO_DRV', 'none')
+ self.check_qemu_system()
self.check_kvm()
self.check_fstype()
self.check_rootfs()
self.check_ovmf()
self.check_kernel()
self.check_dtb()
- self.check_biosdir()
+ self.check_bios()
self.check_mem()
self.check_tcpserial()
@@ -819,21 +965,30 @@ class BaseConfig(object):
self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))
def print_config(self):
- logger.info('Continuing with the following parameters:\n')
+ logoutput = ['Continuing with the following parameters:']
if not self.fstype in self.vmtypes:
- print('KERNEL: [%s]' % self.kernel)
+ logoutput.append('KERNEL: [%s]' % self.kernel)
+ if self.bios:
+ logoutput.append('BIOS: [%s]' % self.bios)
if self.dtb:
- print('DTB: [%s]' % self.dtb)
- print('MACHINE: [%s]' % self.get('MACHINE'))
- print('FSTYPE: [%s]' % self.fstype)
+ logoutput.append('DTB: [%s]' % self.dtb)
+ logoutput.append('MACHINE: [%s]' % self.get('MACHINE'))
+ try:
+ fstype_flags = ' (' + ', '.join(self.fsinfo[self.fstype]) + ')'
+ except KeyError:
+ fstype_flags = ''
+ logoutput.append('FSTYPE: [%s%s]' % (self.fstype, fstype_flags))
if self.fstype == 'nfs':
- print('NFS_DIR: [%s]' % self.rootfs)
+ logoutput.append('NFS_DIR: [%s]' % self.rootfs)
else:
- print('ROOTFS: [%s]' % self.rootfs)
+ logoutput.append('ROOTFS: [%s]' % self.rootfs)
if self.ovmf_bios:
- print('OVMF: %s' % self.ovmf_bios)
- print('CONFFILE: [%s]' % self.qemuboot)
- print('')
+ logoutput.append('OVMF: %s' % self.ovmf_bios)
+ if (self.ovmf_secboot_pkkek1):
+ logoutput.append('SECBOOT PKKEK1: [%s...]' % self.ovmf_secboot_pkkek1[0:100])
+ logoutput.append('CONFFILE: [%s]' % self.qemuboot)
+ logoutput.append('')
+ logger.info('\n'.join(logoutput))
def setup_nfs(self):
if not self.nfs_server:
@@ -863,7 +1018,7 @@ class BaseConfig(object):
# Use '%s' since they are integers
os.putenv(k, '%s' % v)
- self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port)
+ self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s" % (nfsd_port, mountd_port)
# Extract .tar.bz2 or .tar.bz if no nfs dir
if not (self.rootfs and os.path.isdir(self.rootfs)):
@@ -887,8 +1042,9 @@ class BaseConfig(object):
logger.info('Running %s...' % str(cmd))
if subprocess.call(cmd) != 0:
raise RunQemuError('Failed to run %s' % cmd)
- self.clean_nfs_dir = True
self.rootfs = dest
+ self.cleanup_files.append(self.rootfs)
+ self.cleanup_files.append('%s.pseudo_state' % self.rootfs)
# Start the userspace NFS server
cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
@@ -898,12 +1054,18 @@ class BaseConfig(object):
self.nfs_running = True
+ def setup_net_bridge(self):
+ self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
+ self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+
def setup_slirp(self):
"""Setup user networking"""
if self.fstype == 'nfs':
self.setup_nfs()
- self.kernel_cmdline_script += ' ip=dhcp'
+ netconf = " " + self.cmdline_ip_slirp
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
# Port mapping
hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
@@ -912,10 +1074,21 @@ class BaseConfig(object):
ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
ports = [int(i) for i in ports]
mac = 2
+
+ lockdir = "/tmp/qemu-port-locks"
+ if not os.path.exists(lockdir):
+        # There might be a race when multiple runqemu processes are
+        # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+
# Find a free port to avoid conflicts
for p in ports[:]:
p_new = p
- while not check_free_port('localhost', p_new):
+ while not self.check_free_port('localhost', p_new, lockdir):
p_new += 1
mac += 1
while p_new in ports:
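
Creating /tmp/qemu-port-locks is itself racy when several runqemu instances start together, hence the FileExistsError catch above. A minimal sketch of the same create-or-tolerate pattern (directory name illustrative):

    import os

    def ensure_lockdir(lockdir):
        # Another process may create the directory between the exists()
        # check and mkdir(), so treat FileExistsError as success.
        if not os.path.exists(lockdir):
            try:
                os.mkdir(lockdir)
                os.chmod(lockdir, 0o777)
            except FileExistsError:
                pass

    ensure_lockdir('/tmp/example-port-locks')
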
@@ -970,8 +1143,8 @@ class BaseConfig(object):
if os.path.exists('%s.skip' % lockfile):
logger.info('Found %s.skip, skipping %s' % (lockfile, p))
continue
- self.lock = lockfile + '.lock'
- if self.acquire_lock(error=False):
+ self.taplock = lockfile + '.lock'
+ if self.acquire_taplock(error=False):
tap = p
logger.info("Using preconfigured tap device %s" % tap)
logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
@@ -987,25 +1160,31 @@ class BaseConfig(object):
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
- tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ try:
+ tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ except subprocess.CalledProcessError as e:
+ logger.error('Setting up tap device failed:\n%s\nRun runqemu-gen-tapdevs to manually create one.' % str(e))
+ sys.exit(1)
lockfile = os.path.join(lockdir, tap)
- self.lock = lockfile + '.lock'
- self.acquire_lock()
+ self.taplock = lockfile + '.lock'
+ self.acquire_taplock()
self.cleantap = True
logger.debug('Created tap: %s' % tap)
if not tap:
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
- return 1
+ sys.exit(1)
self.tap = tap
tapnum = int(tap[3:])
gateway = tapnum * 2 + 1
client = gateway + 1
if self.fstype == 'nfs':
self.setup_nfs()
- netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
- logger.info("Network configuration: %s", netconf)
- self.kernel_cmdline_script += " ip=%s" % netconf
+ netconf = " " + self.cmdline_ip_tap
+ netconf = netconf.replace('@CLIENT@', str(client))
+ netconf = netconf.replace('@GATEWAY@', str(gateway))
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
mac = "%s%02x" % (self.mac_tap, client)
qb_tap_opt = self.get('QB_TAP_OPT')
if qb_tap_opt:
@@ -1024,9 +1203,13 @@ class BaseConfig(object):
if sys.stdin.isatty():
self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
- if self.slirp_enabled:
+ if self.net_bridge:
+ self.setup_net_bridge()
+ elif self.slirp_enabled:
+ self.cmdline_ip_slirp = self.get('QB_CMDLINE_IP_SLIRP') or self.cmdline_ip_slirp
self.setup_slirp()
else:
+ self.cmdline_ip_tap = self.get('QB_CMDLINE_IP_TAP') or self.cmdline_ip_tap
self.setup_tap()
def setup_rootfs(self):
@@ -1034,7 +1217,19 @@ class BaseConfig(object):
return
if 'wic.' in self.fstype:
self.fstype = self.fstype[4:]
- rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'vhd', 'vhdx', 'qcow2', 'vdi') else 'raw'
+
+ tmpfsdir = os.environ.get("RUNQEMU_TMPFS_DIR", None)
+ if self.snapshot and tmpfsdir:
+ newrootfs = os.path.join(tmpfsdir, os.path.basename(self.rootfs)) + "." + str(os.getpid())
+ logger.info("Copying rootfs to %s" % newrootfs)
+ copy_start = time.time()
+ shutil.copyfile(self.rootfs, newrootfs)
+ logger.info("Copy done in %s seconds" % (time.time() - copy_start))
+ self.rootfs = newrootfs
+ # Don't need a second copy now!
+ self.snapshot = False
+ self.cleanup_files.append(newrootfs)
qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
if qb_rootfs_opt:
@@ -1042,6 +1237,10 @@ class BaseConfig(object):
else:
self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
+ qb_rootfs_extra_opt = self.get("QB_ROOTFS_EXTRA_OPT")
+ if qb_rootfs_extra_opt and not qb_rootfs_extra_opt.startswith(","):
+ qb_rootfs_extra_opt = "," + qb_rootfs_extra_opt
+
if self.fstype in ('cpio.gz', 'cpio'):
self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
self.rootfs_options = '-initrd %s' % self.rootfs
@@ -1054,11 +1253,15 @@ class BaseConfig(object):
drive_type = self.get('QB_DRIVE_TYPE')
if drive_type.startswith("/dev/sd"):
logger.info('Using scsi drive')
- vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
- % (self.rootfs, rootfs_format)
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd%s' \
+ % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
elif drive_type.startswith("/dev/hd"):
logger.info('Using ide drive')
vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
+ elif drive_type.startswith("/dev/vdb"):
+            logger.info('Using block virtio drive')
+            vm_drive = '-drive id=disk0,file=%s,if=none,format=%s -device virtio-blk-device,drive=disk0%s' \
+                       % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
else:
# virtio might have been selected explicitly (just use it), or
# is used as fallback (then warn about that).
@@ -1069,13 +1272,21 @@ class BaseConfig(object):
vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
# All branches above set vm_drive.
- self.rootfs_options = '%s -no-reboot' % vm_drive
- self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+ self.rootfs_options = vm_drive
+ if not self.fstype in self.vmtypes:
+ self.rootfs_options += ' -no-reboot'
+
+ # By default, ' rw' is appended to QB_KERNEL_ROOT unless either ro or rw is explicitly passed.
+ qb_kernel_root = self.get('QB_KERNEL_ROOT')
+ qb_kernel_root_l = qb_kernel_root.split()
+ if not ('ro' in qb_kernel_root_l or 'rw' in qb_kernel_root_l):
+ qb_kernel_root += ' rw'
+ self.kernel_cmdline = 'root=%s' % qb_kernel_root
if self.fstype == 'nfs':
self.rootfs_options = ''
k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
- self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+ self.kernel_cmdline = 'root=%s rw' % k_root
if self.fstype == 'none':
self.rootfs_options = ''
@@ -1129,21 +1340,90 @@ class BaseConfig(object):
return 'qemu-system-%s' % qbsys
- def setup_final(self):
+ def check_qemu_system(self):
qemu_system = self.get('QB_SYSTEM_NAME')
if not qemu_system:
qemu_system = self.guess_qb_system()
if not qemu_system:
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
+ self.qemu_system = qemu_system
+
+ def setup_vga(self):
+ if self.nographic == True:
+ if self.sdl == True:
+ raise RunQemuError('Option nographic makes no sense alongside the sdl option.')
+ if self.gtk == True:
+ raise RunQemuError('Option nographic makes no sense alongside the gtk option.')
+ self.qemu_opt += ' -nographic'
+
+ if self.novga == True:
+ self.qemu_opt += ' -vga none'
+ return
+
+ if (self.gl_es == True or self.gl == True) and (self.sdl == False and self.gtk == False):
+ raise RunQemuError('Option gl/gl-es needs gtk or sdl option.')
+
+ if self.sdl == True or self.gtk == True or self.egl_headless == True:
+ if self.gl or self.gl_es or self.egl_headless:
+ self.qemu_opt += ' -device virtio-vga-gl '
+ else:
+ self.qemu_opt += ' -device virtio-vga '
+
+ self.qemu_opt += '-display '
+ if self.egl_headless == True:
+ self.set_dri_path()
+ self.qemu_opt += 'egl-headless,'
+ else:
+ if self.sdl == True:
+ self.qemu_opt += 'sdl,'
+ elif self.gtk == True:
+ os.environ['FONTCONFIG_PATH'] = '/etc/fonts'
+ self.qemu_opt += 'gtk,'
+
+ if self.gl == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=on,'
+ elif self.gl_es == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=es,'
+ self.qemu_opt += 'show-cursor=on'
+
+ self.qemu_opt += ' %s' %self.get('QB_GRAPHICS')
+
+ def setup_serial(self):
+ # Setup correct kernel command line for serial
+ if self.get('SERIAL_CONSOLES') and (self.serialstdio == True or self.serialconsole == True or self.nographic == True or self.tcpserial_portnum):
+ for entry in self.get('SERIAL_CONSOLES').split(' '):
+ self.kernel_cmdline_script += ' console=%s' %entry.split(';')[1]
+
+ if self.serialstdio == True or self.nographic == True:
+ self.qemu_opt += " -serial mon:stdio"
+ else:
+ self.qemu_opt += " -serial mon:vc"
+ if self.serialconsole:
+ if sys.stdin.isatty():
+ subprocess.check_call(("stty", "intr", "^]"))
+ logger.info("Interrupt character is '^]'")
- qemu_bin = os.path.join(self.bindir_native, qemu_system)
+ self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
+
+        # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
+ # If no serial or serialtcp options were specified, only ttyS0 is created
+ # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num < 2:
+ self.qemu_opt += " -serial null"
+
+ def setup_final(self):
+ qemu_bin = os.path.join(self.bindir_native, self.qemu_system)
# It is possible to have qemu-native in ASSUME_PROVIDED, and it won't
# find QEMU in sysroot, it needs to use host's qemu.
if not os.path.exists(qemu_bin):
logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
for path in (os.environ['PATH'] or '').split(':'):
- qemu_bin_tmp = os.path.join(path, qemu_system)
+ qemu_bin_tmp = os.path.join(path, self.qemu_system)
logger.info("Trying: %s" % qemu_bin_tmp)
if os.path.exists(qemu_bin_tmp):
qemu_bin = qemu_bin_tmp
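
setup_serial() above derives the kernel console= arguments from SERIAL_CONSOLES, whose entries are 'speed;device' pairs. A minimal sketch of that split:

    def consoles_cmdline(serial_consoles):
        # SERIAL_CONSOLES is e.g. '115200;ttyS0 115200;ttyS1'
        cmdline = ''
        for entry in serial_consoles.split(' '):
            cmdline += ' console=%s' % entry.split(';')[1]
        return cmdline

    print(consoles_cmdline('115200;ttyS0 115200;ttyS1'))
    # -> ' console=ttyS0 console=ttyS1'
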
@@ -1155,54 +1435,32 @@ class BaseConfig(object):
if not os.access(qemu_bin, os.X_OK):
raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
- check_libgl(qemu_bin)
-
- self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+ self.qemu_opt = "%s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('QB_RNG'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND').replace('@DEPLOY_DIR_IMAGE@', self.get('DEPLOY_DIR_IMAGE')))
for ovmf in self.ovmf_bios:
format = ovmf.rsplit('.', 1)[-1]
+ if format == "bin":
+ format = "raw"
self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
- if self.ovmf_bios:
- # OVMF only supports normal VGA, i.e. we need to override a -vga vmware
- # that gets added for example for normal qemux86.
- self.qemu_opt += ' -vga std'
self.qemu_opt += ' ' + self.qemu_opt_script
+ if self.ovmf_secboot_pkkek1:
+ # Provide the Platform Key and first Key Exchange Key certificate as an
+ # OEM string in the SMBIOS Type 11 table. Prepend the certificate string
+ # with "application prefix" of the EnrollDefaultKeys.efi application
+ self.qemu_opt += ' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' \
+ + self.ovmf_secboot_pkkek1
+
+ # Append qemuparams to override previous settings
+ if self.qemuparams:
+ self.qemu_opt += ' ' + self.qemuparams
+
if self.snapshot:
self.qemu_opt += " -snapshot"
- if self.serialconsole:
- if sys.stdin.isatty():
- subprocess.check_call(("stty", "intr", "^]"))
- logger.info("Interrupt character is '^]'")
-
- first_serial = ""
- if not re.search("-nographic", self.qemu_opt):
- first_serial = "-serial mon:vc"
- # We always want a ttyS1. Since qemu by default adds a serial
- # port when nodefaults is not specified, it seems that all that
- # would be needed is to make sure a "-serial" is there. However,
- # it appears that when "-serial" is specified, it ignores the
- # default serial port that is normally added. So here we make
- # sure to add two -serial if there are none. And only one if
- # there is one -serial already.
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT"))
- elif serial_num == 1:
- self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
-
- # We always wants ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES),
- # if not serial or serialtcp options was specified only ttyS0 is created
- # and sysvinit shows an error trying to enable ttyS1:
- # INIT: Id "S1" respawning too fast: disabled for 5 minutes
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- if re.search("-nographic", self.qemu_opt) or self.serialstdio:
- self.qemu_opt += " -serial mon:stdio -serial null"
- else:
- self.qemu_opt += " -serial mon:vc -serial null"
+ self.setup_serial()
+ self.setup_vga()
def start_qemu(self):
import shlex
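
setup_pkkek1() strips the PEM armor so the raw base64 certificate body can ride in an SMBIOS Type 11 OEM string, prefixed with the EnrollDefaultKeys.efi application GUID as in setup_final() above. A minimal sketch of how that value is assembled (certificate content illustrative):

    def smbios_pkkek1_opt(pem_text):
        # Reduce a PEM certificate to its base64 body, then prepend the
        # EnrollDefaultKeys.efi application prefix from the diff above
        key = pem_text.replace('\n', '') \
                      .replace('-----BEGIN CERTIFICATE-----', '') \
                      .replace('-----END CERTIFICATE-----', '')
        return '-smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' + key
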
@@ -1214,12 +1472,19 @@ class BaseConfig(object):
kernel_opts += " -dtb %s" % self.dtb
else:
kernel_opts = ""
+
+ if self.bios:
+ self.qemu_opt += " -bios %s" % self.bios
+
cmd = "%s %s" % (self.qemu_opt, kernel_opts)
cmds = shlex.split(cmd)
logger.info('Running %s\n' % cmd)
pass_fds = []
- if self.lock_descriptor:
- pass_fds = [self.lock_descriptor.fileno()]
+ if self.taplock_descriptor:
+ pass_fds = [self.taplock_descriptor.fileno()]
+ if len(self.portlocks):
+ for descriptor in self.portlocks.values():
+ pass_fds.append(descriptor.fileno())
process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
self.qemupid = process.pid
retcode = process.wait()
@@ -1241,7 +1506,8 @@ class BaseConfig(object):
cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
- self.release_lock()
+ self.release_taplock()
+ self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
@@ -1252,17 +1518,17 @@ class BaseConfig(object):
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
- if self.clean_nfs_dir:
- logger.info('Removing %s' % self.rootfs)
- shutil.rmtree(self.rootfs)
- shutil.rmtree('%s.pseudo_state' % self.rootfs)
+ if self.cleanup_files:
+ for ent in self.cleanup_files:
+ logger.info('Removing %s' % ent)
+ if os.path.isfile(ent):
+ os.remove(ent)
+ else:
+ shutil.rmtree(ent)
self.cleaned = True
- def load_bitbake_env(self, mach=None):
- if self.bitbake_e:
- return
-
+ def run_bitbake_env(self, mach=None):
bitbake = shutil.which('bitbake')
if not bitbake:
return
@@ -1270,14 +1536,24 @@ class BaseConfig(object):
if not mach:
mach = self.get('MACHINE')
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ multiconfig = "mc:%s" % multiconfig
+
if mach:
- cmd = 'MACHINE=%s bitbake -e' % mach
+ cmd = 'MACHINE=%s bitbake -e %s' % (mach, multiconfig)
else:
- cmd = 'bitbake -e'
+ cmd = 'bitbake -e %s' % multiconfig
logger.info('Running %s...' % cmd)
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+
+ def load_bitbake_env(self, mach=None):
+ if self.bitbake_e:
+ return
+
try:
- self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ self.bitbake_e = self.run_bitbake_env(mach=mach)
except subprocess.CalledProcessError as err:
self.bitbake_e = ''
logger.warning("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
@@ -1292,7 +1568,13 @@ class BaseConfig(object):
if result and os.path.exists(result):
return result
- cmd = ('bitbake', 'qemu-helper-native', '-e')
+ cmd = ['bitbake', '-e']
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ cmd.append('mc:%s:qemu-helper-native' % multiconfig)
+ else:
+ cmd.append('qemu-helper-native')
+
logger.info('Running %s...' % str(cmd))
out = subprocess.check_output(cmd).decode('utf-8')
@@ -1314,11 +1596,18 @@ def main():
try:
config = BaseConfig()
+ renice = os.path.expanduser("~/bin/runqemu-renice")
+ if os.path.exists(renice):
+ logger.info('Using %s to renice' % renice)
+ subprocess.check_call([renice, str(os.getpid())])
+
def sigterm_handler(signum, frame):
logger.info("SIGTERM received")
- os.kill(config.qemupid, signal.SIGTERM)
+ if config.qemupid:
+ os.kill(config.qemupid, signal.SIGTERM)
config.cleanup()
- subprocess.check_call(["tput", "smam"])
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
signal.signal(signal.SIGTERM, sigterm_handler)
config.check_args()
@@ -1340,7 +1629,8 @@ def main():
return 1
finally:
config.cleanup()
- subprocess.check_call(["tput", "smam"])
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
if __name__ == "__main__":
sys.exit(main())
diff --git a/scripts/runqemu-addptable2image b/scripts/runqemu-addptable2image
index f0195ad8a3..ca29427258 100755
--- a/scripts/runqemu-addptable2image
+++ b/scripts/runqemu-addptable2image
@@ -4,20 +4,8 @@
#
# Copyright (C) 2006-2007 OpenedHand Ltd.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
IMAGE=$1
IMAGEOUT=$2
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
index 70cdcdbb13..384c091713 100755
--- a/scripts/runqemu-export-rootfs
+++ b/scripts/runqemu-export-rootfs
@@ -2,18 +2,8 @@
#
# Copyright (c) 2005-2009 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
usage() {
echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
index 4da3eb10a7..9bc0c07fb8 100755
--- a/scripts/runqemu-extract-sdk
+++ b/scripts/runqemu-extract-sdk
@@ -7,18 +7,8 @@
#
# Copyright (c) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
function usage() {
echo "Usage: $0 <image-tarball> <extract-dir>"
@@ -79,7 +69,7 @@ fi
pseudo_state_dir="$SDK_ROOTFS_DIR/../$(basename "$SDK_ROOTFS_DIR").pseudo_state"
pseudo_state_dir="$(readlink -f $pseudo_state_dir)"
-debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.tar\.'`"
+debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.rootfs\.tar'`"
if [ -e "$pseudo_state_dir" -a -z "$debug_image" ]; then
echo "Error: $pseudo_state_dir already exists!"
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
index 869fee261d..a6ee4517da 100755
--- a/scripts/runqemu-gen-tapdevs
+++ b/scripts/runqemu-gen-tapdevs
@@ -9,18 +9,8 @@
#
# Copyright (C) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
uid=`id -u`
gid=`id -g`
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
index 2486968588..e0eb5344c6 100755
--- a/scripts/runqemu-ifdown
+++ b/scripts/runqemu-ifdown
@@ -13,18 +13,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
@@ -74,3 +64,4 @@ n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
+true
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
index 59a15eaa2e..bb661740c5 100755
--- a/scripts/runqemu-ifup
+++ b/scripts/runqemu-ifup
@@ -20,18 +20,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
diff --git a/scripts/send-error-report b/scripts/send-error-report
index 3528cf93a9..cfbcaa52cb 100755
--- a/scripts/send-error-report
+++ b/scripts/send-error-report
@@ -6,6 +6,9 @@
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Proca <andreea.b.proca@intel.com>
# Author: Michael Wood <michael.g.wood@intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import urllib.request, urllib.error
import sys
@@ -62,7 +65,7 @@ def edit_content(json_file_path):
def prepare_data(args):
# attempt to get the max_log_size from the server's settings
- max_log_size = getPayloadLimit("https://"+args.server+"/ClientPost/JSON")
+ max_log_size = getPayloadLimit(args.protocol+args.server+"/ClientPost/JSON")
if not os.path.isfile(args.error_file):
log.error("No data file found.")
@@ -132,9 +135,9 @@ def send_data(data, args):
headers={'Content-type': 'application/json', 'User-Agent': "send-error-report/"+version}
if args.json:
- url = "https://"+args.server+"/ClientPost/JSON/"
+ url = args.protocol+args.server+"/ClientPost/JSON/"
else:
- url = "https://"+args.server+"/ClientPost/"
+ url = args.protocol+args.server+"/ClientPost/"
req = urllib.request.Request(url, data=data, headers=headers)
try:
@@ -187,6 +190,11 @@ if __name__ == '__main__':
help="Return the result in json format, silences all other output",
action="store_true")
+ arg_parse.add_argument("--no-ssl",
+ help="Use the http protocol instead of https",
+ dest="protocol",
+ action="store_const", const="http://", default="https://")
+
args = arg_parse.parse_args()
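
With --no-ssl, args.protocol flips from the default "https://" to "http://", which suits a locally hosted error-report-web instance without TLS. An illustrative invocation (host name and report file are placeholders; the -s/--server option is assumed from the script's existing interface):

    send-error-report --no-ssl -s errors.example.local:8000 \
        tmp/log/error-report/error_report_1565000000.txt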
diff --git a/scripts/send-pull-request b/scripts/send-pull-request
index 883deacb07..70b5a5cfb2 100755
--- a/scripts/send-pull-request
+++ b/scripts/send-pull-request
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2010-2011, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
index 2ab450ab59..d39671f7c6 100755
--- a/scripts/sstate-cache-management.sh
+++ b/scripts/sstate-cache-management.sh
@@ -2,18 +2,7 @@
# Copyright (c) 2012 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# Global vars
@@ -125,7 +114,7 @@ echo_error () {
# * Add .done/.siginfo to the remove list
# * Add destination of symlink to the remove list
#
-# $1: output file, others: sstate cache file (.tgz)
+# $1: output file, others: sstate cache file (.tar.zst)
gen_rmlist (){
local rmlist_file="$1"
shift
@@ -142,13 +131,13 @@ gen_rmlist (){
dest="`readlink -e $i`"
if [ -n "$dest" ]; then
echo $dest >> $rmlist_file
- # Remove the .siginfo when .tgz is removed
+ # Remove the .siginfo when .tar.zst is removed
if [ -f "$dest.siginfo" ]; then
echo $dest.siginfo >> $rmlist_file
fi
fi
fi
- # Add the ".tgz.done" and ".siginfo.done" (may exist in the future)
+ # Add the ".tar.zst.done" and ".siginfo.done" (may exist in the future)
base_fn="${i##/*/}"
t_fn="$base_fn.done"
s_fn="$base_fn.siginfo.done"
@@ -199,10 +188,10 @@ remove_duplicated () {
total_files=`find $cache_dir -name 'sstate*' | wc -l`
# Save all the sstate files in a file
sstate_files_list=`mktemp` || exit 1
- find $cache_dir -name 'sstate:*:*:*:*:*:*:*.tgz*' >$sstate_files_list
+ find $cache_dir -iname 'sstate:*:*:*:*:*:*:*.tar.zst*' >$sstate_files_list
echo "Figuring out the suffixes in the sstate cache dir ... "
- sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $sstate_files_list | sort -u`"
+ sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tar\.zst.*%\1%g' $sstate_files_list | sort -u`"
echo "Done"
echo "The following suffixes have been found in the cache dir:"
echo $sstate_suffixes
@@ -211,10 +200,10 @@ remove_duplicated () {
# Using this SSTATE_PKGSPEC definition it's 6th colon separated field
# SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
for arch in $all_archs; do
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
+ grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tar\.zst$" $sstate_files_list
[ $? -eq 0 ] && ava_archs="$ava_archs $arch"
# ${builder_arch}_$arch used by toolchain sstate
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
+ grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tar\.zst$" $sstate_files_list
[ $? -eq 0 ] && ava_archs="$ava_archs ${builder_arch}_$arch"
done
echo "Done"
@@ -230,13 +219,13 @@ remove_duplicated () {
continue
fi
# Total number of files including .siginfo and .done files
- total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list | wc -l 2>/dev/null`
- total_tgz_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz$" $sstate_files_list | wc -l 2>/dev/null`
+ total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst.*" $sstate_files_list | wc -l 2>/dev/null`
+ total_archives_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst$" $sstate_files_list | wc -l 2>/dev/null`
# Save the file list to a file, some suffix's file may not exist
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list >$list_suffix 2>/dev/null
- local deleted_tgz=0
+ grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst.*" $sstate_files_list >$list_suffix 2>/dev/null
+ local deleted_archives=0
local deleted_files=0
- for ext in tgz tgz.siginfo tgz.done; do
+ for ext in tar.zst tar.zst.siginfo tar.zst.done; do
echo "Figuring out the sstate:xxx_$suffix.$ext ... "
# Uniq BPNs
file_names=`for arch in $ava_archs ""; do
@@ -279,19 +268,19 @@ remove_duplicated () {
done
done
done
- deleted_tgz=`cat $rm_list.* 2>/dev/null | grep ".tgz$" | wc -l`
+ deleted_archives=`cat $rm_list.* 2>/dev/null | grep "\.tar\.zst$" | wc -l`
deleted_files=`cat $rm_list.* 2>/dev/null | wc -l`
[ "$deleted_files" -gt 0 -a $debug -gt 0 ] && cat $rm_list.*
- echo "($deleted_tgz out of $total_tgz_suffix .tgz files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
+ echo "($deleted_archives out of $total_archives_suffix .tar.zst files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
let total_deleted=$total_deleted+$deleted_files
done
- deleted_tgz=0
+ deleted_archives=0
rm_old_list=$remove_listdir/sstate-old-filenames
- find $cache_dir -name 'sstate-*.tgz' >$rm_old_list
- [ -s "$rm_old_list" ] && deleted_tgz=`cat $rm_old_list | grep ".tgz$" | wc -l`
+ find $cache_dir -name 'sstate-*.tar.zst' >$rm_old_list
+ [ -s "$rm_old_list" ] && deleted_archives=`cat $rm_old_list | grep "\.tar\.zst$" | wc -l`
[ -s "$rm_old_list" ] && deleted_files=`cat $rm_old_list | wc -l`
[ -s "$rm_old_list" -a $debug -gt 0 ] && cat $rm_old_list
- echo "($deleted_tgz .tgz files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
+ echo "($deleted_archives .tar.zst files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
let total_deleted=$total_deleted+$deleted_files
rm -f $list_suffix
@@ -300,7 +289,7 @@ remove_duplicated () {
read_confirm
if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
for list in `ls $remove_listdir/`; do
- echo "Removing $list.tgz (`cat $remove_listdir/$list | wc -w` files) ... "
+ echo "Removing $list.tar.zst archive (`cat $remove_listdir/$list | wc -w` files) ... "
# Remove them one by one to avoid the argument list too long error
for i in `cat $remove_listdir/$list`; do
rm -f $verbose $i
@@ -333,7 +322,7 @@ rm_by_stamps (){
find $cache_dir -type f -name 'sstate*' | sort -u -o $cache_list
echo "Figuring out the suffixes in the sstate cache dir ... "
- local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $cache_list | sort -u`"
+ local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tar\.zst.*%\1%g' $cache_list | sort -u`"
echo "Done"
echo "The following suffixes have been found in the cache dir:"
echo $sstate_suffixes
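
For reference, these are the two naming generations the script now distinguishes, per the SSTATE_PKGSPEC quoted in the hunk above (hash and package fields are illustrative):

    # current colon-separated names, handled by remove_duplicated():
    #   sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:<hash>_populate_sysroot.tar.zst
    # legacy dash-separated names are collected separately:
    find $SSTATE_CACHE_DIR -name 'sstate-*.tar.zst'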
diff --git a/scripts/sstate-diff-machines.sh b/scripts/sstate-diff-machines.sh
index 27c6a33006..8b64e11be1 100755
--- a/scripts/sstate-diff-machines.sh
+++ b/scripts/sstate-diff-machines.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to compare sstate checksums between MACHINES.
# Execute script and compare generated list.M files.
# Using bash to have PIPESTATUS variable.
@@ -125,6 +127,8 @@ for M in ${machines}; do
fi
done
+COMPARE_TASKS="do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata do_package_write_rpm.sigdata do_package_write_deb.sigdata do_package_write_tar.sigdata"
+
function compareSignatures() {
MACHINE1=$1
MACHINE2=$2
@@ -132,7 +136,7 @@ function compareSignatures() {
PRE_PATTERN=""
[ -n "${PATTERN}" ] || PRE_PATTERN="-v"
[ -n "${PATTERN}" ] || PATTERN="MACHINE"
- for TASK in do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata; do
+ for TASK in $COMPARE_TASKS; do
printf "\n\n === Comparing signatures for task ${TASK} between ${MACHINE1} and ${MACHINE2} ===\n" | tee -a ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log
diff ${OUTPUT}/${MACHINE1}/list.M ${OUTPUT}/${MACHINE2}/list.M | grep ${PRE_PATTERN} "${PATTERN}" | grep ${TASK} > ${OUTPUT}/signatures.${MACHINE2}.${TASK}
for i in `cat ${OUTPUT}/signatures.${MACHINE2}.${TASK} | sed 's#[^/]*/\([^/]*\)/.*#\1#g' | sort -u | xargs`; do
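
With COMPARE_TASKS factored out, the rpm, deb and tar package-write signatures are compared alongside the original three tasks. Assuming the script's existing --tmpdir/--machines/--targets options, a typical run is:

    scripts/sstate-diff-machines.sh --tmpdir=tmp --machines="qemux86-64 qemuarm" --targets=core-image-minimal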
diff --git a/scripts/sstate-sysroot-cruft.sh b/scripts/sstate-sysroot-cruft.sh
index d9917f5152..9c948e932d 100755
--- a/scripts/sstate-sysroot-cruft.sh
+++ b/scripts/sstate-sysroot-cruft.sh
@@ -1,5 +1,7 @@
#!/bin/sh
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to find files installed in sysroot which are not tracked by sstate manifest
# Global vars
@@ -143,18 +145,6 @@ WHITELIST="${WHITELIST} \
.*/var/cache/fontconfig/ \
"
-# created by oe.utils.write_ld_so_conf which is used from few bbclasses and recipes:
-# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811
-# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061
-WHITELIST="${WHITELIST} \
- [^/]*/etc/ld.so.conf \
-"
-
SYSROOTS="`readlink -f ${tmpdir}`/sysroots/"
mkdir ${OUTPUT}
diff --git a/scripts/sysroot-relativelinks.py b/scripts/sysroot-relativelinks.py
index ffe254728b..56e36f3ad5 100755
--- a/scripts/sysroot-relativelinks.py
+++ b/scripts/sysroot-relativelinks.py
@@ -1,4 +1,8 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import os
diff --git a/scripts/task-time b/scripts/task-time
index e58040a9b9..bcd1e25817 100755
--- a/scripts/task-time
+++ b/scripts/task-time
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import argparse
import os
diff --git a/scripts/test-reexec b/scripts/test-reexec
index 30e792c7d9..fccdac4da6 100755
--- a/scripts/test-reexec
+++ b/scripts/test-reexec
@@ -3,21 +3,8 @@
# Test Script for task re-execution
#
# Copyright 2012 Intel Corporation
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to address issues for re-execution of
diff --git a/scripts/test-remote-image b/scripts/test-remote-image
index 27b1cae38f..d209d22854 100755
--- a/scripts/test-remote-image
+++ b/scripts/test-remote-image
@@ -1,19 +1,9 @@
#!/usr/bin/env python3
-
-# Copyright (c) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (c) 2014 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script is used to test public autobuilder images on remote hardware.
diff --git a/scripts/tiny/dirsize.py b/scripts/tiny/dirsize.py
index ddccc5a8c8..501516b0d4 100755
--- a/scripts/tiny/dirsize.py
+++ b/scripts/tiny/dirsize.py
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Display details of the root filesystem size, broken up by directory.
# Allows for limiting by size to focus on the larger files.
diff --git a/scripts/tiny/ksize.py b/scripts/tiny/ksize.py
index ea1ca7ff23..db2b9ec39f 100755
--- a/scripts/tiny/ksize.py
+++ b/scripts/tiny/ksize.py
@@ -1,24 +1,10 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-#
-# Display details of the kernel build size, broken up by built-in.o. Sort
+# Display details of the kernel build size, broken up by built-in.[o,a]. Sort
# the objects by size. Run from the top level kernel build directory.
#
# Author: Darren Hart <dvhart@linux.intel.com>
@@ -41,7 +27,7 @@ def usage():
class Sizes:
def __init__(self, glob):
self.title = glob
- p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
output = p.communicate()[0].splitlines()
if len(output) > 2:
sizes = output[-1].split()[0:4]
@@ -63,17 +49,17 @@ class Report:
path = os.path.dirname(filename)
p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
- shell=True, stdout=PIPE, stderr=PIPE)
+ shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
glob = ' '.join(p.communicate()[0].splitlines())
oreport = Report(glob, str(path) + "/*.o")
oreport.sizes.title = str(path) + "/*.o"
r.parts.append(oreport)
if subglob:
- p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
for f in p.communicate()[0].splitlines():
path = os.path.dirname(f)
- r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
+ r.parts.append(Report.create(f, path, str(path) + "/*/built-in.[o,a]"))
r.parts.sort(reverse=True)
for b in r.parts:
@@ -153,7 +139,7 @@ def main():
else:
assert False, "unhandled option"
- glob = "arch/*/built-in.o */built-in.o"
+ glob = "arch/*/built-in.[o,a] */built-in.[o,a]"
vmlinux = Report.create("vmlinux", "Linux Kernel", glob)
vmlinux.show()
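
The built-in.[o,a] globs track the kernel's switch from built-in.o to thin-archive built-in.a in newer kernels, so either form is reported. An illustrative run (both paths are placeholders), executed per the script header from the top-level kernel build directory:

    cd /path/to/kernel-build
    /path/to/poky/scripts/tiny/ksize.py | head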
diff --git a/scripts/tiny/ksum.py b/scripts/tiny/ksum.py
index d4f3892156..8f0e4c0517 100755
--- a/scripts/tiny/ksum.py
+++ b/scripts/tiny/ksum.py
@@ -1,22 +1,8 @@
-#!/usr/bin/env python
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
# module sizes for a built kernel, as a quick tool for comparing the
@@ -76,7 +62,7 @@ def is_ko_file(filename):
return False
def collect_object_files():
- print "Collecting object files recursively from %s..." % os.getcwd()
+ print("Collecting object files recursively from %s..." % os.getcwd())
for dirpath, dirs, files in os.walk(os.getcwd()):
for filename in files:
if is_ko_file(filename):
@@ -84,7 +70,7 @@ def collect_object_files():
elif is_vmlinux_file(filename):
global vmlinux_file
vmlinux_file = os.path.join(dirpath, filename)
- print "Collecting object files [DONE]"
+ print("Collecting object files [DONE]")
def add_ko_file(filename):
-    p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
+    p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
@@ -92,9 +78,9 @@ def add_ko_file(filename):
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % filename[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % filename[len(os.getcwd()) + 1:])
global n_ko_files, ko_text, ko_data, ko_bss, ko_total
ko_text += int(sizes[0])
ko_data += int(sizes[1])
@@ -108,9 +94,9 @@ def get_vmlinux_totals():
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % vmlinux_file[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % vmlinux_file[len(os.getcwd()) + 1:])
global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
vmlinux_text += int(sizes[0])
vmlinux_data += int(sizes[1])
@@ -143,20 +129,20 @@ def main():
sum_ko_files()
get_vmlinux_totals()
- print "\nTotals:"
- print "\nvmlinux:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total)
- print "\nmodules (%d):" % n_ko_files
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (ko_text, ko_data, ko_bss, ko_total)
- print "\nvmlinux + modules:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ print("\nTotals:")
+ print("\nvmlinux:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total))
+ print("\nmodules (%d):" % n_ko_files)
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (ko_text, ko_data, ko_bss, ko_total))
+ print("\nvmlinux + modules:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
(vmlinux_text + ko_text, vmlinux_data + ko_data, \
- vmlinux_bss + ko_bss, vmlinux_total + ko_total)
+ vmlinux_bss + ko_bss, vmlinux_total + ko_total))
if __name__ == "__main__":
try:
diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms
index a979bd2965..ec2374f183 100755
--- a/scripts/verify-bashisms
+++ b/scripts/verify-bashisms
@@ -1,8 +1,11 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys, os, subprocess, re, shutil
-whitelist = (
+allowed = (
# type is supported by dash
'if type systemctl >/dev/null 2>/dev/null; then',
'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
@@ -16,8 +19,8 @@ whitelist = (
'. $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE'
)
-def is_whitelisted(s):
- for w in whitelist:
+def is_allowed(s):
+ for w in allowed:
if w in s:
return True
return False
@@ -46,7 +49,7 @@ def process(filename, function, lineno, script):
output = e.output.replace(fn.name, function)
if not output or not output.startswith('possible bashism'):
# Probably starts with or contains only warnings. Dump verbatim
- # with one space indention. Can't do the splitting and whitelist
+ # with one space indentation. Can't do the splitting and allowed-list
# checking below.
return '\n'.join([filename,
' Unexpected output from checkbashisms.pl'] +
@@ -62,7 +65,7 @@ def process(filename, function, lineno, script):
# ...
# ...
result = []
- # Check the results against the whitelist
+ # Check the results against the allowed list
for message, source in zip(output[0::2], output[1::2]):
- if not is_whitelisted(source):
+ if not is_allowed(source):
if lineno is not None:
@@ -97,7 +100,7 @@ if __name__=='__main__':
args = parser.parse_args()
if shutil.which("checkbashisms.pl") is None:
- print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl")
sys.exit(1)
# The order of defining the worker function,
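
If checkbashisms.pl is missing, fetching it from the URL in the updated message and putting it on PATH is enough:

    wget -O ~/bin/checkbashisms.pl \
        https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl
    chmod +x ~/bin/checkbashisms.pl
    ./scripts/verify-bashisms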
diff --git a/scripts/wic b/scripts/wic
index 37dfe2dc58..aee63a45aa 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'wic' is the OpenEmbedded Image Creator that users can
# use to generate bootable images. Invoking it without any arguments
@@ -35,9 +21,10 @@ import os
import sys
import argparse
import logging
+import subprocess
+import shutil
from collections import namedtuple
-from distutils import spawn
# External modules
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -60,13 +47,10 @@ if os.environ.get('SDKTARGETSYSROOT'):
break
sdkroot = os.path.dirname(sdkroot)
-bitbake_exe = spawn.find_executable('bitbake')
+bitbake_exe = shutil.which('bitbake')
if bitbake_exe:
bitbake_path = scriptpath.add_bitbake_lib_path()
- from bb import cookerdata
- from bb.main import bitbake_main, BitBakeConfigParameters
-else:
- bitbake_main = None
+ import bb
from wic import WicError
from wic.misc import get_bitbake_var, BB_VARS
@@ -124,7 +108,7 @@ def wic_create_subcommand(options, usage_str):
Command-line handling for image creation. The real work is done
by image.engine.wic_create()
"""
- if options.build_rootfs and not bitbake_main:
+ if options.build_rootfs and not bitbake_exe:
raise WicError("Can't build rootfs as bitbake is not in the $PATH")
if not options.image_name:
@@ -160,9 +144,7 @@ def wic_create_subcommand(options, usage_str):
argv.append("--debug")
logger.info("Building rootfs...\n")
- if bitbake_main(BitBakeConfigParameters(argv),
- cookerdata.CookerConfiguration()):
- raise WicError("bitbake exited with error")
+ subprocess.check_call(argv)
rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
@@ -177,11 +159,12 @@ def wic_create_subcommand(options, usage_str):
"(Use -e/--image-name to specify it)")
native_sysroot = options.native_sysroot
+ if options.kernel_dir:
+ kernel_dir = options.kernel_dir
+
if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
logger.info("Building wic-tools...\n")
- if bitbake_main(BitBakeConfigParameters("bitbake wic-tools".split()),
- cookerdata.CookerConfiguration()):
- raise WicError("bitbake wic-tools failed")
+ subprocess.check_call(["bitbake", "wic-tools"])
native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
if not native_sysroot:
@@ -332,6 +315,8 @@ def wic_init_parser_create(subparser):
subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
help="name of directory to create image in")
+ subparser.add_argument("-w", "--workdir",
+ help="temporary workdir to use for intermediate files")
subparser.add_argument("-e", "--image-name", dest="image_name",
help="name of the image to use the artifacts from "
"e.g. core-image-sato")
@@ -364,6 +349,8 @@ def wic_init_parser_create(subparser):
default=False, help="output debug information")
subparser.add_argument("-i", "--imager", dest="imager",
default="direct", help="the wic imager plugin")
+ subparser.add_argument("--extra-space", type=int, dest="extra_space",
+ default=0, help="additional free disk space to add to the image")
return
@@ -412,9 +399,9 @@ def imgpathtype(arg):
def wic_init_parser_cp(subparser):
subparser.add_argument("src",
- help="source spec")
- subparser.add_argument("dest", type=imgpathtype,
- help="image spec: <image>:<vfat partition>[<path>]")
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
+ subparser.add_argument("dest",
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
@@ -423,6 +410,9 @@ def wic_init_parser_rm(subparser):
help="path: <image>:<vfat partition><path>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
+ subparser.add_argument("-r", dest="recursive_delete", action="store_true", default=False,
+ help="remove directories and their contents recursively, "
+ "this only applies to ext* partitions")
def expandtype(rules):
"""
@@ -432,7 +422,7 @@ def expandtype(rules):
if rules == 'auto':
return {}
result = {}
- for rule in rules.split('-'):
+ for rule in rules.split(','):
try:
part, size = rule.split(':')
except ValueError:
@@ -507,31 +497,48 @@ subcommands = {
def init_parser(parser):
parser.add_argument("--version", action="version",
version="%(prog)s {version}".format(version=__version__))
+ parser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+
subparsers = parser.add_subparsers(dest='command', help=hlp.wic_usage)
for subcmd in subcommands:
subparser = subparsers.add_parser(subcmd, help=subcommands[subcmd][2])
subcommands[subcmd][3](subparser)
+class WicArgumentParser(argparse.ArgumentParser):
+ def format_help(self):
+ return hlp.wic_help
def main(argv):
- parser = argparse.ArgumentParser(
+ parser = WicArgumentParser(
description="wic version %s" % __version__)
init_parser(parser)
args = parser.parse_args(argv)
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
if "command" in vars(args):
if args.command == "help":
if args.help_topic is None:
parser.print_help()
- print()
- print("Please specify a help topic")
elif args.help_topic in helptopics:
hlpt = helptopics[args.help_topic]
hlpt[0](hlpt[1], hlpt[2])
return 0
+ # Validate the wic cp src and dest parameters to identify which of the
+ # two is the image, and cast that one with imgtype
+ if args.command == "cp":
+ if ":" in args.dest:
+ args.dest = imgtype(args.dest)
+ elif ":" in args.src:
+ args.src = imgtype(args.src)
+ else:
+ raise argparse.ArgumentTypeError("no image or partition number specified.")
+
return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
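
The cp rework makes the direction symmetric: whichever of src/dest carries a ":"-style image spec is cast with imgtype, so files can now be copied out of an image as well as into it. A sketch of the new surface (image names, partition numbers and device paths are placeholders, and the write subcommand's --expand option is assumed to be where expandtype is consumed):

    wic cp local.conf my-image.wic:1/loader/      # host to image, as before
    wic cp my-image.wic:1/EFI/BOOT/grub.cfg .     # image to host, enabled by this change
    wic rm -r my-image.wic:2/opt/testdata         # new recursive delete, ext* partitions only
    wic write my-image.wic /dev/sdX --expand 1:100M,2:300M   # expand rules are now comma-separated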
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index 9b7e53679b..0e5b75b1f7 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -3,7 +3,9 @@
# Yocto Project layer checking tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
@@ -22,7 +24,7 @@ import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
-from checklayer import LayerType, detect_layers, add_layer, add_layer_dependencies, get_signatures
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_layer_dependencies, get_signatures, check_bblayers, sanity_check_layers
from oeqa.utils.commands import get_bb_vars
PROGNAME = 'yocto-check-layer'
@@ -39,6 +41,12 @@ def test_layer(td, layer, test_software_layer_signatures):
tc.loadTests(CASES_PATHS)
return tc.runTests()
+def dump_layer_debug(layer):
+ logger.debug("Found layer %s (%s)" % (layer["name"], layer["path"]))
+ collections = layer.get("collections", {})
+ if collections:
+ logger.debug("%s collections: %s" % (layer["name"], ", ".join(collections)))
+
def main():
parser = argparse.ArgumentParser(
description="Yocto Project layer checking tool",
@@ -49,6 +57,8 @@ def main():
help='File to output log (optional)', action='store')
parser.add_argument('--dependency', nargs="+",
help='Layers to process for dependencies', action='store')
+ parser.add_argument('--no-auto-dependency', help='Disable automatic testing of dependencies',
+ action='store_true')
parser.add_argument('--machines', nargs="+",
help='List of MACHINEs to be used during testing', action='store')
parser.add_argument('--additional-layers', nargs="+",
@@ -82,7 +92,7 @@ def main():
logger.setLevel(logging.ERROR)
if not 'BUILDDIR' in os.environ:
- logger.error("You must source the environment before run this script.")
+ logger.error("You must source the environment before running this script.")
logger.error("$ source oe-init-build-env")
return 1
builddir = os.environ['BUILDDIR']
@@ -90,7 +100,7 @@ def main():
layers = detect_layers(args.layers, args.no_auto)
if not layers:
- logger.error("Fail to detect layers")
+ logger.error("Failed to detect layers")
return 1
if args.additional_layers:
additional_layers = detect_layers(args.additional_layers, args.no_auto)
@@ -102,15 +112,26 @@ def main():
else:
dep_layers = layers
+ logger.debug("Found additional layers:")
+ for l in additional_layers:
+ dump_layer_debug(l)
+ logger.debug("Found dependency layers:")
+ for l in dep_layers:
+ dump_layer_debug(l)
+
+ if not sanity_check_layers(additional_layers + dep_layers, logger):
+ logger.error("Failed layer validation")
+ return 1
+
logger.info("Detected layers:")
for layer in layers:
if layer['type'] == LayerType.ERROR_BSP_DISTRO:
logger.error("%s: Can't be DISTRO and BSP type at the same time."\
- " The conf/distro and conf/machine folders was found."\
+ " Both conf/distro and conf/machine folders were found."\
% layer['name'])
layers.remove(layer)
elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
- logger.error("%s: Don't have conf/layer.conf file."\
+ logger.info("%s: Doesn't have conf/layer.conf file, so ignoring"\
% layer['name'])
layers.remove(layer)
else:
@@ -119,6 +140,21 @@ def main():
if not layers:
return 1
+ # Find all dependencies, and get them checked too
+ if not args.no_auto_dependency:
+ depends = []
+ for layer in layers:
+ layer_depends = get_layer_dependencies(layer, dep_layers, logger)
+ if layer_depends:
+ for d in layer_depends:
+ if d not in depends:
+ depends.append(d)
+
+ for d in depends:
+ if d not in layers:
+ logger.info("Adding %s to the list of layers to test, as a dependency", d['name'])
+ layers.append(d)
+
shutil.copyfile(bblayersconf, bblayersconf + '.backup')
def cleanup_bblayers(signum, frame):
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
@@ -136,28 +172,34 @@ def main():
layer['type'] == LayerType.ERROR_BSP_DISTRO:
continue
+ # Reset to a clean backup copy for each run
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+ if check_bblayers(bblayersconf, layer['path'], logger):
+ logger.info("%s already in %s. To capture initial signatures, the layer under test should not be present "
+ "in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
+ results[layer['name']] = None
+ results_status[layer['name']] = 'SKIPPED (Layer under test should not be present in BBLAYERS)'
+ continue
+
logger.info('')
logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
layer['path']))
- shutil.copyfile(bblayersconf + '.backup', bblayersconf)
-
missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
if not missing_dependencies:
for additional_layer in additional_layers:
if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
missing_dependencies = True
break
- if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \
- any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger),
- additional_layers)):
+ if missing_dependencies:
logger.info('Skipping %s due to missing dependencies.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
layers_tested = layers_tested + 1
continue
- if any(map(lambda additional_layer: not add_layer(bblayersconf, additional_layer, dep_layers, logger),
+ if any(map(lambda additional_layer: not add_layers(bblayersconf, [additional_layer], logger),
additional_layers)):
logger.info('Skipping %s due to missing additional layers.' % layer['name'])
results[layer['name']] = None
@@ -179,7 +221,7 @@ def main():
continue
td['machines'] = args.machines
- if not add_layer(bblayersconf, layer, dep_layers, logger):
+ if not add_layers(bblayersconf, [layer], logger):
- logger.info('Skipping %s ???.' % layer['name'])
+ logger.info('Skipping %s, the layer could not be added.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Unknown)'
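
Dependencies found through get_layer_dependencies() are now added to the test list automatically; the new --no-auto-dependency switch restores the old single-layer behaviour:

    yocto-check-layer ../meta-example                       # meta-example plus its dependencies
    yocto-check-layer --no-auto-dependency ../meta-example  # meta-example only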
diff --git a/scripts/yocto-check-layer-wrapper b/scripts/yocto-check-layer-wrapper
index bbf6ee176d..2e3b699031 100755
--- a/scripts/yocto-check-layer-wrapper
+++ b/scripts/yocto-check-layer-wrapper
@@ -6,7 +6,9 @@
# script to avoid a contaminated environment.
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
if [ -z "$BUILDDIR" ]; then
-echo "Please source oe-init-build-env before run this script."
+echo "Please source oe-init-build-env before running this script."
@@ -30,7 +32,9 @@ cd $base_dir
build_dir=$(mktemp -p $base_dir -d -t build-XXXX)
-source oe-init-build-env $build_dir
+this_dir=$(dirname $(readlink -f $0))
+
+source $this_dir/../oe-init-build-env $build_dir
if [[ $output_log != '' ]]; then
yocto-check-layer -o "$output_log" "$*"
else