Diffstat (limited to 'scripts')
-rw-r--r--  scripts/.oe-layers.json | 7
-rwxr-xr-x  scripts/bblock | 184
-rwxr-xr-x  scripts/bitbake-prserv-tool | 7
-rwxr-xr-x  scripts/bitbake-whatchanged | 318
-rwxr-xr-x  scripts/buildstats-diff | 2
-rwxr-xr-x  scripts/buildstats-summary | 126
-rwxr-xr-x  scripts/combo-layer | 38
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix-plot.sh | 4
-rwxr-xr-x  scripts/contrib/bbvars.py | 6
-rwxr-xr-x  scripts/contrib/convert-overrides.py | 111
-rwxr-xr-x  scripts/contrib/image-manifest | 2
-rwxr-xr-x  scripts/contrib/patchreview.py | 64
-rwxr-xr-x  scripts/create-pull-request | 7
-rwxr-xr-x  scripts/devtool | 22
l---------  scripts/esdk-tools/devtool | 1
l---------  scripts/esdk-tools/oe-find-native-sysroot | 1
l---------  scripts/esdk-tools/recipetool | 1
l---------  scripts/esdk-tools/runqemu | 1
l---------  scripts/esdk-tools/runqemu-addptable2image | 1
l---------  scripts/esdk-tools/runqemu-export-rootfs | 1
l---------  scripts/esdk-tools/runqemu-extract-sdk | 1
l---------  scripts/esdk-tools/runqemu-gen-tapdevs | 1
l---------  scripts/esdk-tools/runqemu-ifdown | 1
l---------  scripts/esdk-tools/runqemu-ifup | 1
l---------  scripts/esdk-tools/wic | 1
-rwxr-xr-x  scripts/install-buildtools | 20
-rw-r--r--  scripts/lib/buildstats.py | 38
-rw-r--r--  scripts/lib/checklayer/__init__.py | 19
-rw-r--r--  scripts/lib/checklayer/cases/bsp.py | 2
-rw-r--r--  scripts/lib/checklayer/cases/common.py | 18
-rw-r--r--  scripts/lib/checklayer/cases/distro.py | 2
-rw-r--r--  scripts/lib/devtool/__init__.py | 27
-rw-r--r--  scripts/lib/devtool/build_sdk.py | 2
-rw-r--r--  scripts/lib/devtool/deploy.py | 240
-rw-r--r--  scripts/lib/devtool/ide_plugins/__init__.py | 282
-rw-r--r--  scripts/lib/devtool/ide_plugins/ide_code.py | 463
-rw-r--r--  scripts/lib/devtool/ide_plugins/ide_none.py | 53
-rwxr-xr-x  scripts/lib/devtool/ide_sdk.py | 1070
-rw-r--r--  scripts/lib/devtool/menuconfig.py | 2
-rw-r--r--  scripts/lib/devtool/sdk.py | 3
-rw-r--r--  scripts/lib/devtool/standard.py | 434
-rw-r--r--  scripts/lib/devtool/upgrade.py | 144
-rw-r--r--  scripts/lib/recipetool/append.py | 66
-rw-r--r--  scripts/lib/recipetool/create.py | 167
-rw-r--r--  scripts/lib/recipetool/create_buildsys.py | 40
-rw-r--r--  scripts/lib/recipetool/create_buildsys_python.py | 1071
-rw-r--r--  scripts/lib/recipetool/create_go.py | 777
-rw-r--r--  scripts/lib/recipetool/create_npm.py | 43
-rw-r--r--  scripts/lib/recipetool/setvar.py | 1
-rw-r--r--  scripts/lib/resulttool/log.py | 13
-rw-r--r--  scripts/lib/resulttool/regression.py | 281
-rw-r--r--  scripts/lib/resulttool/report.py | 5
-rw-r--r--  scripts/lib/resulttool/resultutils.py | 8
-rw-r--r--  scripts/lib/scriptutils.py | 4
-rw-r--r--  scripts/lib/wic/canned-wks/efi-bootdisk.wks.in | 2
-rw-r--r--  scripts/lib/wic/canned-wks/qemuloongarch.wks | 3
-rw-r--r--  scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 2
-rw-r--r--  scripts/lib/wic/filemap.py | 7
-rw-r--r--  scripts/lib/wic/help.py | 2
-rw-r--r--  scripts/lib/wic/ksparser.py | 4
-rw-r--r--  scripts/lib/wic/misc.py | 2
-rw-r--r--  scripts/lib/wic/partition.py | 47
-rw-r--r--  scripts/lib/wic/plugins/imager/direct.py | 122
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-efi.py | 108
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-partition.py | 7
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-pcbios.py | 6
-rw-r--r--  scripts/lib/wic/plugins/source/empty.py | 57
-rw-r--r--  scripts/lib/wic/plugins/source/rawcopy.py | 3
-rw-r--r--  scripts/lib/wic/plugins/source/rootfs.py | 2
-rwxr-xr-x  scripts/nativesdk-intercept/chgrp | 5
-rwxr-xr-x  scripts/nativesdk-intercept/chown | 5
-rwxr-xr-x  scripts/oe-buildenv-internal | 25
-rwxr-xr-x  scripts/oe-check-sstate | 10
-rwxr-xr-x  scripts/oe-depends-dot | 13
-rwxr-xr-x  scripts/oe-find-native-sysroot | 15
-rwxr-xr-x  scripts/oe-pkgdata-util | 2
-rwxr-xr-x  scripts/oe-setup-build | 122
-rwxr-xr-x  scripts/oe-setup-builddir | 24
-rwxr-xr-x  scripts/oe-setup-layers | 130
-rwxr-xr-x  scripts/oe-setup-vscode | 93
-rwxr-xr-x  scripts/opkg-query-helper.py | 2
-rwxr-xr-x  scripts/patchtest | 232
-rwxr-xr-x  scripts/patchtest-get-branch | 81
-rwxr-xr-x  scripts/patchtest-get-series | 115
-rwxr-xr-x  scripts/patchtest-send-results | 110
-rwxr-xr-x  scripts/patchtest-setup-sharedir | 83
-rw-r--r--  scripts/patchtest.README | 153
-rw-r--r--  scripts/postinst-intercepts/update_gtk_icon_cache | 6
-rw-r--r--  scripts/postinst-intercepts/update_mandb | 18
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/draw.py | 29
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/parsing.py | 2
-rwxr-xr-x  scripts/rpm2cpio.sh | 30
-rwxr-xr-x  scripts/runqemu | 368
-rwxr-xr-x  scripts/runqemu-export-rootfs | 25
-rwxr-xr-x  scripts/runqemu-extract-sdk | 2
-rwxr-xr-x  scripts/runqemu-gen-tapdevs | 120
-rwxr-xr-x  scripts/runqemu-ifdown | 41
-rwxr-xr-x  scripts/runqemu-ifup | 65
-rwxr-xr-x  scripts/sstate-cache-management.py | 329
-rwxr-xr-x  scripts/sstate-cache-management.sh | 458
-rwxr-xr-x  scripts/yocto-check-layer | 5
-rwxr-xr-x  scripts/yocto_testresults_query.py | 131
102 files changed, 7279 insertions, 2148 deletions
diff --git a/scripts/.oe-layers.json b/scripts/.oe-layers.json
new file mode 100644
index 0000000000..1b00a84b54
--- /dev/null
+++ b/scripts/.oe-layers.json
@@ -0,0 +1,7 @@
+{
+ "layers": [
+ "../meta-poky",
+ "../meta"
+ ],
+ "version": "1.0"
+}
diff --git a/scripts/bblock b/scripts/bblock
new file mode 100755
index 0000000000..0082059af8
--- /dev/null
+++ b/scripts/bblock
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+# bblock
+# lock/unlock task to latest signature
+#
+# Copyright (c) 2023 BayLibre, SAS
+# Author: Julien Stepahn <jstephan@baylibre.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import bb.msg
+
+import argparse_oe
+
+myname = os.path.basename(sys.argv[0])
+logger = bb.msg.logger_create(myname)
+
+
+def getTaskSignatures(tinfoil, pn, tasks):
+ tinfoil.set_event_mask(
+ [
+ "bb.event.GetTaskSignatureResult",
+ "logging.LogRecord",
+ "bb.command.CommandCompleted",
+ "bb.command.CommandFailed",
+ ]
+ )
+ ret = tinfoil.run_command("getTaskSignatures", pn, tasks)
+ if ret:
+ while True:
+ event = tinfoil.wait_event(1)
+ if event:
+ if isinstance(event, bb.command.CommandCompleted):
+ break
+ elif isinstance(event, bb.command.CommandFailed):
+ logger.error(str(event))
+ sys.exit(2)
+ elif isinstance(event, bb.event.GetTaskSignatureResult):
+ sig = event.sig
+ elif isinstance(event, logging.LogRecord):
+ logger.handle(event)
+ else:
+ logger.error("No result returned from getTaskSignatures command")
+ sys.exit(2)
+ return sig
+
+
+def parseRecipe(tinfoil, recipe):
+ try:
+ tinfoil.parse_recipes()
+ d = tinfoil.parse_recipe(recipe)
+ except Exception:
+ logger.error("Failed to get recipe info for: %s" % recipe)
+ sys.exit(1)
+ return d
+
+
+def bblockDump(lockfile):
+ try:
+ with open(lockfile, "r") as lockfile:
+ for line in lockfile:
+ print(line.strip())
+ except IOError:
+ return 1
+ return 0
+
+
+def bblockReset(lockfile, pns, package_archs, tasks):
+ if not pns:
+ logger.info("Unlocking all recipes")
+ try:
+ os.remove(lockfile)
+ except FileNotFoundError:
+ pass
+ else:
+ logger.info("Unlocking {pns}".format(pns=pns))
+ tmp_lockfile = lockfile + ".tmp"
+ with open(lockfile, "r") as infile, open(tmp_lockfile, "w") as outfile:
+ for line in infile:
+ if not (
+ any(element in line for element in pns)
+ and any(element in line for element in package_archs.split())
+ ):
+ outfile.write(line)
+ else:
+ if tasks and not any(element in line for element in tasks):
+ outfile.write(line)
+ os.remove(lockfile)
+ os.rename(tmp_lockfile, lockfile)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="Lock and unlock a recipe")
+ parser.add_argument("pn", nargs="*", help="Space separated list of recipes to lock")
+ parser.add_argument(
+ "-t",
+ "--tasks",
+ help="Comma separated list of tasks",
+ type=lambda s: [
+ task if task.startswith("do_") else "do_" + task for task in s.split(",")
+ ],
+ )
+ parser.add_argument(
+ "-r",
+ "--reset",
+ action="store_true",
+ help="Unlock pn recipes, or all recipes if pn is empty",
+ )
+ parser.add_argument(
+ "-d",
+ "--dump",
+ action="store_true",
+ help="Dump generated bblock.conf file",
+ )
+
+ global_args, unparsed_args = parser.parse_known_args()
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ package_archs = tinfoil.config_data.getVar("PACKAGE_ARCHS")
+ builddir = tinfoil.config_data.getVar("TOPDIR")
+ lockfile = "{builddir}/conf/bblock.conf".format(builddir=builddir)
+
+ if global_args.dump:
+ bblockDump(lockfile)
+ return 0
+
+ if global_args.reset:
+ bblockReset(lockfile, global_args.pn, package_archs, global_args.tasks)
+ return 0
+
+ with open(lockfile, "a") as lockfile:
+ s = ""
+ if lockfile.tell() == 0:
+ s = "# Generated by bblock\n"
+ s += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "info"\n'
+ s += 'SIGGEN_LOCKEDSIGS_TYPES += "${PACKAGE_ARCHS}"\n'
+ s += "\n"
+
+ for pn in global_args.pn:
+ d = parseRecipe(tinfoil, pn)
+ package_arch = d.getVar("PACKAGE_ARCH")
+ siggen_locked_sigs_package_arch = d.getVar(
+ "SIGGEN_LOCKEDSIGS_{package_arch}".format(package_arch=package_arch)
+ )
+ sigs = getTaskSignatures(tinfoil, [pn], global_args.tasks)
+ for sig in sigs:
+ new_entry = "{pn}:{taskname}:{sig}".format(
+ pn=sig[0], taskname=sig[1], sig=sig[2]
+ )
+ if (
+ siggen_locked_sigs_package_arch
+ and not new_entry in siggen_locked_sigs_package_arch
+ ) or not siggen_locked_sigs_package_arch:
+ s += 'SIGGEN_LOCKEDSIGS_{package_arch} += "{new_entry}"\n'.format(
+ package_arch=package_arch, new_entry=new_entry
+ )
+ lockfile.write(s)
+ return 0
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
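
The bblock script added above locks recipes to their current task signatures by appending SIGGEN_LOCKEDSIGS entries to conf/bblock.conf in the build directory. A minimal usage sketch, assuming an initialised build environment; the recipe name is illustrative:

  $ # lock do_compile and do_install of a recipe to their current signatures
  $ bblock -t compile,install zlib
  $ # show the generated conf/bblock.conf
  $ bblock -d
  $ # unlock that recipe again, or drop every lock by giving no recipe
  $ bblock -r zlib
  $ bblock -r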
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
index bed97bd8ac..80028342b1 100755
--- a/scripts/bitbake-prserv-tool
+++ b/scripts/bitbake-prserv-tool
@@ -17,8 +17,11 @@ help ()
clean_cache()
{
s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
+ # Stop any active memory resident server
+ bitbake -m
+ # Remove cache entries since we want to trigger a full reparse
if [ "x${s}" != "x" ]; then
- rm -rf ${s}
+ rm -f ${s}/bb_cache*.dat.*
fi
}
@@ -62,7 +65,7 @@ do_migrate_localcount ()
return 1
fi
- rm -rf $df
+ rm -f $df
clean_cache
echo "Exporting LOCALCOUNT to AUTOINCs..."
bitbake -R conf/migrate_localcount.conf -p
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
deleted file mode 100755
index cdb730dbdb..0000000000
--- a/scripts/bitbake-whatchanged
+++ /dev/null
@@ -1,318 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (c) 2013 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import sys
-import getopt
-import shutil
-import re
-import warnings
-import subprocess
-import argparse
-
-scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
-lib_path = scripts_path + '/lib'
-sys.path = sys.path + [lib_path]
-
-import scriptpath
-
-# Figure out where is the bitbake/lib/bb since we need bb.siggen and bb.process
-bitbakepath = scriptpath.add_bitbake_lib_path()
-if not bitbakepath:
- sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
- sys.exit(1)
-scriptpath.add_oe_lib_path()
-import argparse_oe
-
-import bb.siggen
-import bb.process
-
-# Match the stamp's filename
-# group(1): PE_PV (may no PE)
-# group(2): PR
-# group(3): TASK
-# group(4): HASH
-stamp_re = re.compile("(?P<pv>.*)-(?P<pr>r\d+)\.(?P<task>do_\w+)\.(?P<hash>[^\.]*)")
-sigdata_re = re.compile(".*\.sigdata\..*")
-
-def gen_dict(stamps):
- """
- Generate the dict from the stamps dir.
- The output dict format is:
- {fake_f: {pn: PN, pv: PV, pr: PR, task: TASK, path: PATH}}
- Where:
- fake_f: pv + task + hash
- path: the path to the stamp file
- """
- # The member of the sub dict (A "path" will be appended below)
- sub_mem = ("pv", "pr", "task")
- d = {}
- for dirpath, _, files in os.walk(stamps):
- for f in files:
- # The "bitbake -S" would generate ".sigdata", but no "_setscene".
- fake_f = re.sub('_setscene.', '.', f)
- fake_f = re.sub('.sigdata', '', fake_f)
- subdict = {}
- tmp = stamp_re.match(fake_f)
- if tmp:
- for i in sub_mem:
- subdict[i] = tmp.group(i)
- if len(subdict) != 0:
- pn = os.path.basename(dirpath)
- subdict['pn'] = pn
- # The path will be used by os.stat() and bb.siggen
- subdict['path'] = dirpath + "/" + f
- fake_f = tmp.group('pv') + tmp.group('task') + tmp.group('hash')
- d[fake_f] = subdict
- return d
-
-# Re-construct the dict
-def recon_dict(dict_in):
- """
- The output dict format is:
- {pn_task: {pv: PV, pr: PR, path: PATH}}
- """
- dict_out = {}
- for k in dict_in.keys():
- subdict = {}
- # The key
- pn_task = "%s_%s" % (dict_in.get(k).get('pn'), dict_in.get(k).get('task'))
- # If more than one stamps are found, use the latest one.
- if pn_task in dict_out:
- full_path_pre = dict_out.get(pn_task).get('path')
- full_path_cur = dict_in.get(k).get('path')
- if os.stat(full_path_pre).st_mtime > os.stat(full_path_cur).st_mtime:
- continue
- subdict['pv'] = dict_in.get(k).get('pv')
- subdict['pr'] = dict_in.get(k).get('pr')
- subdict['path'] = dict_in.get(k).get('path')
- dict_out[pn_task] = subdict
-
- return dict_out
-
-def split_pntask(s):
- """
- Split the pn_task in to (pn, task) and return it
- """
- tmp = re.match("(.*)_(do_.*)", s)
- return (tmp.group(1), tmp.group(2))
-
-
-def print_added(d_new = None, d_old = None):
- """
- Print the newly added tasks
- """
- added = {}
- for k in list(d_new.keys()):
- if k not in d_old:
- # Add the new one to added dict, and remove it from
- # d_new, so the remaining ones are the changed ones
- added[k] = d_new.get(k)
- del(d_new[k])
-
- if not added:
- return 0
-
- # Format the output, the dict format is:
- # {pn: task1, task2 ...}
- added_format = {}
- counter = 0
- for k in added.keys():
- pn, task = split_pntask(k)
- if pn in added_format:
- # Append the value
- added_format[pn] = "%s %s" % (added_format.get(pn), task)
- else:
- added_format[pn] = task
- counter += 1
- print("=== Newly added tasks: (%s tasks)" % counter)
- for k in added_format.keys():
- print(" %s: %s" % (k, added_format.get(k)))
-
- return counter
-
-def print_vrchanged(d_new = None, d_old = None, vr = None):
- """
- Print the pv or pr changed tasks.
- The arg "vr" is "pv" or "pr"
- """
- pvchanged = {}
- counter = 0
- for k in list(d_new.keys()):
- if d_new.get(k).get(vr) != d_old.get(k).get(vr):
- counter += 1
- pn, task = split_pntask(k)
- if pn not in pvchanged:
- # Format the output, we only print pn (no task) since
- # all the tasks would be changed when pn or pr changed,
- # the dict format is:
- # {pn: pv/pr_old -> pv/pr_new}
- pvchanged[pn] = "%s -> %s" % (d_old.get(k).get(vr), d_new.get(k).get(vr))
- del(d_new[k])
-
- if not pvchanged:
- return 0
-
- print("\n=== %s changed: (%s tasks)" % (vr.upper(), counter))
- for k in pvchanged.keys():
- print(" %s: %s" % (k, pvchanged.get(k)))
-
- return counter
-
-def print_depchanged(d_new = None, d_old = None, verbose = False):
- """
- Print the dependency changes
- """
- depchanged = {}
- counter = 0
- for k in d_new.keys():
- counter += 1
- pn, task = split_pntask(k)
- if (verbose):
- full_path_old = d_old.get(k).get("path")
- full_path_new = d_new.get(k).get("path")
- # No counter since it is not ready here
- if sigdata_re.match(full_path_old) and sigdata_re.match(full_path_new):
- output = bb.siggen.compare_sigfiles(full_path_old, full_path_new)
- if output:
- print("\n=== The verbose changes of %s.%s:" % (pn, task))
- print('\n'.join(output))
- else:
- # Format the output, the format is:
- # {pn: task1, task2, ...}
- if pn in depchanged:
- depchanged[pn] = "%s %s" % (depchanged.get(pn), task)
- else:
- depchanged[pn] = task
-
- if len(depchanged) > 0:
- print("\n=== Dependencies changed: (%s tasks)" % counter)
- for k in depchanged.keys():
- print(" %s: %s" % (k, depchanged[k]))
-
- return counter
-
-
-def main():
- """
- Print what will be done between the current and last builds:
- 1) Run "STAMPS_DIR=<path> bitbake -S recipe" to re-generate the stamps
- 2) Figure out what are newly added and changed, can't figure out
- what are removed since we can't know the previous stamps
- clearly, for example, if there are several builds, we can't know
- which stamps the last build has used exactly.
- 3) Use bb.siggen.compare_sigfiles to diff the old and new stamps
- """
-
- parser = argparse_oe.ArgumentParser(usage = """%(prog)s [options] [package ...]
-print what will be done between the current and last builds, for example:
-
- $ bitbake core-image-sato
- # Edit the recipes
- $ bitbake-whatchanged core-image-sato
-
-The changes will be printed.
-
-Note:
- The amount of tasks is not accurate when the task is "do_build" since
- it usually depends on other tasks.
- The "nostamp" task is not included.
-"""
-)
- parser.add_argument("recipe", help="recipe to check")
- parser.add_argument("-v", "--verbose", help = "print the verbose changes", action = "store_true")
- args = parser.parse_args()
-
- # Get the STAMPS_DIR
- print("Figuring out the STAMPS_DIR ...")
- cmdline = "bitbake -e | sed -ne 's/^STAMPS_DIR=\"\(.*\)\"/\\1/p'"
- try:
- stampsdir, err = bb.process.run(cmdline)
- except:
- raise
- if not stampsdir:
- print("ERROR: No STAMPS_DIR found for '%s'" % args.recipe, file=sys.stderr)
- return 2
- stampsdir = stampsdir.rstrip("\n")
- if not os.path.isdir(stampsdir):
- print("ERROR: stamps directory \"%s\" not found!" % stampsdir, file=sys.stderr)
- return 2
-
- # The new stamps dir
- new_stampsdir = stampsdir + ".bbs"
- if os.path.exists(new_stampsdir):
- print("ERROR: %s already exists!" % new_stampsdir, file=sys.stderr)
- return 2
-
- try:
- # Generate the new stamps dir
- print("Generating the new stamps ... (need several minutes)")
- cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, args.recipe)
- # FIXME
- # The "bitbake -S" may fail, not fatal error, the stamps will still
- # be generated, this might be a bug of "bitbake -S".
- try:
- bb.process.run(cmdline)
- except Exception as exc:
- print(exc)
-
- # The dict for the new and old stamps.
- old_dict = gen_dict(stampsdir)
- new_dict = gen_dict(new_stampsdir)
-
- # Remove the same one from both stamps.
- cnt_unchanged = 0
- for k in list(new_dict.keys()):
- if k in old_dict:
- cnt_unchanged += 1
- del(new_dict[k])
- del(old_dict[k])
-
- # Re-construct the dict to easily find out what is added or changed.
- # The dict format is:
- # {pn_task: {pv: PV, pr: PR, path: PATH}}
- new_recon = recon_dict(new_dict)
- old_recon = recon_dict(old_dict)
-
- del new_dict
- del old_dict
-
- # Figure out what are changed, the new_recon would be changed
- # by the print_xxx function.
- # Newly added
- cnt_added = print_added(new_recon, old_recon)
-
- # PV (including PE) and PR changed
- # Let the bb.siggen handle them if verbose
- cnt_rv = {}
- if not args.verbose:
- for i in ('pv', 'pr'):
- cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)
-
- # Dependencies changed (use bitbake-diffsigs)
- cnt_dep = print_depchanged(new_recon, old_recon, args.verbose)
-
- total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep
-
- print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
- if args.verbose:
- print("Newly added: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_dep))
- else:
- print("Newly added: %s\nPV changed: %s\nPR changed: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_rv.get('pv') or 0, cnt_rv.get('pr') or 0, cnt_dep))
- except:
- print("ERROR occurred!")
- raise
- finally:
- # Remove the newly generated stamps dir
- if os.path.exists(new_stampsdir):
- print("Removing the newly generated stamps dir ...")
- shutil.rmtree(new_stampsdir)
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/scripts/buildstats-diff b/scripts/buildstats-diff
index 2f6498ab67..c9aa76a8fa 100755
--- a/scripts/buildstats-diff
+++ b/scripts/buildstats-diff
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Script for comparing buildstats from two different builds
#
diff --git a/scripts/buildstats-summary b/scripts/buildstats-summary
new file mode 100755
index 0000000000..b10c671b29
--- /dev/null
+++ b/scripts/buildstats-summary
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Dump a summary of the specified buildstats to the terminal, filtering and
+# sorting by walltime.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+
+import argparse
+import dataclasses
+import datetime
+import enum
+import os
+import pathlib
+import sys
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, "lib"))
+import buildstats
+
+
+@dataclasses.dataclass
+class Task:
+ recipe: str
+ task: str
+ start: datetime.datetime
+ duration: datetime.timedelta
+
+
+class Sorting(enum.Enum):
+ start = 1
+ duration = 2
+
+ # argparse integration
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return self.name
+
+ @staticmethod
+ def from_string(s: str):
+ try:
+ return Sorting[s]
+ except KeyError:
+ return s
+
+
+def read_buildstats(path: pathlib.Path) -> buildstats.BuildStats:
+ if not path.exists():
+ raise Exception(f"No such file or directory: {path}")
+ if path.is_file():
+ return buildstats.BuildStats.from_file_json(path)
+ if (path / "build_stats").is_file():
+ return buildstats.BuildStats.from_dir(path)
+ raise Exception(f"Cannot find buildstats in {path}")
+
+
+def dump_buildstats(args, bs: buildstats.BuildStats):
+ tasks = []
+ for recipe in bs.values():
+ for task, stats in recipe.tasks.items():
+ t = Task(
+ recipe.name,
+ task,
+ datetime.datetime.fromtimestamp(stats["start_time"]),
+ datetime.timedelta(seconds=int(stats.walltime)),
+ )
+ tasks.append(t)
+
+ tasks.sort(key=lambda t: getattr(t, args.sort.name))
+
+ minimum = datetime.timedelta(seconds=args.shortest)
+ highlight = datetime.timedelta(seconds=args.highlight)
+
+ for t in tasks:
+ if t.duration >= minimum:
+ line = f"{t.duration} {t.recipe}:{t.task}"
+ if args.highlight and t.duration >= highlight:
+ print(f"\033[1m{line}\033[0m")
+ else:
+ print(line)
+
+
+def main(argv=None) -> int:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "buildstats", metavar="BUILDSTATS", help="Buildstats file", type=pathlib.Path
+ )
+ parser.add_argument(
+ "--sort",
+ "-s",
+ type=Sorting.from_string,
+ choices=list(Sorting),
+ default=Sorting.start,
+ help="Sort tasks",
+ )
+ parser.add_argument(
+ "--shortest",
+ "-t",
+ type=int,
+ default=1,
+ metavar="SECS",
+ help="Hide tasks shorter than SECS seconds",
+ )
+ parser.add_argument(
+ "--highlight",
+ "-g",
+ type=int,
+ default=60,
+ metavar="SECS",
+ help="Highlight tasks longer than SECS seconds (0 to disable)",
+ )
+
+ args = parser.parse_args(argv)
+
+ bs = read_buildstats(args.buildstats)
+ dump_buildstats(args, bs)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
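
buildstats-summary, also new in this change, reads either a buildstats directory (one containing a build_stats file) or a JSON buildstats file and prints one line per task, filtered and sorted by walltime. A usage sketch, assuming buildstats collection was enabled for an earlier build; the timestamped directory name is illustrative:

  $ # sort by duration, hide tasks under 5 seconds, highlight anything over 2 minutes
  $ ./scripts/buildstats-summary --sort duration --shortest 5 --highlight 120 \
        tmp/buildstats/20240101120000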
diff --git a/scripts/combo-layer b/scripts/combo-layer
index 045de65642..4a715914af 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -19,9 +19,8 @@ import tempfile
import configparser
import re
import copy
-import pipes
+import shlex
import shutil
-from collections import OrderedDict
from string import Template
from functools import reduce
@@ -192,6 +191,23 @@ def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
logger.debug("output: %s" % output.replace(chr(0), '\\0'))
return output
+def action_sync_revs(conf, args):
+ """
+ Update the last_revision config option for each repo with the latest
+ revision in the remote's branch. Useful if multiple people are using
+ combo-layer.
+ """
+ repos = get_repos(conf, args[1:])
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ runcmd("git fetch", ldir)
+ lastrev = runcmd('git rev-parse origin/%s' % branch, ldir).strip()
+ print("Updating %s to %s" % (name, lastrev))
+ conf.update(name, "last_revision", lastrev)
+
def action_init(conf, args):
"""
Clone component repositories
@@ -467,7 +483,7 @@ def check_repo_clean(repodir):
exit if repo is dirty
"""
output=runcmd("git status --porcelain", repodir)
- r = re.compile('\?\? patch-.*/')
+ r = re.compile(r'\?\? patch-.*/')
dirtyout = [item for item in output.splitlines() if not r.match(item)]
if dirtyout:
logger.error("git repo %s is dirty, please fix it first", repodir)
@@ -508,7 +524,7 @@ def check_patch(patchfile):
f.close()
if of:
of.close()
- bb.utils.rename(patchfile + '.tmp', patchfile)
+ os.rename(of.name, patchfile)
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
@@ -1259,7 +1275,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
target = os.path.join(wargs["destdir"], dest_dir)
if not os.path.isdir(target):
os.makedirs(target)
- quoted_target = pipes.quote(target)
+ quoted_target = shlex.quote(target)
# os.sysconf('SC_ARG_MAX') is lying: running a command with
# string length 629343 already failed with "Argument list too
# long" although SC_ARG_MAX = 2097152. "man execve" explains
@@ -1271,7 +1287,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
unquoted_args = []
cmdsize = 100 + len(quoted_target)
while update:
- quoted_next = pipes.quote(update[0])
+ quoted_next = shlex.quote(update[0])
size_next = len(quoted_next) + len(dest_dir) + 1
logger.debug('cmdline length %d + %d < %d?' % (cmdsize, size_next, os.sysconf('SC_ARG_MAX')))
if cmdsize + size_next < max_cmdsize:
@@ -1302,6 +1318,7 @@ actions = {
"update": action_update,
"pull": action_pull,
"splitpatch": action_splitpatch,
+ "sync-revs": action_sync_revs,
}
def main():
@@ -1312,10 +1329,11 @@ def main():
Create and update a combination layer repository from multiple component repositories.
Action:
- init initialise the combo layer repo
- update [components] get patches from component repos and apply them to the combo repo
- pull [components] just pull component repos only
- splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ pull [components] just pull component repos only
+ sync-revs [components] update the config file's last_revision for each repository
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
action = "store", dest = "conffile", default = "conf/combo-layer.conf")
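
The new sync-revs action fetches each component repository and records the tip of its configured branch as last_revision in the combo-layer config, which helps when several people drive the same combo repository. A usage sketch; the component names are illustrative:

  $ # refresh last_revision for every component...
  $ ./scripts/combo-layer -c conf/combo-layer.conf sync-revs
  $ # ...or only for selected ones
  $ ./scripts/combo-layer sync-revs oe-core bitbake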
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
index e7bd129e9e..6672189c95 100755
--- a/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -16,8 +16,8 @@
# Setup the defaults
DATFILE="bb-matrix.dat"
-XLABEL="BB_NUMBER_THREADS"
-YLABEL="PARALLEL_MAKE"
+XLABEL="BB\\\\_NUMBER\\\\_THREADS"
+YLABEL="PARALLEL\\\\_MAKE"
FIELD=3
DEF_TITLE="Elapsed Time (seconds)"
PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index 090133600b..a9cdf082ab 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -36,8 +36,8 @@ def bbvar_is_documented(var, documented_vars):
def collect_documented_vars(docfiles):
''' Walk the docfiles and collect the documented variables '''
documented_vars = []
- prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-")
- var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ prog = re.compile(r".*($|[^A-Z_])<glossentry id=\'var-")
+ var_prog = re.compile(r'<glossentry id=\'var-(.*)\'>')
for d in docfiles:
with open(d) as f:
documented_vars += var_prog.findall(f.read())
@@ -45,7 +45,7 @@ def collect_documented_vars(docfiles):
return documented_vars
def bbvar_doctag(var, docconf):
- prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % (var))
if docconf == "":
return "?"
diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py
index 4d41a4c475..c69acb4095 100755
--- a/scripts/contrib/convert-overrides.py
+++ b/scripts/contrib/convert-overrides.py
@@ -22,66 +22,78 @@ import sys
import tempfile
import shutil
import mimetypes
+import argparse
-if len(sys.argv) < 2:
- print("Please specify a directory to run the conversion script against.")
- sys.exit(1)
+parser = argparse.ArgumentParser(description="Convert override syntax")
+parser.add_argument("--override", "-o", action="append", default=[], help="Add additional strings to consider as an override (e.g. custom machines/distros)")
+parser.add_argument("--skip", "-s", action="append", default=[], help="Add additional strings to skip and not consider an override")
+parser.add_argument("--skip-ext", "-e", action="append", default=[], help="Additional file suffixes to skip when processing (e.g. '.foo')")
+parser.add_argument("--package-vars", action="append", default=[], help="Additional variables to treat as package variables")
+parser.add_argument("--image-vars", action="append", default=[], help="Additional variables to treat as image variables")
+parser.add_argument("--short-override", action="append", default=[], help="Additional strings to treat as short overrides")
+parser.add_argument("path", nargs="+", help="Paths to convert")
+
+args = parser.parse_args()
# List of strings to treat as overrides
-vars = ["append", "prepend", "remove"]
-vars = vars + ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
-vars = vars + ["genericx86", "edgerouter", "beaglebone-yocto"]
-vars = vars + ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
-vars = vars + ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
-vars = vars + ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
-vars = vars + ["tune-", "pn-", "forcevariable"]
-vars = vars + ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
-vars = vars + ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
-vars = vars + ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
-vars = vars + ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
-vars = vars + ["linux-gnueabi", "eabi"]
-vars = vars + ["virtclass-multilib", "virtclass-mcextend"]
+vars = args.override
+vars += ["append", "prepend", "remove"]
+vars += ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
+vars += ["genericx86", "edgerouter", "beaglebone-yocto"]
+vars += ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
+vars += ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
+vars += ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
+vars += ["tune-", "pn-", "forcevariable"]
+vars += ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
+vars += ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
+vars += ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
+vars += ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
+vars += ["linux-gnueabi", "eabi"]
+vars += ["virtclass-multilib", "virtclass-mcextend"]
# List of strings to treat as overrides but only with whitespace following or another override (more restricted matching).
# Handles issues with arc matching arch.
-shortvars = ["arc", "mips", "mipsel", "sh4"]
+shortvars = ["arc", "mips", "mipsel", "sh4"] + args.short_override
# Variables which take packagenames as an override
packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY",
"pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE",
"PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst",
"pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA",
- "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"]
+ "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + args.package_vars
# Expressions to skip if encountered, these are not overrides
-skips = ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
-skips = skips + ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
-skips = skips + ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
-skips = skips + ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
-skips = skips + ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
-skips = skips + ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
-skips = skips + ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
-skips = skips + ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
-skips = skips + ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
-
-imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"]
-packagevars = packagevars + imagevars
+skips = args.skip
+skips += ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
+skips += ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
+skips += ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
+skips += ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
+skips += ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
+skips += ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
+skips += ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
+skips += ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
+skips += ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
+
+imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] + args.image_vars
+packagevars += imagevars
+
+skip_ext = [".html", ".patch", ".m4", ".diff"] + args.skip_ext
vars_re = {}
for exp in vars:
- vars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
+ vars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
shortvars_re = {}
for exp in shortvars:
- shortvars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + '([\(\'"\s:])'), r"\1:" + exp + r"\3")
+ shortvars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + r'([\(\'"\s:])'), r"\1:" + exp + r"\3")
package_re = {}
for exp in packagevars:
- package_re[exp] = (re.compile('(^|[#\'"\s\-\+]+)' + exp + '_' + '([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
+ package_re[exp] = (re.compile(r'(^|[#\'"\s\-\+]+)' + exp + r'_' + r'([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
# Other substitutions to make
subs = {
- 'r = re.compile("([^:]+):\s*(.*)")' : 'r = re.compile("(^.+?):\s+(.*)")',
+ 'r = re.compile(r"([^:]+):\s*(.*)")' : 'r = re.compile(r"(^.+?):\s+(.*)")',
"val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))",
"f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))",
"d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))",
@@ -124,21 +136,20 @@ def processfile(fn):
ourname = os.path.basename(sys.argv[0])
ourversion = "0.9.3"
-if os.path.isfile(sys.argv[1]):
- processfile(sys.argv[1])
- sys.exit(0)
-
-for targetdir in sys.argv[1:]:
- print("processing directory '%s'" % targetdir)
- for root, dirs, files in os.walk(targetdir):
- for name in files:
- if name == ourname:
- continue
- fn = os.path.join(root, name)
- if os.path.islink(fn):
- continue
- if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
- continue
- processfile(fn)
+for p in args.path:
+ if os.path.isfile(p):
+ processfile(p)
+ else:
+ print("processing directory '%s'" % p)
+ for root, dirs, files in os.walk(p):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or any(fn.endswith(ext) for ext in skip_ext):
+ continue
+ processfile(fn)
print("All files processed with version %s" % ourversion)
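
convert-overrides.py now takes its override, skip and variable lists from the command line rather than requiring the script itself to be edited. A usage sketch; the layer path, machine override and function name are illustrative:

  $ # convert a layer, adding a custom machine override and excluding a shell
  $ # function name that happens to end in _append
  $ ./scripts/contrib/convert-overrides.py -o mymachine -s my_custom_append meta-mylayer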
diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest
index 3c07a73a4e..4d65a99258 100755
--- a/scripts/contrib/image-manifest
+++ b/scripts/contrib/image-manifest
@@ -392,7 +392,7 @@ def export_manifest_info(args):
for key in rd.getVarFlags('PACKAGECONFIG').keys():
if key == 'doc':
continue
- rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key, True)
+ rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key)
if config['patches'] == 'yes':
patches = oe.recipeutils.get_recipe_patches(rd)
diff --git a/scripts/contrib/patchreview.py b/scripts/contrib/patchreview.py
index b22cc07f0a..bceae06561 100755
--- a/scripts/contrib/patchreview.py
+++ b/scripts/contrib/patchreview.py
@@ -5,6 +5,15 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import argparse
+import collections
+import json
+import os
+import os.path
+import pathlib
+import re
+import subprocess
+
# TODO
# - option to just list all broken files
# - test suite
@@ -35,14 +44,12 @@ def blame_patch(patch):
From a patch filename, return a list of "commit summary (author name <author
email>)" strings representing the history.
"""
- import subprocess
return subprocess.check_output(("git", "log",
"--follow", "--find-renames", "--diff-filter=A",
"--format=%s (%aN <%aE>)",
"--", patch)).decode("utf-8").splitlines()
-def patchreview(path, patches):
- import re, os.path
+def patchreview(patches):
# General pattern: start of line, optional whitespace, tag with optional
# hyphen or spaces, maybe a colon, some whitespace, then the value, all case
@@ -56,11 +63,10 @@ def patchreview(path, patches):
for patch in patches:
- fullpath = os.path.join(path, patch)
result = Result()
- results[fullpath] = result
+ results[patch] = result
- content = open(fullpath, encoding='ascii', errors='ignore').read()
+ content = open(patch, encoding='ascii', errors='ignore').read()
# Find the Signed-off-by tag
match = sob_re.search(content)
@@ -193,29 +199,56 @@ Patches in Pending state: %s""" % (total_patches,
def histogram(results):
from toolz import recipes, dicttoolz
import math
+
counts = recipes.countby(lambda r: r.upstream_status, results.values())
bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
for k in bars:
print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
+def find_layers(candidate):
+ # candidate can either be the path to a layer directly (eg meta-intel), or a
+ # repository that contains other layers (meta-arm). We can determine what by
+ # looking for a conf/layer.conf file. If that file exists then it's a layer,
+ # otherwise its a repository of layers and we can assume they're called
+ # meta-*.
+
+ if (candidate / "conf" / "layer.conf").exists():
+ return [candidate.absolute()]
+ else:
+ return [d.absolute() for d in candidate.iterdir() if d.is_dir() and (d.name == "meta" or d.name.startswith("meta-"))]
+
+# TODO these don't actually handle dynamic-layers/
+
+def gather_patches(layers):
+ patches = []
+ for directory in layers:
+ filenames = subprocess.check_output(("git", "-C", directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff"), universal_newlines=True).split()
+ patches += [os.path.join(directory, f) for f in filenames]
+ return patches
+
+def count_recipes(layers):
+ count = 0
+ for directory in layers:
+ output = subprocess.check_output(["git", "-C", directory, "ls-files", "recipes-*/**/*.bb"], universal_newlines=True)
+ count += len(output.splitlines())
+ return count
if __name__ == "__main__":
- import argparse, subprocess, os
-
args = argparse.ArgumentParser(description="Patch Review Tool")
args.add_argument("-b", "--blame", action="store_true", help="show blame for malformed patches")
args.add_argument("-v", "--verbose", action="store_true", help="show per-patch results")
args.add_argument("-g", "--histogram", action="store_true", help="show patch histogram")
args.add_argument("-j", "--json", help="update JSON")
- args.add_argument("directory", help="directory to scan")
+ args.add_argument("directory", type=pathlib.Path, metavar="DIRECTORY", help="directory to scan (layer, or repository of layers)")
args = args.parse_args()
- patches = subprocess.check_output(("git", "-C", args.directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff")).decode("utf-8").split()
- results = patchreview(args.directory, patches)
+ layers = find_layers(args.directory)
+ print(f"Found layers {' '.join((d.name for d in layers))}")
+ patches = gather_patches(layers)
+ results = patchreview(patches)
analyse(results, want_blame=args.blame, verbose=args.verbose)
if args.json:
- import json, os.path, collections
if os.path.isfile(args.json):
data = json.load(open(args.json))
else:
@@ -223,8 +256,11 @@ if __name__ == "__main__":
row = collections.Counter()
row["total"] = len(results)
- row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"]).decode("utf-8").strip()
- row["commit"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%H"]).decode("utf-8").strip()
+ row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"], universal_newlines=True).strip()
+ row["commit"] = subprocess.check_output(["git", "-C", args.directory, "rev-parse", "HEAD"], universal_newlines=True).strip()
+ row['commit_count'] = subprocess.check_output(["git", "-C", args.directory, "rev-list", "--count", "HEAD"], universal_newlines=True).strip()
+ row['recipe_count'] = count_recipes(layers)
+
for r in results.values():
if r.upstream_status in status_values:
row[r.upstream_status] += 1
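
patchreview.py can now be pointed either at a single layer or at a repository holding several meta-* layers, and its JSON output gains commit and recipe counts. A usage sketch; the layer checkout and JSON file name are illustrative:

  $ ./scripts/contrib/patchreview.py --histogram --json patch-status.json ../meta-arm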
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 2f91a355b0..885105fab3 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -149,13 +149,10 @@ fi
WEB_URL=""
case "$REMOTE_URL" in
*git.yoctoproject.org*)
- WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
- ;;
- *git.pokylinux.org*)
- WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.yoctoproject.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*git.openembedded.org*)
- WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*github.com*)
WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
diff --git a/scripts/devtool b/scripts/devtool
index 20d785c7f7..60ea3e8298 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -137,17 +137,27 @@ def create_workspace(args, config, basepath, workspace):
workspacedir = os.path.abspath(args.layerpath)
else:
workspacedir = os.path.abspath(os.path.join(basepath, 'workspace'))
- _create_workspace(workspacedir, config, basepath)
+ layerseries = None
+ if args.layerseries:
+ layerseries = args.layerseries
+ _create_workspace(workspacedir, config, basepath, layerseries)
if not args.create_only:
_enable_workspace_layer(workspacedir, config, basepath)
-def _create_workspace(workspacedir, config, basepath):
+def _create_workspace(workspacedir, config, basepath, layerseries=None):
import bb
confdir = os.path.join(workspacedir, 'conf')
if os.path.exists(os.path.join(confdir, 'layer.conf')):
logger.info('Specified workspace already set up, leaving as-is')
else:
+ if not layerseries:
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ layerseries = tinfoil.config_data.getVar('LAYERSERIES_CORENAMES')
+ finally:
+ tinfoil.shutdown()
+
# Add a config file
bb.utils.mkdirhier(confdir)
with open(os.path.join(confdir, 'layer.conf'), 'w') as f:
@@ -159,7 +169,7 @@ def _create_workspace(workspacedir, config, basepath):
f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n')
f.write('BBFILE_PRIORITY_workspacelayer = "99"\n')
- f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n')
+ f.write('LAYERSERIES_COMPAT_workspacelayer = "%s"\n' % layerseries)
# Add a README file
with open(os.path.join(workspacedir, 'README'), 'w') as f:
f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
@@ -289,8 +299,9 @@ def main():
return 2
# Search BBPATH first to allow layers to override plugins in scripts_path
- for path in global_args.bbpath.split(':') + [scripts_path]:
- pluginpath = os.path.join(path, 'lib', 'devtool')
+ pluginpaths = [os.path.join(path, 'lib', 'devtool') for path in global_args.bbpath.split(':') + [scripts_path]]
+ context.pluginpaths = pluginpaths
+ for pluginpath in pluginpaths:
scriptutils.load_plugins(logger, plugins, pluginpath)
subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
@@ -309,6 +320,7 @@ def main():
description='Sets up a new workspace. NOTE: other devtool subcommands will create a workspace automatically as needed, so you only need to use %(prog)s if you want to specify where the workspace should be located.',
group='advanced')
parser_create_workspace.add_argument('layerpath', nargs='?', help='Path in which the workspace layer should be created')
+ parser_create_workspace.add_argument('--layerseries', help='Layer series the workspace should be set to be compatible with')
parser_create_workspace.add_argument('--create-only', action="store_true", help='Only create the workspace layer, do not alter configuration')
parser_create_workspace.set_defaults(func=create_workspace, no_workspace=True)
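
The new --layerseries option lets devtool create-workspace declare which layer series the workspace layer is compatible with, instead of reading LAYERSERIES_CORENAMES through tinfoil. A usage sketch; the series name and path are illustrative:

  $ devtool create-workspace --layerseries styhead /path/to/workspace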
diff --git a/scripts/esdk-tools/devtool b/scripts/esdk-tools/devtool
new file mode 120000
index 0000000000..176a01ca68
--- /dev/null
+++ b/scripts/esdk-tools/devtool
@@ -0,0 +1 @@
+../devtool
\ No newline at end of file
diff --git a/scripts/esdk-tools/oe-find-native-sysroot b/scripts/esdk-tools/oe-find-native-sysroot
new file mode 120000
index 0000000000..d3493f3310
--- /dev/null
+++ b/scripts/esdk-tools/oe-find-native-sysroot
@@ -0,0 +1 @@
+../oe-find-native-sysroot
\ No newline at end of file
diff --git a/scripts/esdk-tools/recipetool b/scripts/esdk-tools/recipetool
new file mode 120000
index 0000000000..60a95dd936
--- /dev/null
+++ b/scripts/esdk-tools/recipetool
@@ -0,0 +1 @@
+../recipetool
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu b/scripts/esdk-tools/runqemu
new file mode 120000
index 0000000000..ae7e7ad7c2
--- /dev/null
+++ b/scripts/esdk-tools/runqemu
@@ -0,0 +1 @@
+../runqemu
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-addptable2image b/scripts/esdk-tools/runqemu-addptable2image
new file mode 120000
index 0000000000..afcd00e79d
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-addptable2image
@@ -0,0 +1 @@
+../runqemu-addptable2image
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-export-rootfs b/scripts/esdk-tools/runqemu-export-rootfs
new file mode 120000
index 0000000000..a26fcf6110
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-export-rootfs
@@ -0,0 +1 @@
+../runqemu-export-rootfs
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-extract-sdk b/scripts/esdk-tools/runqemu-extract-sdk
new file mode 120000
index 0000000000..cc858aaad5
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-extract-sdk
@@ -0,0 +1 @@
+../runqemu-extract-sdk
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-gen-tapdevs b/scripts/esdk-tools/runqemu-gen-tapdevs
new file mode 120000
index 0000000000..dbdf79134c
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-gen-tapdevs
@@ -0,0 +1 @@
+../runqemu-gen-tapdevs
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifdown b/scripts/esdk-tools/runqemu-ifdown
new file mode 120000
index 0000000000..0097693ca3
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifdown
@@ -0,0 +1 @@
+../runqemu-ifdown
\ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifup b/scripts/esdk-tools/runqemu-ifup
new file mode 120000
index 0000000000..41026d2c0a
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifup
@@ -0,0 +1 @@
+../runqemu-ifup
\ No newline at end of file
diff --git a/scripts/esdk-tools/wic b/scripts/esdk-tools/wic
new file mode 120000
index 0000000000..a9d908aa25
--- /dev/null
+++ b/scripts/esdk-tools/wic
@@ -0,0 +1 @@
+../wic
\ No newline at end of file
diff --git a/scripts/install-buildtools b/scripts/install-buildtools
index 10c3d043de..2218f3ffac 100755
--- a/scripts/install-buildtools
+++ b/scripts/install-buildtools
@@ -57,8 +57,8 @@ logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
DEFAULT_INSTALL_DIR = os.path.join(os.path.split(scripts_path)[0],'buildtools')
DEFAULT_BASE_URL = 'http://downloads.yoctoproject.org/releases/yocto'
-DEFAULT_RELEASE = 'yocto-3.4'
-DEFAULT_INSTALLER_VERSION = '3.4'
+DEFAULT_RELEASE = 'yocto-4.1'
+DEFAULT_INSTALLER_VERSION = '4.1'
DEFAULT_BUILDDATE = '202110XX'
# Python version sanity check
@@ -154,6 +154,8 @@ def main():
group.add_argument('--without-extended-buildtools', action='store_false',
dest='with_extended_buildtools',
help='disable extended buildtools (traditional buildtools tarball)')
+ group.add_argument('--make-only', action='store_true',
+ help='only install make tarball')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', '--check', help='enable checksum validation',
default=True, action='store_true')
@@ -170,6 +172,9 @@ def main():
args = parser.parse_args()
+ if args.make_only:
+ args.with_extended_buildtools = False
+
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.quiet:
@@ -197,7 +202,10 @@ def main():
if not args.build_date:
logger.error("Milestone installers require --build-date")
else:
- if args.with_extended_buildtools:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ elif args.with_extended_buildtools:
filename = "%s-buildtools-extended-nativesdk-standalone-%s-%s.sh" % (
arch, args.installer_version, args.build_date)
else:
@@ -207,6 +215,8 @@ def main():
buildtools_url = "%s/milestones/%s/buildtools/%s" % (base_url, args.release, safe_filename)
# regular release SDK
else:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
if args.with_extended_buildtools:
filename = "%s-buildtools-extended-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
else:
@@ -303,7 +313,9 @@ def main():
if args.with_extended_buildtools and not m:
logger.info("Ignoring --with-extended-buildtools as filename "
"does not contain 'extended'")
- if args.with_extended_buildtools and m:
+ if args.make_only:
+ tool = 'make'
+ elif args.with_extended_buildtools and m:
tool = 'gcc'
else:
tool = 'tar'
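
With --make-only, install-buildtools fetches the buildtools-make tarball variant instead of the standard or extended one (the option also implies --without-extended-buildtools). A minimal usage sketch using the script's defaults:

  $ ./scripts/install-buildtools --make-only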
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index c69b5bf4d7..6db60d5bcf 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -8,7 +8,7 @@ import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -79,8 +79,8 @@ class BSTask(dict):
return self['rusage']['ru_oublock']
@classmethod
- def from_file(cls, buildstat_file):
- """Read buildstat text file"""
+ def from_file(cls, buildstat_file, fallback_end=0):
+ """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing."""
bs_task = cls()
log.debug("Reading task buildstats from %s", buildstat_file)
end_time = None
@@ -108,7 +108,10 @@ class BSTask(dict):
bs_task[ru_type][ru_key] = val
elif key == 'Status':
bs_task['status'] = val
- if end_time is not None and start_time is not None:
+ # If the task didn't finish, fill in the fallback end time if specified
+ if start_time and not end_time and fallback_end:
+ end_time = fallback_end
+ if start_time and end_time:
bs_task['elapsed_time'] = end_time - start_time
else:
raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
@@ -226,25 +229,44 @@ class BuildStats(dict):
epoch = match.group('epoch')
return name, epoch, version, revision
+ @staticmethod
+ def parse_top_build_stats(path):
+ """
+ Parse the top-level build_stats file for build-wide start and duration.
+ """
+ start = elapsed = 0
+ with open(path) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Build Started':
+ start = float(val)
+ elif key == "Elapsed time":
+ elapsed = float(val.split()[0])
+ return start, elapsed
+
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
- if not os.path.isfile(os.path.join(path, 'build_stats')):
+ top_stats = os.path.join(path, 'build_stats')
+ if not os.path.isfile(top_stats):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
-
buildstats = cls()
+ build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats)
+ build_end = build_started + build_elapsed
+
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
- os.path.join(recipe_dir, task))
+ os.path.join(recipe_dir, task), build_end)
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index aa946f3036..62ecdfe390 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -16,6 +16,7 @@ class LayerType(Enum):
BSP = 0
DISTRO = 1
SOFTWARE = 2
+ CORE = 3
ERROR_NO_LAYER_CONF = 98
ERROR_BSP_DISTRO = 99
@@ -43,7 +44,7 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
ldata.setVar('LAYERDIR', layer_path)
try:
- ldata = bb.parse.handle(lconf, ldata, include=True)
+ ldata = bb.parse.handle(lconf, ldata, include=True, baseconfig=True)
except:
raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path)
ldata.expandVarref('LAYERDIR')
@@ -106,7 +107,13 @@ def _detect_layer(layer_path):
if distros:
is_distro = True
- if is_bsp and is_distro:
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ if layer_name == "meta" and "core" in layer['collections']:
+ layer['type'] = LayerType.CORE
+ layer['conf']['machines'] = machines
+ layer['conf']['distros'] = distros
+ elif is_bsp and is_distro:
layer['type'] = LayerType.ERROR_BSP_DISTRO
elif is_bsp:
layer['type'] = LayerType.BSP
@@ -117,8 +124,6 @@ def _detect_layer(layer_path):
else:
layer['type'] = LayerType.SOFTWARE
- layer['collections'] = _get_layer_collections(layer['path'])
-
return layer
def detect_layers(layer_directories, no_auto):
@@ -302,7 +307,7 @@ def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
cmd += 'bitbake '
if failsafe:
cmd += '-k '
- cmd += '-S none world'
+ cmd += '-S lockedsigs world'
sigs_file = os.path.join(builddir, 'locked-sigs.inc')
if os.path.exists(sigs_file):
os.unlink(sigs_file)
@@ -319,8 +324,8 @@ def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
else:
raise
- sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
- tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
+ tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
current_tune = None
with open(sigs_file, 'r') as f:
for line in f.readlines():
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
index a80a5844da..b76163fb56 100644
--- a/scripts/lib/checklayer/cases/bsp.py
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class BSPCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.BSP:
+ if self.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE):
raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index 491a13953c..97b16f78c8 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -12,6 +12,9 @@ from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
+ if self.tc.layer['type'] == LayerType.CORE:
+ raise unittest.SkipTest("Core layer's README is top level")
+
# The top-level README file may have a suffix (like README.rst or README.txt).
readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
@@ -69,6 +72,21 @@ class CommonCheckLayer(OECheckLayerTestCase):
self.tc.layer['name'])
self.fail('\n'.join(msg))
+ @unittest.expectedFailure
+ def test_patches_upstream_status(self):
+ import sys
+ sys.path.append(os.path.join(sys.path[0], '../../../../meta/lib/'))
+ import oe.qa
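+        # A patch is reported here when oe.qa.check_upstream_status() flags it,
+        # i.e. it lacks a well-formed tag such as (illustrative):
+        #   Upstream-Status: Backport [https://example.com/commit/abc123]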
+ patches = []
+ for dirpath, dirs, files in os.walk(self.tc.layer['path']):
+ for filename in files:
+ if filename.endswith(".patch"):
+ ppath = os.path.join(dirpath, filename)
+ if oe.qa.check_upstream_status(ppath):
+ patches.append(ppath)
+        self.assertEqual(len(patches), 0,
+            msg="Found the following patches with malformed or missing upstream status:\n%s" % '\n'.join([str(patch) for patch in patches]))
+
def test_signatures(self):
if self.tc.layer['type'] == LayerType.SOFTWARE and \
not self.tc.test_software_layer_signatures:
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
index f0bee5493c..a35332451c 100644
--- a/scripts/lib/checklayer/cases/distro.py
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class DistroCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.DISTRO:
+ if self.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE):
raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
index 702db669de..6133c1c5b4 100644
--- a/scripts/lib/devtool/__init__.py
+++ b/scripts/lib/devtool/__init__.py
@@ -78,12 +78,15 @@ def exec_fakeroot(d, cmd, **kwargs):
"""Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
# Grab the command and check it actually exists
fakerootcmd = d.getVar('FAKEROOTCMD')
+ fakerootenv = d.getVar('FAKEROOTENV')
+    return exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs)
+
+def exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs):
if not os.path.exists(fakerootcmd):
logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built')
return 2
# Set up the appropriate environment
newenv = dict(os.environ)
- fakerootenv = d.getVar('FAKEROOTENV')
for varvalue in fakerootenv.split():
if '=' in varvalue:
splitval = varvalue.split('=', 1)
@@ -233,6 +236,28 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
bb.process.run('git tag -f %s' % basetag, cwd=repodir)
+ # if recipe unpacks another git repo inside S, we need to declare it as a regular git submodule now,
+ # so we will be able to tag branches on it and extract patches when doing finish/update on the recipe
+ stdout, _ = bb.process.run("git status --porcelain", cwd=repodir)
+ found = False
+ for line in stdout.splitlines():
+ if line.endswith("/"):
+ new_dir = line.split()[1]
+ for root, dirs, files in os.walk(os.path.join(repodir, new_dir)):
+ if ".git" in dirs + files:
+ (stdout, _) = bb.process.run('git remote', cwd=root)
+ remote = stdout.splitlines()[0]
+ (stdout, _) = bb.process.run('git remote get-url %s' % remote, cwd=root)
+ remote_url = stdout.splitlines()[0]
+ logger.error(os.path.relpath(os.path.join(root, ".."), root))
+ bb.process.run('git submodule add %s %s' % (remote_url, os.path.relpath(root, os.path.join(root, ".."))), cwd=os.path.join(root, ".."))
+ found = True
+ if found:
+ oe.patch.GitApplyTree.commitIgnored("Add additional submodule from SRC_URI", dir=os.path.join(root, ".."), d=d)
+ found = False
+ if os.path.exists(os.path.join(repodir, '.gitmodules')):
+ bb.process.run('git submodule foreach --recursive "git tag -f %s"' % basetag, cwd=repodir)
+
def recipe_to_append(recipefile, config, wildcard=False):
"""
Convert a recipe file to a bbappend file path within the workspace.
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
index 6fe02fff2a..1cd4831d2b 100644
--- a/scripts/lib/devtool/build_sdk.py
+++ b/scripts/lib/devtool/build_sdk.py
@@ -13,7 +13,7 @@ import shutil
import errno
import sys
import tempfile
-from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+from devtool import DevtoolError
from devtool import build_image
logger = logging.getLogger('devtool')
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index e14a587417..b5ca8f2c2f 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -16,7 +16,7 @@ import bb.utils
import argparse_oe
import oe.types
-from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import exec_fakeroot_no_d, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
@@ -133,16 +133,38 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
return '\n'.join(lines)
-
-
def deploy(args, config, basepath, workspace):
"""Entry point for the devtool 'deploy' subcommand"""
- import math
- import oe.recipeutils
- import oe.package
+ import oe.utils
check_workspace_recipe(workspace, args.recipename, checksrc=False)
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ try:
+ rd = tinfoil.parse_recipe(args.recipename)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+
+ srcdir = rd.getVar('D')
+ workdir = rd.getVar('WORKDIR')
+ path = rd.getVar('PATH')
+ strip_cmd = rd.getVar('STRIP')
+ libdir = rd.getVar('libdir')
+ base_libdir = rd.getVar('base_libdir')
+ max_process = oe.utils.get_bb_number_threads(rd)
+ fakerootcmd = rd.getVar('FAKEROOTCMD')
+ fakerootenv = rd.getVar('FAKEROOTENV')
+ finally:
+ tinfoil.shutdown()
+
+ return deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args)
+
+def deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args):
+ import math
+ import oe.package
+
try:
host, destdir = args.target.split(':')
except ValueError:
@@ -152,118 +174,108 @@ def deploy(args, config, basepath, workspace):
if not destdir.endswith('/'):
destdir += '/'
- tinfoil = setup_tinfoil(basepath=basepath)
- try:
- try:
- rd = tinfoil.parse_recipe(args.recipename)
- except Exception as e:
- raise DevtoolError('Exception parsing recipe %s: %s' %
- (args.recipename, e))
- recipe_outdir = rd.getVar('D')
- if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
- raise DevtoolError('No files to deploy - have you built the %s '
- 'recipe? If so, the install step has not installed '
- 'any files.' % args.recipename)
-
- if args.strip and not args.dry_run:
- # Fakeroot copy to new destination
- srcdir = recipe_outdir
- recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'devtool-deploy-target-stripped')
- if os.path.isdir(recipe_outdir):
- exec_fakeroot(rd, "rm -rf %s" % recipe_outdir, shell=True)
- exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
- os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
- oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
- rd.getVar('base_libdir'), rd)
-
- filelist = []
- inodes = set({})
- ftotalsize = 0
- for root, _, files in os.walk(recipe_outdir):
- for fn in files:
- fstat = os.lstat(os.path.join(root, fn))
- # Get the size in kiB (since we'll be comparing it to the output of du -k)
- # MUST use lstat() here not stat() or getfilesize() since we don't want to
- # dereference symlinks
- if fstat.st_ino in inodes:
- fsize = 0
- else:
- fsize = int(math.ceil(float(fstat.st_size)/1024))
- inodes.add(fstat.st_ino)
- ftotalsize += fsize
- # The path as it would appear on the target
- fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
- filelist.append((fpath, fsize))
-
- if args.dry_run:
- print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
- for item, _ in filelist:
- print(' %s' % item)
- return 0
-
- extraoptions = ''
- if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
- if not args.show_status:
- extraoptions += ' -q'
-
- scp_sshexec = ''
- ssh_sshexec = 'ssh'
- if args.ssh_exec:
- scp_sshexec = "-S %s" % args.ssh_exec
- ssh_sshexec = args.ssh_exec
- scp_port = ''
- ssh_port = ''
- if args.port:
- scp_port = "-P %s" % args.port
- ssh_port = "-p %s" % args.port
-
- if args.key:
- extraoptions += ' -i %s' % args.key
-
- # In order to delete previously deployed files and have the manifest file on
- # the target, we write out a shell script and then copy it to the target
- # so we can then run it (piping tar output to it).
- # (We cannot use scp here, because it doesn't preserve symlinks.)
- tmpdir = tempfile.mkdtemp(prefix='devtool')
- try:
- tmpscript = '/tmp/devtool_deploy.sh'
- tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
- shellscript = _prepare_remote_script(deploy=True,
- verbose=args.show_status,
- nopreserve=args.no_preserve,
- nocheckspace=args.no_check_space)
- # Write out the script to a file
- with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
- f.write(shellscript)
- # Write out the file list
- with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
- f.write('%d\n' % ftotalsize)
- for fpath, fsize in filelist:
- f.write('%s %d\n' % (fpath, fsize))
- # Copy them to the target
- ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
- if ret != 0:
- raise DevtoolError('Failed to copy script to %s - rerun with -s to '
- 'get a complete error message' % args.target)
- finally:
- shutil.rmtree(tmpdir)
+ recipe_outdir = srcdir
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
+
+ if args.strip and not args.dry_run:
+ # Fakeroot copy to new destination
+ srcdir = recipe_outdir
+ recipe_outdir = os.path.join(workdir, 'devtool-deploy-target-stripped')
+ if os.path.isdir(recipe_outdir):
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "rm -rf %s" % recipe_outdir, shell=True)
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
+ os.environ['PATH'] = ':'.join([os.environ['PATH'], path or ''])
+ oe.package.strip_execs(args.recipename, recipe_outdir, strip_cmd, libdir, base_libdir, max_process)
+
+ filelist = []
+ inodes = set({})
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+ fstat = os.lstat(os.path.join(root, fn))
+ # Get the size in kiB (since we'll be comparing it to the output of du -k)
+ # MUST use lstat() here not stat() or getfilesize() since we don't want to
+ # dereference symlinks
+ if fstat.st_ino in inodes:
+ fsize = 0
+ else:
+ fsize = int(math.ceil(float(fstat.st_size)/1024))
+ inodes.add(fstat.st_ino)
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
+
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
- # Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
- if ret != 0:
- raise DevtoolError('Deploy failed - rerun with -s to get a complete '
- 'error message')
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
- logger.info('Successfully deployed %s' % recipe_outdir)
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
+ if args.key:
+ extraoptions += ' -i %s' % args.key
- files_list = []
- for root, _, files in os.walk(recipe_outdir):
- for filename in files:
- filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
- files_list.append(os.path.join(destdir, filename))
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
finally:
- tinfoil.shutdown()
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = exec_fakeroot_no_d(fakerootcmd, fakerootenv, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ logger.info('Successfully deployed %s' % recipe_outdir)
+
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
return 0
diff --git a/scripts/lib/devtool/ide_plugins/__init__.py b/scripts/lib/devtool/ide_plugins/__init__.py
new file mode 100644
index 0000000000..19c2f61c5f
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/__init__.py
@@ -0,0 +1,282 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin interface definition and helper functions"""
+
+import errno
+import json
+import logging
+import os
+import stat
+from enum import Enum, auto
+from devtool import DevtoolError
+from bb.utils import mkdirhier
+
+logger = logging.getLogger('devtool')
+
+
+class BuildTool(Enum):
+ UNDEFINED = auto()
+ CMAKE = auto()
+ MESON = auto()
+
+ @property
+ def is_c_ccp(self):
+ if self is BuildTool.CMAKE:
+ return True
+ if self is BuildTool.MESON:
+ return True
+ return False
+
+
+class GdbCrossConfig:
+ """Base class defining the GDB configuration generator interface
+
+ Generate a GDB configuration for a binary on the target device.
+    Only one instance per binary is allowed; this makes it possible to assign a
+    unique port number to each gdbserver instance.
+ """
+ _gdbserver_port_next = 1234
+ _binaries = []
+
+ def __init__(self, image_recipe, modified_recipe, binary, gdbserver_multi=True):
+ self.image_recipe = image_recipe
+ self.modified_recipe = modified_recipe
+ self.gdb_cross = modified_recipe.gdb_cross
+ self.binary = binary
+ if binary in GdbCrossConfig._binaries:
+ raise DevtoolError(
+ "gdbserver config for binary %s is already generated" % binary)
+ GdbCrossConfig._binaries.append(binary)
+ self.script_dir = modified_recipe.ide_sdk_scripts_dir
+ self.gdbinit_dir = os.path.join(self.script_dir, 'gdbinit')
+ self.gdbserver_multi = gdbserver_multi
+ self.binary_pretty = self.binary.replace(os.sep, '-').lstrip('-')
+ self.gdbserver_port = GdbCrossConfig._gdbserver_port_next
+ GdbCrossConfig._gdbserver_port_next += 1
+ self.id_pretty = "%d_%s" % (self.gdbserver_port, self.binary_pretty)
+ # gdbserver start script
+ gdbserver_script_file = 'gdbserver_' + self.id_pretty
+ if self.gdbserver_multi:
+ gdbserver_script_file += "_m"
+ self.gdbserver_script = os.path.join(
+ self.script_dir, gdbserver_script_file)
+ # gdbinit file
+ self.gdbinit = os.path.join(
+ self.gdbinit_dir, 'gdbinit_' + self.id_pretty)
+ # gdb start script
+ self.gdb_script = os.path.join(
+ self.script_dir, 'gdb_' + self.id_pretty)
+
+ def _gen_gdbserver_start_script(self):
+ """Generate a shell command starting the gdbserver on the remote device via ssh
+
+ GDB supports two modes:
+ multi: gdbserver remains running over several debug sessions
+ once: gdbserver terminates after the debugged process terminates
+ """
+ cmd_lines = ['#!/bin/sh']
+ if self.gdbserver_multi:
+ temp_dir = "TEMP_DIR=/tmp/gdbserver_%s; " % self.id_pretty
+ gdbserver_cmd_start = temp_dir
+ gdbserver_cmd_start += "test -f \\$TEMP_DIR/pid && exit 0; "
+ gdbserver_cmd_start += "mkdir -p \\$TEMP_DIR; "
+ gdbserver_cmd_start += "%s --multi :%s > \\$TEMP_DIR/log 2>&1 & " % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port)
+ gdbserver_cmd_start += "echo \\$! > \\$TEMP_DIR/pid;"
+
+ gdbserver_cmd_stop = temp_dir
+ gdbserver_cmd_stop += "test -f \\$TEMP_DIR/pid && kill \\$(cat \\$TEMP_DIR/pid); "
+ gdbserver_cmd_stop += "rm -rf \\$TEMP_DIR; "
+
+ gdbserver_cmd_l = []
+ gdbserver_cmd_l.append('if [ "$1" = "stop" ]; then')
+ gdbserver_cmd_l.append(' shift')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_stop))
+ gdbserver_cmd_l.append('else')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start))
+ gdbserver_cmd_l.append('fi')
+ gdbserver_cmd = os.linesep.join(gdbserver_cmd_l)
+ else:
+ gdbserver_cmd_start = "%s --once :%s %s" % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port, self.binary)
+ gdbserver_cmd = "%s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start)
+ cmd_lines.append(gdbserver_cmd)
+ GdbCrossConfig.write_file(self.gdbserver_script, cmd_lines, True)
+
+ def _gen_gdbinit_config(self):
+ """Generate a gdbinit file for this binary and the corresponding gdbserver configuration"""
+ gdbinit_lines = ['# This file is generated by devtool ide-sdk']
+ if self.gdbserver_multi:
+ target_help = '# gdbserver --multi :%d' % self.gdbserver_port
+ remote_cmd = 'target extended-remote'
+ else:
+ target_help = '# gdbserver :%d %s' % (
+ self.gdbserver_port, self.binary)
+ remote_cmd = 'target remote'
+ gdbinit_lines.append('# On the remote target:')
+ gdbinit_lines.append(target_help)
+ gdbinit_lines.append('# On the build machine:')
+ gdbinit_lines.append('# cd ' + self.modified_recipe.real_srctree)
+ gdbinit_lines.append(
+ '# ' + self.gdb_cross.gdb + ' -ix ' + self.gdbinit)
+
+ gdbinit_lines.append('set sysroot ' + self.modified_recipe.d)
+ gdbinit_lines.append('set substitute-path "/usr/include" "' +
+ os.path.join(self.modified_recipe.recipe_sysroot, 'usr', 'include') + '"')
+ # Disable debuginfod for now, the IDE configuration uses rootfs-dbg from the image workdir.
+ gdbinit_lines.append('set debuginfod enabled off')
+ if self.image_recipe.rootfs_dbg:
+ gdbinit_lines.append(
+ 'set solib-search-path "' + self.modified_recipe.solib_search_path_str(self.image_recipe) + '"')
+ # First: Search for sources of this recipe in the workspace folder
+ if self.modified_recipe.pn in self.modified_recipe.target_dbgsrc_dir:
+ gdbinit_lines.append('set substitute-path "%s" "%s"' %
+ (self.modified_recipe.target_dbgsrc_dir, self.modified_recipe.real_srctree))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if self.modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ gdbinit_lines.append('set substitute-path "/usr/src/debug" "%s"' % os.path.join(
+ self.image_recipe.rootfs_dbg, "usr", "src", "debug"))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+ gdbinit_lines.append(
+ '%s %s:%d' % (remote_cmd, self.gdb_cross.host, self.gdbserver_port))
+ gdbinit_lines.append('set remote exec-file ' + self.binary)
+ gdbinit_lines.append(
+ 'run ' + os.path.join(self.modified_recipe.d, self.binary))
+
+ GdbCrossConfig.write_file(self.gdbinit, gdbinit_lines)
+
+ def _gen_gdb_start_script(self):
+ """Generate a script starting GDB with the corresponding gdbinit configuration."""
+ cmd_lines = ['#!/bin/sh']
+ cmd_lines.append('cd ' + self.modified_recipe.real_srctree)
+ cmd_lines.append(self.gdb_cross.gdb + ' -ix ' +
+ self.gdbinit + ' "$@"')
+ GdbCrossConfig.write_file(self.gdb_script, cmd_lines, True)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+ self._gen_gdbinit_config()
+ self._gen_gdb_start_script()
+
+ @staticmethod
+ def write_file(script_file, cmd_lines, executable=False):
+ script_dir = os.path.dirname(script_file)
+ mkdirhier(script_dir)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ script_f.write(os.linesep)
+ if executable:
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ logger.info("Created: %s" % script_file)
+
+
+class IdeBase:
+ """Base class defining the interface for IDE plugins"""
+
+ def __init__(self):
+ self.ide_name = 'undefined'
+ self.gdb_cross_configs = []
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """Used to find the default ide handler if --ide is not passed"""
+ return 10
+
+ def setup_shared_sysroots(self, shared_env):
+ logger.warn("Shared sysroot mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ logger.warn("Modified recipe mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def initialize_gdb_cross_configs(self, image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfig):
+ binaries = modified_recipe.find_installed_binaries()
+ for binary in binaries:
+ gdb_cross_config = gdb_cross_config_class(
+ image_recipe, modified_recipe, binary)
+ gdb_cross_config.initialize()
+ self.gdb_cross_configs.append(gdb_cross_config)
+
+ @staticmethod
+ def gen_oe_scrtips_sym_link(modified_recipe):
+ # create a sym-link from sources to the scripts directory
+ if os.path.isdir(modified_recipe.ide_sdk_scripts_dir):
+ IdeBase.symlink_force(modified_recipe.ide_sdk_scripts_dir,
+ os.path.join(modified_recipe.real_srctree, 'oe-scripts'))
+
+ @staticmethod
+ def update_json_file(json_dir, json_file, update_dict):
+ """Update a json file
+
+        The values from update_dict are merged into the existing file content
+        using dict.update; a missing or unparsable file is treated as empty.
+ """
+ json_path = os.path.join(json_dir, json_file)
+ logger.info("Updating IDE config file: %s (%s)" %
+ (json_file, json_path))
+ if not os.path.exists(json_dir):
+ os.makedirs(json_dir)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed. Probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+ orig_dict.update(update_dict)
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
+
+ @staticmethod
+ def symlink_force(tgt, dst):
+ try:
+ os.symlink(tgt, dst)
+ except OSError as err:
+ if err.errno == errno.EEXIST:
+ if os.readlink(dst) != tgt:
+ os.remove(dst)
+ os.symlink(tgt, dst)
+ else:
+ raise err
+
+
+def get_devtool_deploy_opts(args):
+ """Filter args for devtool deploy-target args"""
+ if not args.target:
+ return None
+ devtool_deploy_opts = [args.target]
+ if args.no_host_check:
+ devtool_deploy_opts += ["-c"]
+ if args.show_status:
+ devtool_deploy_opts += ["-s"]
+ if args.no_preserve:
+ devtool_deploy_opts += ["-p"]
+ if args.no_check_space:
+ devtool_deploy_opts += ["--no-check-space"]
+ if args.ssh_exec:
+ devtool_deploy_opts += ["-e", args.ssh.exec]
+ if args.port:
+ devtool_deploy_opts += ["-P", args.port]
+ if args.key:
+ devtool_deploy_opts += ["-I", args.key]
+ if args.strip is False:
+ devtool_deploy_opts += ["--no-strip"]
+ return devtool_deploy_opts
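+
+# Example (illustrative): for a target of root@192.168.7.2 with --show-status
+# and --no-strip set, this returns ['root@192.168.7.2', '-s', '--no-strip'].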
diff --git a/scripts/lib/devtool/ide_plugins/ide_code.py b/scripts/lib/devtool/ide_plugins/ide_code.py
new file mode 100644
index 0000000000..a62b93224e
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_code.py
@@ -0,0 +1,463 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin for VSCode and VSCodium"""
+
+import json
+import logging
+import os
+import shutil
+from devtool.ide_plugins import BuildTool, IdeBase, GdbCrossConfig, get_devtool_deploy_opts
+
+logger = logging.getLogger('devtool')
+
+
+class GdbCrossConfigVSCode(GdbCrossConfig):
+ def __init__(self, image_recipe, modified_recipe, binary):
+ super().__init__(image_recipe, modified_recipe, binary, False)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+
+
+class IdeVSCode(IdeBase):
+ """Manage IDE configurations for VSCode
+
+ Modified recipe mode:
+ - cmake: use the cmake-preset generated by devtool ide-sdk
+ - meson: meson is called via a wrapper script generated by devtool ide-sdk
+
+ Shared sysroot mode:
+ In shared sysroot mode, the cross tool-chain is exported to the user's global configuration.
+ A workspace cannot be created because there is no recipe that defines how a workspace could
+ be set up.
+ - cmake: adds a cmake-kit to .local/share/CMakeTools/cmake-tools-kits.json
+ The cmake-kit uses the environment script and the tool-chain file
+ generated by meta-ide-support.
+ - meson: Meson needs manual workspace configuration.
+ """
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """If --ide is not passed this is the default plugin"""
+ if shutil.which('code'):
+ return 100
+ return 0
+
+ def setup_shared_sysroots(self, shared_env):
+ """Expose the toolchain of the shared sysroots SDK"""
+ datadir = shared_env.ide_support.datadir
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ standalone_sysroot_native = shared_env.build_sysroots.standalone_sysroot_native
+ vscode_ws_path = os.path.join(
+ os.environ['HOME'], '.local', 'share', 'CMakeTools')
+ cmake_kits_path = os.path.join(vscode_ws_path, 'cmake-tools-kits.json')
+ oecmake_generator = "Ninja"
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+
+ if not os.path.isdir(vscode_ws_path):
+ os.makedirs(vscode_ws_path)
+ cmake_kits_old = []
+ if os.path.exists(cmake_kits_path):
+ with open(cmake_kits_path, 'r', encoding='utf-8') as cmake_kits_file:
+ cmake_kits_old = json.load(cmake_kits_file)
+ cmake_kits = cmake_kits_old.copy()
+
+ cmake_kit_new = {
+ "name": "OE " + real_multimach_target_sys,
+ "environmentSetupScript": env_script,
+ "toolchainFile": standalone_sysroot_native + datadir + "/cmake/OEToolchainConfig.cmake",
+ "preferredGenerator": {
+ "name": oecmake_generator
+ }
+ }
+
+ def merge_kit(cmake_kits, cmake_kit_new):
+ i = 0
+ while i < len(cmake_kits):
+ if 'environmentSetupScript' in cmake_kits[i] and \
+ cmake_kits[i]['environmentSetupScript'] == cmake_kit_new['environmentSetupScript']:
+ cmake_kits[i] = cmake_kit_new
+ return
+ i += 1
+ cmake_kits.append(cmake_kit_new)
+ merge_kit(cmake_kits, cmake_kit_new)
+
+ if cmake_kits != cmake_kits_old:
+ logger.info("Updating: %s" % cmake_kits_path)
+ with open(cmake_kits_path, 'w', encoding='utf-8') as cmake_kits_file:
+ json.dump(cmake_kits, cmake_kits_file, indent=4)
+ else:
+ logger.info("Already up to date: %s" % cmake_kits_path)
+
+ cmake_native = os.path.join(
+ shared_env.build_sysroots.standalone_sysroot_native, 'usr', 'bin', 'cmake')
+ if os.path.isfile(cmake_native):
+ logger.info('cmake-kits call cmake by default. If the cmake provided by this SDK should be used, please add the following line to ".vscode/settings.json" file: "cmake.cmakePath": "%s"' % cmake_native)
+ else:
+ logger.error("Cannot find cmake native at: %s" % cmake_native)
+
+ def dot_code_dir(self, modified_recipe):
+ return os.path.join(modified_recipe.srctree, '.vscode')
+
+ def __vscode_settings_meson(self, settings_dict, modified_recipe):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ settings_dict["mesonbuild.mesonPath"] = modified_recipe.meson_wrapper
+
+ confopts = modified_recipe.mesonopts.split()
+ confopts += modified_recipe.meson_cross_file.split()
+ confopts += modified_recipe.extra_oemeson.split()
+ settings_dict["mesonbuild.configureOptions"] = confopts
+ settings_dict["mesonbuild.buildFolder"] = modified_recipe.b
+
+ def __vscode_settings_cmake(self, settings_dict, modified_recipe):
+ """Add cmake specific settings to settings.json.
+
+ Note: most settings are passed to the cmake preset.
+ """
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ settings_dict["cmake.configureOnOpen"] = True
+ settings_dict["cmake.sourceDirectory"] = modified_recipe.real_srctree
+
+ def vscode_settings(self, modified_recipe, image_recipe):
+ files_excludes = {
+ "**/.git/**": True,
+ "**/oe-logs/**": True,
+ "**/oe-workdir/**": True,
+ "**/source-date-epoch/**": True
+ }
+ python_exclude = [
+ "**/.git/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/source-date-epoch/**"
+ ]
+ files_readonly = {
+ modified_recipe.recipe_sysroot + '/**': True,
+ modified_recipe.recipe_sysroot_native + '/**': True,
+ }
+ if image_recipe.rootfs_dbg is not None:
+ files_readonly[image_recipe.rootfs_dbg + '/**'] = True
+ settings_dict = {
+ "files.watcherExclude": files_excludes,
+ "files.exclude": files_excludes,
+ "files.readonlyInclude": files_readonly,
+ "python.analysis.exclude": python_exclude
+ }
+ self.__vscode_settings_cmake(settings_dict, modified_recipe)
+ self.__vscode_settings_meson(settings_dict, modified_recipe)
+
+ settings_file = 'settings.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), settings_file, settings_dict)
+
+ def __vscode_extensions_cmake(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ recommendations += [
+ "twxs.cmake",
+ "ms-vscode.cmake-tools",
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def __vscode_extensions_meson(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ recommendations += [
+ 'mesonbuild.mesonbuild',
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def vscode_extensions(self, modified_recipe):
+ recommendations = []
+ self.__vscode_extensions_cmake(modified_recipe, recommendations)
+ self.__vscode_extensions_meson(modified_recipe, recommendations)
+ extensions_file = 'extensions.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), extensions_file, {"recommendations": recommendations})
+
+ def vscode_c_cpp_properties(self, modified_recipe):
+ properties_dict = {
+ "name": modified_recipe.recipe_id_pretty,
+ }
+ if modified_recipe.build_tool is BuildTool.CMAKE:
+ properties_dict["configurationProvider"] = "ms-vscode.cmake-tools"
+ elif modified_recipe.build_tool is BuildTool.MESON:
+ properties_dict["configurationProvider"] = "mesonbuild.mesonbuild"
+ properties_dict["compilerPath"] = os.path.join(modified_recipe.staging_bindir_toolchain, modified_recipe.cxx.split()[0])
+ else: # no C/C++ build
+ return
+
+ properties_dicts = {
+ "configurations": [
+ properties_dict
+ ],
+ "version": 4
+ }
+ prop_file = 'c_cpp_properties.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), prop_file, properties_dicts)
+
+ def vscode_launch_bin_dbg(self, gdb_cross_config):
+ modified_recipe = gdb_cross_config.modified_recipe
+
+ launch_config = {
+ "name": gdb_cross_config.id_pretty,
+ "type": "cppdbg",
+ "request": "launch",
+ "program": os.path.join(modified_recipe.d, gdb_cross_config.binary.lstrip('/')),
+ "stopAtEntry": True,
+ "cwd": "${workspaceFolder}",
+ "environment": [],
+ "externalConsole": False,
+ "MIMode": "gdb",
+ "preLaunchTask": gdb_cross_config.id_pretty,
+ "miDebuggerPath": modified_recipe.gdb_cross.gdb,
+ "miDebuggerServerAddress": "%s:%d" % (modified_recipe.gdb_cross.host, gdb_cross_config.gdbserver_port)
+ }
+
+ # Search for header files in recipe-sysroot.
+ src_file_map = {
+ "/usr/include": os.path.join(modified_recipe.recipe_sysroot, "usr", "include")
+ }
+        # First of all, search for unstripped binaries in the image folder.
+ # These binaries are copied (and optionally stripped) by deploy-target
+ setup_commands = [
+ {
+ "description": "sysroot",
+ "text": "set sysroot " + modified_recipe.d
+ }
+ ]
+
+ if gdb_cross_config.image_recipe.rootfs_dbg:
+ launch_config['additionalSOLibSearchPath'] = modified_recipe.solib_search_path_str(
+ gdb_cross_config.image_recipe)
+ # First: Search for sources of this recipe in the workspace folder
+ if modified_recipe.pn in modified_recipe.target_dbgsrc_dir:
+ src_file_map[modified_recipe.target_dbgsrc_dir] = "${workspaceFolder}"
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ src_file_map["/usr/src/debug"] = os.path.join(
+ gdb_cross_config.image_recipe.rootfs_dbg, "usr", "src", "debug")
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+
+ launch_config['sourceFileMap'] = src_file_map
+ launch_config['setupCommands'] = setup_commands
+ return launch_config
+
+ def vscode_launch(self, modified_recipe):
+ """GDB Launch configuration for binaries (elf files)"""
+
+ configurations = []
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is modified_recipe:
+ configurations.append(self.vscode_launch_bin_dbg(gdb_cross_config))
+ launch_dict = {
+ "version": "0.2.0",
+ "configurations": configurations
+ }
+ launch_file = 'launch.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), launch_file, launch_dict)
+
+ def vscode_tasks_cpp(self, args, modified_recipe):
+ run_install_deploy = modified_recipe.gen_install_deploy_script(args)
+ install_task_name = "install && deploy-target %s" % modified_recipe.recipe_id_pretty
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": install_task_name,
+ "type": "shell",
+ "command": run_install_deploy,
+ "problemMatcher": []
+ }
+ ]
+ }
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ install_task_name
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks_fallback(self, args, modified_recipe):
+ oe_init_dir = modified_recipe.oe_init_dir
+ oe_init = ". %s %s > /dev/null && " % (modified_recipe.oe_init_build_env, modified_recipe.topdir)
+ dt_build = "devtool build "
+ dt_build_label = dt_build + modified_recipe.recipe_id_pretty
+ dt_build_cmd = dt_build + modified_recipe.bpn
+ clean_opt = " --clean"
+ dt_build_clean_label = dt_build + modified_recipe.recipe_id_pretty + clean_opt
+ dt_build_clean_cmd = dt_build + modified_recipe.bpn + clean_opt
+ dt_deploy = "devtool deploy-target "
+ dt_deploy_label = dt_deploy + modified_recipe.recipe_id_pretty
+ dt_deploy_cmd = dt_deploy + modified_recipe.bpn
+ dt_build_deploy_label = "devtool build & deploy-target %s" % modified_recipe.recipe_id_pretty
+ deploy_opts = ' '.join(get_devtool_deploy_opts(args))
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": dt_build_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_cmd)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_deploy_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s %s" % (
+ oe_init, dt_deploy_cmd, deploy_opts)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_build_deploy_label,
+ "dependsOrder": "sequence",
+ "dependsOn": [
+ dt_build_label,
+ dt_deploy_label
+ ],
+ "problemMatcher": [],
+ "group": {
+ "kind": "build",
+ "isDefault": True
+ }
+ },
+ {
+ "label": dt_build_clean_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_clean_cmd)
+ ],
+ "problemMatcher": []
+ }
+ ]
+ }
+ if modified_recipe.gdb_cross:
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ dt_build_deploy_label
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks(self, args, modified_recipe):
+ if modified_recipe.build_tool.is_c_ccp:
+ self.vscode_tasks_cpp(args, modified_recipe)
+ else:
+ self.vscode_tasks_fallback(args, modified_recipe)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ self.vscode_settings(modified_recipe, image_recipe)
+ self.vscode_extensions(modified_recipe)
+ self.vscode_c_cpp_properties(modified_recipe)
+ if args.target:
+ self.initialize_gdb_cross_configs(
+ image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfigVSCode)
+ self.vscode_launch(modified_recipe)
+ self.vscode_tasks(args, modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['code'] = IdeVSCode
diff --git a/scripts/lib/devtool/ide_plugins/ide_none.py b/scripts/lib/devtool/ide_plugins/ide_none.py
new file mode 100644
index 0000000000..f106c5a026
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_none.py
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk generic IDE plugin"""
+
+import os
+import logging
+from devtool.ide_plugins import IdeBase, GdbCrossConfig
+
+logger = logging.getLogger('devtool')
+
+
+class IdeNone(IdeBase):
+ """Generate some generic helpers for other IDEs
+
+ Modified recipe mode:
+ Generate some helper scripts for remote debugging with GDB
+
+ Shared sysroot mode:
+ A wrapper for bitbake meta-ide-support and bitbake build-sysroots
+ """
+
+ def __init__(self):
+ super().__init__()
+
+ def setup_shared_sysroots(self, shared_env):
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+ logger.info(
+ "To use this SDK please source this: %s" % env_script)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ """generate some helper scripts and config files
+
+ - Execute the do_install task
+ - Execute devtool deploy-target
+ - Generate a gdbinit file per executable
+ - Generate the oe-scripts sym-link
+ """
+ script_path = modified_recipe.gen_install_deploy_script(args)
+ logger.info("Created: %s" % script_path)
+
+ self.initialize_gdb_cross_configs(image_recipe, modified_recipe)
+
+ IdeBase.gen_oe_scrtips_sym_link(modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['none'] = IdeNone
diff --git a/scripts/lib/devtool/ide_sdk.py b/scripts/lib/devtool/ide_sdk.py
new file mode 100755
index 0000000000..7807b322b3
--- /dev/null
+++ b/scripts/lib/devtool/ide_sdk.py
@@ -0,0 +1,1070 @@
+# Development tool - ide-sdk command plugin
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk plugin"""
+
+import json
+import logging
+import os
+import re
+import shutil
+import stat
+import subprocess
+import sys
+from argparse import RawTextHelpFormatter
+from enum import Enum
+
+import scriptutils
+import bb
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError, parse_recipe
+from devtool.standard import get_real_srctree
+from devtool.ide_plugins import BuildTool
+
+
+logger = logging.getLogger('devtool')
+
+# dict of classes derived from IdeBase
+ide_plugins = {}
+
+
+class DevtoolIdeMode(Enum):
+ """Different modes are supported by the ide-sdk plugin.
+
+ The enum might be extended by more advanced modes in the future. Some ideas:
+ - auto: modified if all recipes are modified, shared if none of the recipes is modified.
+ - mixed: modified mode for modified recipes, shared mode for all other recipes.
+ """
+
+ modified = 'modified'
+ shared = 'shared'
+
+
+class TargetDevice:
+ """SSH remote login parameters"""
+
+ def __init__(self, args):
+ self.extraoptions = ''
+ if args.no_host_check:
+ self.extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ self.ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ self.ssh_sshexec = args.ssh_exec
+ self.ssh_port = ''
+ if args.port:
+ self.ssh_port = "-p %s" % args.port
+ if args.key:
+ self.extraoptions += ' -i %s' % args.key
+
+ self.target = args.target
+ target_sp = args.target.split('@')
+ if len(target_sp) == 1:
+ self.login = ""
+ self.host = target_sp[0]
+ elif len(target_sp) == 2:
+ self.login = target_sp[0]
+ self.host = target_sp[1]
+ else:
+ logger.error("Invalid target argument: %s" % args.target)
+
+
+class RecipeNative:
+ """Base class for calling bitbake to provide a -native recipe"""
+
+ def __init__(self, name, target_arch=None):
+ self.name = name
+ self.target_arch = target_arch
+ self.bootstrap_tasks = [self.name + ':do_addto_recipe_sysroot']
+ self.staging_bindir_native = None
+ self.target_sys = None
+ self.__native_bin = None
+
+ def _initialize(self, config, workspace, tinfoil):
+ """Get the parsed recipe"""
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+ self.staging_bindir_native = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_NATIVE'))
+ self.target_sys = recipe_d.getVar('TARGET_SYS')
+ return recipe_d
+
+ def initialize(self, config, workspace, tinfoil):
+ """Basic initialization that can be overridden by a derived class"""
+ self._initialize(config, workspace, tinfoil)
+
+ @property
+ def native_bin(self):
+ if not self.__native_bin:
+ raise DevtoolError("native binary name is not defined.")
+ return self.__native_bin
+
+
+class RecipeGdbCross(RecipeNative):
+ """Handle handle gdb-cross on the host and the gdbserver on the target device"""
+
+ def __init__(self, args, target_arch, target_device):
+ super().__init__('gdb-cross-' + target_arch, target_arch)
+ self.target_device = target_device
+ self.gdb = None
+ self.gdbserver_port_next = int(args.gdbserver_port_start)
+ self.config_db = {}
+
+ def __find_gdbserver(self, config, tinfoil):
+ """Absolute path of the gdbserver"""
+ recipe_d_gdb = parse_recipe(
+ config, tinfoil, 'gdb', appends=True, filter_workspace=False)
+ if not recipe_d_gdb:
+ raise DevtoolError("Parsing gdb recipe failed")
+ return os.path.join(recipe_d_gdb.getVar('bindir'), 'gdbserver')
+
+ def initialize(self, config, workspace, tinfoil):
+ super()._initialize(config, workspace, tinfoil)
+ gdb_bin = self.target_sys + '-gdb'
+ gdb_path = os.path.join(
+ self.staging_bindir_native, self.target_sys, gdb_bin)
+ self.gdb = gdb_path
+ self.gdbserver_path = self.__find_gdbserver(config, tinfoil)
+
+ @property
+ def host(self):
+ return self.target_device.host
+
+
+class RecipeImage:
+ """Handle some image recipe related properties
+
+ Most workflows require firmware that runs on the target device.
+ This firmware must be consistent with the setup of the host system.
+ In particular, the debug symbols must be compatible. For this, the
+ rootfs must be created as part of the SDK.
+ """
+
+ def __init__(self, name):
+ self.combine_dbg_image = False
+ self.gdbserver_missing = False
+ self.name = name
+ self.rootfs = None
+ self.__rootfs_dbg = None
+ self.bootstrap_tasks = [self.name + ':do_build']
+
+ def initialize(self, config, tinfoil):
+ image_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not image_d:
+ raise DevtoolError(
+ "Parsing image recipe %s failed" % self.name)
+
+ self.combine_dbg_image = bb.data.inherits_class(
+ 'image-combined-dbg', image_d)
+
+ workdir = image_d.getVar('WORKDIR')
+ self.rootfs = os.path.join(workdir, 'rootfs')
+ if image_d.getVar('IMAGE_GEN_DEBUGFS') == "1":
+ self.__rootfs_dbg = os.path.join(workdir, 'rootfs-dbg')
+
+ self.gdbserver_missing = 'gdbserver' not in image_d.getVar(
+ 'IMAGE_INSTALL')
+
+ @property
+ def debug_support(self):
+ return bool(self.rootfs_dbg)
+
+ @property
+ def rootfs_dbg(self):
+ if self.__rootfs_dbg and os.path.isdir(self.__rootfs_dbg):
+ return self.__rootfs_dbg
+ return None
+
+
+class RecipeMetaIdeSupport:
+ """For the shared sysroots mode meta-ide-support is needed
+
+ For use cases where just a cross tool-chain is required but
+ no recipe is used, devtool ide-sdk abstracts calling bitbake meta-ide-support
+    and bitbake build-sysroots. This also makes it possible to expose the
+    cross-toolchains to IDEs. For example, VSCode supports different tool-chains via cmake-kits.
+ """
+
+ def __init__(self):
+ self.bootstrap_tasks = ['meta-ide-support:do_build']
+ self.topdir = None
+ self.datadir = None
+ self.deploy_dir_image = None
+ self.build_sys = None
+ # From toolchain-scripts
+ self.real_multimach_target_sys = None
+
+ def initialize(self, config, tinfoil):
+ meta_ide_support_d = parse_recipe(
+ config, tinfoil, 'meta-ide-support', appends=True, filter_workspace=False)
+ if not meta_ide_support_d:
+ raise DevtoolError("Parsing meta-ide-support recipe failed")
+
+ self.topdir = meta_ide_support_d.getVar('TOPDIR')
+ self.datadir = meta_ide_support_d.getVar('datadir')
+ self.deploy_dir_image = meta_ide_support_d.getVar(
+ 'DEPLOY_DIR_IMAGE')
+ self.build_sys = meta_ide_support_d.getVar('BUILD_SYS')
+ self.real_multimach_target_sys = meta_ide_support_d.getVar(
+ 'REAL_MULTIMACH_TARGET_SYS')
+
+
+class RecipeBuildSysroots:
+ """For the shared sysroots mode build-sysroots is needed"""
+
+ def __init__(self):
+ self.standalone_sysroot = None
+ self.standalone_sysroot_native = None
+ self.bootstrap_tasks = [
+ 'build-sysroots:do_build_target_sysroot',
+ 'build-sysroots:do_build_native_sysroot'
+ ]
+
+ def initialize(self, config, tinfoil):
+ build_sysroots_d = parse_recipe(
+ config, tinfoil, 'build-sysroots', appends=True, filter_workspace=False)
+ if not build_sysroots_d:
+ raise DevtoolError("Parsing build-sysroots recipe failed")
+ self.standalone_sysroot = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT')
+ self.standalone_sysroot_native = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT_NATIVE')
+
+
+class SharedSysrootsEnv:
+ """Handle the shared sysroots based workflow
+
+ Support the workflow with just a tool-chain without a recipe.
+ It's basically like:
+ bitbake some-dependencies
+ bitbake meta-ide-support
+ bitbake build-sysroots
+ Use the environment-* file found in the deploy folder
+ """
+
+ def __init__(self):
+ self.ide_support = None
+ self.build_sysroots = None
+
+ def initialize(self, ide_support, build_sysroots):
+ self.ide_support = ide_support
+ self.build_sysroots = build_sysroots
+
+ def setup_ide(self, ide):
+ ide.setup(self)
+
+
+class RecipeNotModified:
+ """Handling of recipes added to the Direct DSK shared sysroots."""
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_populate_sysroot']
+
+
+class RecipeModified:
+ """Handling of recipes in the workspace created by devtool modify"""
+ OE_INIT_BUILD_ENV = 'oe-init-build-env'
+
+ VALID_BASH_ENV_NAME_CHARS = re.compile(r"^[a-zA-Z0-9_]*$")
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_install']
+ self.gdb_cross = None
+ # workspace
+ self.real_srctree = None
+ self.srctree = None
+ self.ide_sdk_dir = None
+ self.ide_sdk_scripts_dir = None
+ self.bbappend = None
+ # recipe variables from d.getVar
+ self.b = None
+ self.base_libdir = None
+ self.bblayers = None
+ self.bpn = None
+ self.d = None
+ self.fakerootcmd = None
+ self.fakerootenv = None
+ self.libdir = None
+ self.max_process = None
+ self.package_arch = None
+ self.package_debug_split_style = None
+ self.path = None
+ self.pn = None
+ self.recipe_sysroot = None
+ self.recipe_sysroot_native = None
+ self.staging_incdir = None
+ self.strip_cmd = None
+ self.target_arch = None
+ self.target_dbgsrc_dir = None
+ self.topdir = None
+ self.workdir = None
+ self.recipe_id = None
+ # replicate bitbake build environment
+ self.exported_vars = None
+ self.cmd_compile = None
+ self.__oe_init_dir = None
+ # main build tool used by this recipe
+ self.build_tool = BuildTool.UNDEFINED
+ # build_tool = cmake
+ self.oecmake_generator = None
+ self.cmake_cache_vars = None
+ # build_tool = meson
+ self.meson_buildtype = None
+ self.meson_wrapper = None
+ self.mesonopts = None
+ self.extra_oemeson = None
+ self.meson_cross_file = None
+
+ def initialize(self, config, workspace, tinfoil):
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+
+ # Verify this recipe is built as externalsrc setup by devtool modify
+ workspacepn = check_workspace_recipe(
+ workspace, self.name, bbclassextend=True)
+ self.srctree = workspace[workspacepn]['srctree']
+ # Need to grab this here in case the source is within a subdirectory
+ self.real_srctree = get_real_srctree(
+ self.srctree, recipe_d.getVar('S'), recipe_d.getVar('WORKDIR'))
+ self.bbappend = workspace[workspacepn]['bbappend']
+
+ self.ide_sdk_dir = os.path.join(
+ config.workspace_path, 'ide-sdk', self.name)
+ if os.path.exists(self.ide_sdk_dir):
+ shutil.rmtree(self.ide_sdk_dir)
+ self.ide_sdk_scripts_dir = os.path.join(self.ide_sdk_dir, 'scripts')
+
+ self.b = recipe_d.getVar('B')
+ self.base_libdir = recipe_d.getVar('base_libdir')
+ self.bblayers = recipe_d.getVar('BBLAYERS').split()
+ self.bpn = recipe_d.getVar('BPN')
+ self.cxx = recipe_d.getVar('CXX')
+ self.d = recipe_d.getVar('D')
+ self.fakerootcmd = recipe_d.getVar('FAKEROOTCMD')
+ self.fakerootenv = recipe_d.getVar('FAKEROOTENV')
+ self.libdir = recipe_d.getVar('libdir')
+ self.max_process = int(recipe_d.getVar(
+ "BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ self.package_arch = recipe_d.getVar('PACKAGE_ARCH')
+ self.package_debug_split_style = recipe_d.getVar(
+ 'PACKAGE_DEBUG_SPLIT_STYLE')
+ self.path = recipe_d.getVar('PATH')
+ self.pn = recipe_d.getVar('PN')
+ self.recipe_sysroot = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT'))
+ self.recipe_sysroot_native = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT_NATIVE'))
+ self.staging_bindir_toolchain = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_TOOLCHAIN'))
+ self.staging_incdir = os.path.realpath(
+ recipe_d.getVar('STAGING_INCDIR'))
+ self.strip_cmd = recipe_d.getVar('STRIP')
+ self.target_arch = recipe_d.getVar('TARGET_ARCH')
+ self.target_dbgsrc_dir = recipe_d.getVar('TARGET_DBGSRC_DIR')
+ self.topdir = recipe_d.getVar('TOPDIR')
+ self.workdir = os.path.realpath(recipe_d.getVar('WORKDIR'))
+
+ self.__init_exported_variables(recipe_d)
+
+ if bb.data.inherits_class('cmake', recipe_d):
+ self.oecmake_generator = recipe_d.getVar('OECMAKE_GENERATOR')
+ self.__init_cmake_preset_cache(recipe_d)
+ self.build_tool = BuildTool.CMAKE
+ elif bb.data.inherits_class('meson', recipe_d):
+ self.meson_buildtype = recipe_d.getVar('MESON_BUILDTYPE')
+ self.mesonopts = recipe_d.getVar('MESONOPTS')
+ self.extra_oemeson = recipe_d.getVar('EXTRA_OEMESON')
+ self.meson_cross_file = recipe_d.getVar('MESON_CROSS_FILE')
+ self.build_tool = BuildTool.MESON
+
+ # Recipe ID is the identifier for IDE config sections
+ self.recipe_id = self.bpn + "-" + self.package_arch
+ self.recipe_id_pretty = self.bpn + ": " + self.package_arch
+
+ def append_to_bbappend(self, append_text):
+ with open(self.bbappend, 'a') as bbap:
+ bbap.write(append_text)
+
+ def remove_from_bbappend(self, append_text):
+ with open(self.bbappend, 'r') as bbap:
+ text = bbap.read()
+ new_text = text.replace(append_text, '')
+ with open(self.bbappend, 'w') as bbap:
+ bbap.write(new_text)
+
+ @staticmethod
+ def is_valid_shell_variable(var):
+ """Skip strange shell variables like systemd
+
+        Prevents odd bugs caused by variables that are not used in this
+        context but break various tools.
+ """
+ if RecipeModified.VALID_BASH_ENV_NAME_CHARS.match(var):
+ bb.debug(1, "ignoring variable: %s" % var)
+ return True
+ return False
+
+ def debug_build_config(self, args):
+ """Explicitely set for example CMAKE_BUILD_TYPE to Debug if not defined otherwise"""
+ if self.build_tool is BuildTool.CMAKE:
+ append_text = os.linesep + \
+ 'OECMAKE_ARGS:append = " -DCMAKE_BUILD_TYPE:STRING=Debug"' + os.linesep
+ if args.debug_build_config and not 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ self.cmake_cache_vars['CMAKE_BUILD_TYPE'] = {
+ "type": "STRING",
+ "value": "Debug",
+ }
+ self.append_to_bbappend(append_text)
+ elif 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ del self.cmake_cache_vars['CMAKE_BUILD_TYPE']
+ self.remove_from_bbappend(append_text)
+ elif self.build_tool is BuildTool.MESON:
+ append_text = os.linesep + 'MESON_BUILDTYPE = "debug"' + os.linesep
+ if args.debug_build_config and self.meson_buildtype != "debug":
+ self.mesonopts = self.mesonopts.replace(
+ '--buildtype ' + self.meson_buildtype, '--buildtype debug')
+ self.append_to_bbappend(append_text)
+ elif self.meson_buildtype == "debug":
+ self.mesonopts = self.mesonopts.replace(
+ '--buildtype debug', '--buildtype plain')
+ self.remove_from_bbappend(append_text)
+ elif args.debug_build_config:
+ logger.warn(
+ "--debug-build-config is not implemented for this build tool yet.")
+
+ def solib_search_path(self, image):
+ """Search for debug symbols in the rootfs and rootfs-dbg
+
+ The debug symbols of shared libraries which are provided by other packages
+ are grabbed from the -dbg packages in the rootfs-dbg.
+
+ But most cross debugging tools like gdb, perf, and systemtap need to find
+ the executable/library first and then locate the corresponding symbols file
+ through its debuglink note. Therefore the library paths from the rootfs are added as well.
+
+ Note: For the devtool modified recipe compiled from the IDE, the debug
+ symbols are taken from the unstripped binaries in the image folder.
+ Also, devtool deploy-target takes the files from the image folder.
+ The debug symbols in the image folder refer to the corresponding source files
+ with the absolute paths of the build machine. Debug symbols found in the
+ rootfs-dbg are relocated and contain paths which refer to the source files
+ installed on the target device e.g. /usr/src/...
+ """
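+ # Sketch of how a debugger typically consumes the returned list (illustrative;
+ # the exact rootfs-dbg paths depend on the image):
+ # (gdb) set solib-search-path <rootfs-dbg>/lib/.debug:<rootfs-dbg>/usr/lib/.debug:...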
+ base_libdir = self.base_libdir.lstrip('/')
+ libdir = self.libdir.lstrip('/')
+ so_paths = [
+ # debug symbols for package_debug_split_style: debug-with-srcpkg or .debug
+ os.path.join(image.rootfs_dbg, base_libdir, ".debug"),
+ os.path.join(image.rootfs_dbg, libdir, ".debug"),
+ # debug symbols for package_debug_split_style: debug-file-directory
+ os.path.join(image.rootfs_dbg, "usr", "lib", "debug"),
+
+ # The binaries are required as well; the debug packages are not enough.
+ # With image-combined-dbg.bbclass the binaries are copied into rootfs-dbg
+ os.path.join(image.rootfs_dbg, base_libdir),
+ os.path.join(image.rootfs_dbg, libdir),
+ # Without image-combined-dbg.bbclass the binaries are only in rootfs.
+ # Note: Stepping into source files located in rootfs-dbg does not
+ # work without image-combined-dbg.bbclass yet.
+ os.path.join(image.rootfs, base_libdir),
+ os.path.join(image.rootfs, libdir)
+ ]
+ return so_paths
+
+ def solib_search_path_str(self, image):
+ """Return a : separated list of paths usable by GDB's set solib-search-path"""
+ return ':'.join(self.solib_search_path(image))
+
+ def __init_exported_variables(self, d):
+ """Find all variables with export flag set.
+
+ This allows generating IDE configurations which compile with the same
+ environment as bitbake does, which is a reasonable default behavior.
+ """
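+ # Illustrative result (variable names and values are hypothetical):
+ # self.exported_vars = {
+ #     'PATH': '<recipe-sysroot-native>/usr/bin:...',
+ #     'PKG_CONFIG_PATH': '<recipe-sysroot>/usr/lib/pkgconfig:...',
+ # }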
+ exported_vars = {}
+
+ vars = (key for key in d.keys() if not key.startswith(
+ "__") and not d.getVarFlag(key, "func", False))
+ for var in vars:
+ func = d.getVarFlag(var, "func", False)
+ if d.getVarFlag(var, 'python', False) and func:
+ continue
+ export = d.getVarFlag(var, "export", False)
+ unexport = d.getVarFlag(var, "unexport", False)
+ if not export and not unexport and not func:
+ continue
+ if unexport:
+ continue
+
+ val = d.getVar(var)
+ if val is None:
+ continue
+ if set(var) & set("-.{}+"):
+ logger.warn(
+ "Warning: Found invalid character in variable name %s", str(var))
+ continue
+ varExpanded = d.expand(var)
+ val = str(val)
+
+ if not RecipeModified.is_valid_shell_variable(varExpanded):
+ continue
+
+ if func:
+ code_line = "line: {0}, file: {1}\n".format(
+ d.getVarFlag(var, "lineno", False),
+ d.getVarFlag(var, "filename", False))
+ val = val.rstrip('\n')
+ logger.warn("Warning: exported shell function %s() is not supported (%s)" %
+ (varExpanded, code_line))
+ continue
+
+ if export:
+ exported_vars[varExpanded] = val.strip()
+ continue
+
+ self.exported_vars = exported_vars
+
+ def __init_cmake_preset_cache(self, d):
+ """Get the arguments passed to cmake
+
+ Replicate the cmake configure arguments with all details needed to
+ share the build folder between bitbake and the SDK.
+ """
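+ # Illustrative example (hypothetical values): with
+ #   EXTRA_OECMAKE = "-DFOO=ON -DBAR:PATH=/tmp"
+ # the parsing below produces
+ #   self.cmake_cache_vars = {'FOO': 'ON', 'BAR': {'type': 'PATH', 'value': '/tmp'}}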
+ site_file = os.path.join(self.workdir, 'site-file.cmake')
+ if os.path.exists(site_file):
+ logger.warning("site-file.cmake is not supported")
+
+ cache_vars = {}
+ oecmake_args = d.getVar('OECMAKE_ARGS').split()
+ extra_oecmake = d.getVar('EXTRA_OECMAKE').split()
+ for param in oecmake_args + extra_oecmake:
+ d_pref = "-D"
+ if param.startswith(d_pref):
+ param = param[len(d_pref):]
+ else:
+ logger.error("Expected a -D prefix in cmake argument %s" % param)
+ param_s = param.split('=', 1)
+ param_nt = param_s[0].split(':', 1)
+
+ def handle_undefined_variable(var):
+ if var.startswith('${') and var.endswith('}'):
+ return ''
+ else:
+ return var
+ # Example: FOO=ON
+ if len(param_nt) == 1:
+ cache_vars[param_s[0]] = handle_undefined_variable(param_s[1])
+ # Example: FOO:PATH=/tmp
+ elif len(param_nt) == 2:
+ cache_vars[param_nt[0]] = {
+ "type": param_nt[1],
+ "value": handle_undefined_variable(param_s[1]),
+ }
+ else:
+ logger.error("Cannot parse cmake argument %s" % param)
+ self.cmake_cache_vars = cache_vars
+
+ def cmake_preset(self):
+ """Create a preset for cmake that mimics how bitbake calls cmake"""
+ toolchain_file = os.path.join(self.workdir, 'toolchain.cmake')
+ cmake_executable = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'cmake')
+ self.cmd_compile = cmake_executable + " --build --preset " + self.recipe_id
+
+ preset_dict_configure = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "binaryDir": self.b,
+ "generator": self.oecmake_generator,
+ "toolchainFile": toolchain_file,
+ "cacheVariables": self.cmake_cache_vars,
+ "environment": self.exported_vars,
+ "cmakeExecutable": cmake_executable
+ }
+
+ preset_dict_build = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict_test = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict = {
+ "version": 3, # cmake 3.21, backward compatible with kirkstone
+ "configurePresets": [preset_dict_configure],
+ "buildPresets": [preset_dict_build],
+ "testPresets": [preset_dict_test]
+ }
+
+ # Finally write the json file
+ json_file = 'CMakeUserPresets.json'
+ json_path = os.path.join(self.real_srctree, json_file)
+ logger.info("Updating CMake preset: %s (%s)" % (json_file, json_path))
+ if not os.path.exists(self.real_srctree):
+ os.makedirs(self.real_srctree)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed, probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+
+ # Add or update the presets for the recipe and keep other presets
+ for k, v in preset_dict.items():
+ if isinstance(v, list):
+ update_preset = v[0]
+ preset_added = False
+ if k in orig_dict:
+ for index, orig_preset in enumerate(orig_dict[k]):
+ if 'name' in orig_preset:
+ if orig_preset['name'] == update_preset['name']:
+ logger.debug("Updating preset: %s" %
+ orig_preset['name'])
+ orig_dict[k][index] = update_preset
+ preset_added = True
+ break
+ else:
+ logger.debug("keeping preset: %s" %
+ orig_preset['name'])
+ else:
+ logger.warn("preset without a name found")
+ if not preset_added:
+ if not k in orig_dict:
+ orig_dict[k] = []
+ orig_dict[k].append(update_preset)
+ logger.debug("Added preset: %s" %
+ update_preset['name'])
+ else:
+ orig_dict[k] = v
+
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
+
+ def gen_meson_wrapper(self):
+ """Generate a wrapper script to call meson with the cross environment"""
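+ # Sketch of the generated wrapper script (illustrative, paths abbreviated):
+ #   #!/bin/sh
+ #   export VAR="value"            (one line per exported bitbake variable)
+ #   unset CC CXX CPP LD AR NM STRIP
+ #   mkdir -p "<B>/meson-private/tmp"
+ #   export TMPDIR="<B>/meson-private/tmp"
+ #   exec "<recipe-sysroot-native>/usr/bin/meson.real" "$@"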
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ meson_wrapper = os.path.join(self.ide_sdk_scripts_dir, 'meson')
+ meson_real = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'meson.real')
+ with open(meson_wrapper, 'w') as mwrap:
+ mwrap.write("#!/bin/sh" + os.linesep)
+ for var, val in self.exported_vars.items():
+ mwrap.write('export %s="%s"' % (var, val) + os.linesep)
+ mwrap.write("unset CC CXX CPP LD AR NM STRIP" + os.linesep)
+ private_temp = os.path.join(self.b, "meson-private", "tmp")
+ mwrap.write('mkdir -p "%s"' % private_temp + os.linesep)
+ mwrap.write('export TMPDIR="%s"' % private_temp + os.linesep)
+ mwrap.write('exec "%s" "$@"' % meson_real + os.linesep)
+ st = os.stat(meson_wrapper)
+ os.chmod(meson_wrapper, st.st_mode | stat.S_IEXEC)
+ self.meson_wrapper = meson_wrapper
+ self.cmd_compile = meson_wrapper + " compile -C " + self.b
+
+ def which(self, executable):
+ bin_path = shutil.which(executable, path=self.path)
+ if not bin_path:
+ raise DevtoolError(
+ 'Cannot find %s. Probably the recipe %s is not built yet.' % (executable, self.bpn))
+ return bin_path
+
+ @staticmethod
+ def is_elf_file(file_path):
+ with open(file_path, "rb") as f:
+ data = f.read(4)
+ if data == b'\x7fELF':
+ return True
+ return False
+
+ def find_installed_binaries(self):
+ """Find all executable ELF files in the image directory"""
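+ # Illustrative return value (hypothetical recipe installing a single binary):
+ # ['/usr/bin/cpp-example'] - paths are relative to ${D} and typically keep a
+ # leading slash since ${D} normally has no trailing separator.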
+ binaries = []
+ d_len = len(self.d)
+ re_so = re.compile(r'.*\.so[.0-9]*$')
+ for root, _, files in os.walk(self.d, followlinks=False):
+ for file in files:
+ abs_name = os.path.join(root, file)
+ if os.path.islink(abs_name):
+ continue
+ if re_so.match(file):
+ continue
+ if os.access(abs_name, os.X_OK) and RecipeModified.is_elf_file(abs_name):
+ binaries.append(abs_name[d_len:])
+ return sorted(binaries)
+
+ def gen_delete_package_dirs(self):
+ """Delete the folders created by the package tasks
+
+ This is a workaround for an issue with recipes having their sources
+ downloaded as file://.
+ This likely breaks pseudo like:
+ path mismatch [3 links]: ino 79147802 db
+ .../build/tmp/.../cmake-example/1.0/package/usr/src/debug/
+ cmake-example/1.0-r0/oe-local-files/cpp-example-lib.cpp
+ .../build/workspace/sources/cmake-example/oe-local-files/cpp-example-lib.cpp
+ Since the files are outdated anyway, let's delete them (also from pseudo's db) to work around this issue.
+ """
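+ # Sketch of the generated helper script (illustrative; FAKEROOTCMD is typically pseudo):
+ #   #!/bin/sh
+ #   VAR="value"
+ #   export VAR                    (repeated for each fakeroot environment variable)
+ #   pseudo rm -rf <WORKDIR>/package <WORKDIR>/packages-split ... || { echo "... failed"; exit 1; }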
+ cmd_lines = ['#!/bin/sh']
+
+ # Set up the appropriate environment
+ newenv = dict(os.environ)
+ for varvalue in self.fakerootenv.split():
+ if '=' in varvalue:
+ splitval = varvalue.split('=', 1)
+ newenv[splitval[0]] = splitval[1]
+
+ # Replicate the environment variables from bitbake
+ for var, val in newenv.items():
+ if not RecipeModified.is_valid_shell_variable(var):
+ continue
+ cmd_lines.append('%s="%s"' % (var, val))
+ cmd_lines.append('export %s' % var)
+
+ # Delete the package task output (directories and related files)
+ pkg_dirs = ' '.join([os.path.join(self.workdir, d) for d in [
+ "package", "packages-split", "pkgdata", "sstate-install-package", "debugsources.list", "*.spec"]])
+ cmd = "%s rm -rf %s" % (self.fakerootcmd, pkg_dirs)
+ cmd_lines.append('%s || { echo "%s failed"; exit 1; }' % (cmd, cmd))
+
+ return self.write_script(cmd_lines, 'delete_package_dirs')
+
+ def gen_deploy_target_script(self, args):
+ """Generate a script which does what devtool deploy-target does
+
+ This script is much quicker than devtool deploy-target because it
+ does not need to start a bitbake server. All information from tinfoil
+ is hard-coded in the generated script.
+ """
+ cmd_lines = ['#!%s' % str(sys.executable)]
+ cmd_lines.append('import sys')
+ cmd_lines.append('devtool_sys_path = %s' % str(sys.path))
+ cmd_lines.append('devtool_sys_path.reverse()')
+ cmd_lines.append('for p in devtool_sys_path:')
+ cmd_lines.append(' if p not in sys.path:')
+ cmd_lines.append(' sys.path.insert(0, p)')
+ cmd_lines.append('from devtool.deploy import deploy_no_d')
+ args_filter = ['debug', 'dry_run', 'key', 'no_check_space', 'no_host_check',
+ 'no_preserve', 'port', 'show_status', 'ssh_exec', 'strip', 'target']
+ filtered_args_dict = {key: value for key, value in vars(
+ args).items() if key in args_filter}
+ cmd_lines.append('filtered_args_dict = %s' % str(filtered_args_dict))
+ cmd_lines.append('class Dict2Class(object):')
+ cmd_lines.append(' def __init__(self, my_dict):')
+ cmd_lines.append(' for key in my_dict:')
+ cmd_lines.append(' setattr(self, key, my_dict[key])')
+ cmd_lines.append('filtered_args = Dict2Class(filtered_args_dict)')
+ cmd_lines.append(
+ 'setattr(filtered_args, "recipename", "%s")' % self.bpn)
+ cmd_lines.append('deploy_no_d("%s", "%s", "%s", "%s", "%s", "%s", %d, "%s", "%s", filtered_args)' %
+ (self.d, self.workdir, self.path, self.strip_cmd,
+ self.libdir, self.base_libdir, self.max_process,
+ self.fakerootcmd, self.fakerootenv))
+ return self.write_script(cmd_lines, 'deploy_target')
+
+ def gen_install_deploy_script(self, args):
+ """Generate a script which does install and deploy"""
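+ # Overall flow of the generated script (illustrative):
+ #   1. run the delete_package_dirs_<recipe_id> helper
+ #   2. cd into the oe-init directory and source oe-init-build-env with TOPDIR
+ #   3. bitbake <recipe> -c install --force
+ #   4. run the self-contained deploy_target_<recipe_id> helper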
+ cmd_lines = ['#!/bin/bash']
+
+ cmd_lines.append(self.gen_delete_package_dirs())
+
+ # . oe-init-build-env $BUILDDIR
+ # Note: Sourcing scripts with arguments requires bash
+ cmd_lines.append('cd "%s" || { echo "cd %s failed"; exit 1; }' % (
+ self.oe_init_dir, self.oe_init_dir))
+ cmd_lines.append('. "%s" "%s" || { echo ". %s %s failed"; exit 1; }' % (
+ self.oe_init_build_env, self.topdir, self.oe_init_build_env, self.topdir))
+
+ # bitbake -c install
+ cmd_lines.append(
+ 'bitbake %s -c install --force || { echo "bitbake %s -c install --force failed"; exit 1; }' % (self.bpn, self.bpn))
+
+ # Self contained devtool deploy-target
+ cmd_lines.append(self.gen_deploy_target_script(args))
+
+ return self.write_script(cmd_lines, 'install_and_deploy')
+
+ def write_script(self, cmd_lines, script_name):
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ script_name_arch = script_name + '_' + self.recipe_id
+ script_file = os.path.join(self.ide_sdk_scripts_dir, script_name_arch)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ return script_file
+
+ @property
+ def oe_init_build_env(self):
+ """Find the oe-init-build-env used for this setup"""
+ oe_init_dir = self.oe_init_dir
+ if oe_init_dir:
+ return os.path.join(oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ return None
+
+ @property
+ def oe_init_dir(self):
+ """Find the directory where the oe-init-build-env is located
+
+ Assumption: There might be a layer with higher priority than poky
+ which provides the oe-init-build-env script in the layer's top-level folder.
+ """
+ if not self.__oe_init_dir:
+ for layer in reversed(self.bblayers):
+ result = subprocess.run(
+ ['git', 'rev-parse', '--show-toplevel'], cwd=layer, capture_output=True)
+ if result.returncode == 0:
+ oe_init_dir = result.stdout.decode('utf-8').strip()
+ oe_init_path = os.path.join(
+ oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ if os.path.exists(oe_init_path):
+ logger.debug("Using %s from: %s" % (
+ RecipeModified.OE_INIT_BUILD_ENV, oe_init_path))
+ self.__oe_init_dir = oe_init_dir
+ break
+ if not self.__oe_init_dir:
+ logger.error("Cannot find the bitbake top level folder")
+ return self.__oe_init_dir
+
+
+def ide_setup(args, config, basepath, workspace):
+ """Generate the IDE configuration for the workspace"""
+
+ # Explicitly passing some special recipes does not make sense
+ for recipe in args.recipenames:
+ if recipe in ['meta-ide-support', 'build-sysroots']:
+ raise DevtoolError("Invalid recipe: %s." % recipe)
+
+ # Collect information about tasks which need to be bitbaked
+ bootstrap_tasks = []
+ bootstrap_tasks_late = []
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ # define mode depending on recipes which need to be processed
+ recipes_image_names = []
+ recipes_modified_names = []
+ recipes_other_names = []
+ for recipe in args.recipenames:
+ try:
+ check_workspace_recipe(
+ workspace, recipe, bbclassextend=True)
+ recipes_modified_names.append(recipe)
+ except DevtoolError:
+ recipe_d = parse_recipe(
+ config, tinfoil, recipe, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing recipe %s failed" % recipe)
+ if bb.data.inherits_class('image', recipe_d):
+ recipes_image_names.append(recipe)
+ else:
+ recipes_other_names.append(recipe)
+
+ invalid_params = False
+ if args.mode == DevtoolIdeMode.shared:
+ if len(recipes_modified_names):
+ logger.error("In shared sysroots mode modified recipes %s cannot be handled." % str(
+ recipes_modified_names))
+ invalid_params = True
+ if args.mode == DevtoolIdeMode.modified:
+ if len(recipes_other_names):
+ logger.error("Recipes that are not being modified (%s) can only be handled in shared sysroots mode." % str(
+ recipes_other_names))
+ invalid_params = True
+ if len(recipes_image_names) != 1:
+ logger.error(
+ "One image recipe is required as the rootfs for the remote development.")
+ invalid_params = True
+ for modified_recipe_name in recipes_modified_names:
+ if modified_recipe_name.startswith('nativesdk-') or modified_recipe_name.endswith('-native'):
+ logger.error(
+ "Only cross-compiled recipes are supported. %s is not cross-compiled." % modified_recipe_name)
+ invalid_params = True
+
+ if invalid_params:
+ raise DevtoolError("Invalid parameters were passed.")
+
+ # For the shared sysroots mode, add all dependencies of all the images to the sysroots
+ # For the modified mode provide one rootfs and the corresponding debug symbols via rootfs-dbg
+ recipes_images = []
+ for recipes_image_name in recipes_image_names:
+ logger.info("Using image: %s" % recipes_image_name)
+ recipe_image = RecipeImage(recipes_image_name)
+ recipe_image.initialize(config, tinfoil)
+ bootstrap_tasks += recipe_image.bootstrap_tasks
+ recipes_images.append(recipe_image)
+
+ # Provide a Direct SDK with shared sysroots
+ recipes_not_modified = []
+ if args.mode == DevtoolIdeMode.shared:
+ ide_support = RecipeMetaIdeSupport()
+ ide_support.initialize(config, tinfoil)
+ bootstrap_tasks += ide_support.bootstrap_tasks
+
+ logger.info("Adding %s to the Direct SDK sysroots." %
+ str(recipes_other_names))
+ for recipe_name in recipes_other_names:
+ recipe_not_modified = RecipeNotModified(recipe_name)
+ bootstrap_tasks += recipe_not_modified.bootstrap_tasks
+ recipes_not_modified.append(recipe_not_modified)
+
+ build_sysroots = RecipeBuildSysroots()
+ build_sysroots.initialize(config, tinfoil)
+ bootstrap_tasks_late += build_sysroots.bootstrap_tasks
+ shared_env = SharedSysrootsEnv()
+ shared_env.initialize(ide_support, build_sysroots)
+
+ recipes_modified = []
+ if args.mode == DevtoolIdeMode.modified:
+ logger.info("Setting up workspaces for modified recipe: %s" %
+ str(recipes_modified_names))
+ gdbs_cross = {}
+ for recipe_name in recipes_modified_names:
+ recipe_modified = RecipeModified(recipe_name)
+ recipe_modified.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += recipe_modified.bootstrap_tasks
+ recipes_modified.append(recipe_modified)
+
+ if recipe_modified.target_arch not in gdbs_cross:
+ target_device = TargetDevice(args)
+ gdb_cross = RecipeGdbCross(
+ args, recipe_modified.target_arch, target_device)
+ gdb_cross.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += gdb_cross.bootstrap_tasks
+ gdbs_cross[recipe_modified.target_arch] = gdb_cross
+ recipe_modified.gdb_cross = gdbs_cross[recipe_modified.target_arch]
+
+ finally:
+ tinfoil.shutdown()
+
+ if not args.skip_bitbake:
+ bb_cmd = 'bitbake '
+ if args.bitbake_k:
+ bb_cmd += "-k "
+ bb_cmd_early = bb_cmd + ' '.join(bootstrap_tasks)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_early, watch=True)
+ if bootstrap_tasks_late:
+ bb_cmd_late = bb_cmd + ' '.join(bootstrap_tasks_late)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_late, watch=True)
+
+ for recipe_image in recipes_images:
+ if (recipe_image.gdbserver_missing):
+ logger.warning(
+ "gdbserver not installed in image %s. Remote debugging will not be available" % recipe_image)
+
+ if recipe_image.combine_dbg_image is False:
+ logger.warning(
+ 'IMAGE_CLASSES += "image-combined-dbg" is missing for image %s. Remote debugging will not find debug symbols from rootfs-dbg.' % recipe_image)
+
+ # Instantiate the active IDE plugin
+ ide = ide_plugins[args.ide]()
+ if args.mode == DevtoolIdeMode.shared:
+ ide.setup_shared_sysroots(shared_env)
+ elif args.mode == DevtoolIdeMode.modified:
+ for recipe_modified in recipes_modified:
+ if recipe_modified.build_tool is BuildTool.CMAKE:
+ recipe_modified.cmake_preset()
+ if recipe_modified.build_tool is BuildTool.MESON:
+ recipe_modified.gen_meson_wrapper()
+ ide.setup_modified_recipe(
+ args, recipe_image, recipe_modified)
+ else:
+ raise DevtoolError("Must not end up here.")
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ global ide_plugins
+
+ # Search for IDE plugins in all sub-folders named ide_plugins where devtool searches for plugins.
+ pluginpaths = [os.path.join(path, 'ide_plugins')
+ for path in context.pluginpaths]
+ ide_plugin_modules = []
+ for pluginpath in pluginpaths:
+ scriptutils.load_plugins(logger, ide_plugin_modules, pluginpath)
+
+ for ide_plugin_module in ide_plugin_modules:
+ if hasattr(ide_plugin_module, 'register_ide_plugin'):
+ ide_plugin_module.register_ide_plugin(ide_plugins)
+ # Sort plugins according to their priority. The first entry is the default IDE plugin.
+ ide_plugins = dict(sorted(ide_plugins.items(),
+ key=lambda p: p[1].ide_plugin_priority(), reverse=True))
+
+ parser_ide_sdk = subparsers.add_parser('ide-sdk', group='working', order=50, formatter_class=RawTextHelpFormatter,
+ help='Setup the SDK and configure the IDE')
+ parser_ide_sdk.add_argument(
+ 'recipenames', nargs='+', help='Generate an IDE configuration suitable to work on the given recipes.\n'
+ 'Depending on the --mode parameter, different types of SDKs and IDE configurations are generated.')
+ parser_ide_sdk.add_argument(
+ '-m', '--mode', type=DevtoolIdeMode, default=DevtoolIdeMode.modified,
+ help='Different SDK types are supported:\n'
+ '- "' + DevtoolIdeMode.modified.name + '" (default):\n'
+ ' devtool modify creates a workspace to work on the source code of a recipe.\n'
+ ' devtool ide-sdk builds the SDK and generates the IDE configuration(s) in the workspace directory(ies)\n'
+ ' Usage example:\n'
+ ' devtool modify cmake-example\n'
+ ' devtool ide-sdk cmake-example core-image-minimal\n'
+ ' Start the IDE in the workspace folder\n'
+ ' At least one devtool modified recipe plus one image recipe are required:\n'
+ ' The image recipe is used to generate the target image and the remote debug configuration.\n'
+ '- "' + DevtoolIdeMode.shared.name + '":\n'
+ ' Usage example:\n'
+ ' devtool ide-sdk -m ' + DevtoolIdeMode.shared.name + ' recipe(s)\n'
+ ' This command generates a cross-toolchain as well as the corresponding shared sysroot directories.\n'
+ ' To use this tool-chain the environment-* file found in the deploy..image folder needs to be sourced into a shell.\n'
+ ' In case of VSCode and cmake the tool-chain is also exposed as a cmake-kit')
+ default_ide = list(ide_plugins.keys())[0]
+ parser_ide_sdk.add_argument(
+ '-i', '--ide', choices=ide_plugins.keys(), default=default_ide,
+ help='Setup the configuration for this IDE (default: %s)' % default_ide)
+ parser_ide_sdk.add_argument(
+ '-t', '--target', default='root@192.168.7.2',
+ help='Live target machine running an ssh server: user@hostname.')
+ parser_ide_sdk.add_argument(
+ '-G', '--gdbserver-port-start', default="1234", help='Port where gdbserver is listening.')
+ parser_ide_sdk.add_argument(
+ '-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-e', '--ssh-exec', help='Executable to use in place of ssh')
+ parser_ide_sdk.add_argument(
+ '-P', '--port', help='Specify ssh port to use for connection to the target')
+ parser_ide_sdk.add_argument(
+ '-I', '--key', help='Specify ssh private key for connection to the target')
+ parser_ide_sdk.add_argument(
+ '--skip-bitbake', help='Generate IDE configuration but skip calling bitbake to update the SDK.', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-k', '--bitbake-k', help='Pass -k parameter to bitbake', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
+ parser_ide_sdk.add_argument(
+ '-n', '--dry-run', help='List files to be deployed only', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--debug-build-config', help='Use debug build flags, for example set CMAKE_BUILD_TYPE=Debug', action='store_true')
+ parser_ide_sdk.set_defaults(func=ide_setup)
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
index d87a01e7a9..18daef30c3 100644
--- a/scripts/lib/devtool/menuconfig.py
+++ b/scripts/lib/devtool/menuconfig.py
@@ -45,7 +45,7 @@ def menuconfig(args, config, basepath, workspace):
return 1
check_workspace_recipe(workspace, args.component)
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if not rd.getVarFlag('do_menuconfig','task'):
raise DevtoolError("This recipe does not support menuconfig option")
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
index d717b6c2b8..9aefd7e354 100644
--- a/scripts/lib/devtool/sdk.py
+++ b/scripts/lib/devtool/sdk.py
@@ -300,7 +300,8 @@ def sdk_install(args, config, basepath, workspace):
return 2
try:
- exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_native_sysroot', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_target_sysroot', watch=True)
except bb.process.ExecutionError as e:
raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index e3b74ab8f0..6674e67267 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -147,6 +147,8 @@ def add(args, config, basepath, workspace):
extracmdopts += ' -a'
if args.npm_dev:
extracmdopts += ' --npm-dev'
+ if args.no_pypi:
+ extracmdopts += ' --no-pypi'
if args.mirrors:
extracmdopts += ' --mirrors'
if args.srcrev:
@@ -234,10 +236,14 @@ def add(args, config, basepath, workspace):
if args.fetchuri and not args.no_git:
setup_git_repo(srctree, args.version, 'devtool', d=tinfoil.config_data)
- initial_rev = None
+ initial_rev = {}
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_rev["."] = stdout.rstrip()
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse HEAD` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule) = line.split()
+ initial_rev[os.path.relpath(submodule, srctree)] = rev
if args.src_subdir:
srctree = os.path.join(srctree, args.src_subdir)
@@ -251,7 +257,8 @@ def add(args, config, basepath, workspace):
if b_is_s:
f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
+ for key, value in initial_rev.items():
+ f.write('\n# initial_rev %s: %s\n' % (key, value))
if args.binary:
f.write('do_install:append() {\n')
@@ -453,7 +460,7 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
-def symlink_oelocal_files_srctree(rd,srctree):
+def symlink_oelocal_files_srctree(rd, srctree):
import oe.patch
if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
# If recipe extracts to ${WORKDIR}, symlink the files into the srctree
@@ -477,11 +484,7 @@ def symlink_oelocal_files_srctree(rd,srctree):
os.symlink('oe-local-files/%s' % fn, destpth)
addfiles.append(os.path.join(relpth, fn))
if addfiles:
- bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
- bb.process.run('git %s commit -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
-
+ oe.patch.GitApplyTree.commitIgnored("Add local file symlinks", dir=srctree, files=addfiles, d=rd)
def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
@@ -567,6 +570,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
logger.debug('writing append file %s' % appendfile)
with open(appendfile, 'a') as f:
f.write('###--- _extract_source\n')
+ f.write('deltask do_recipe_qa\n')
+ f.write('deltask do_recipe_qa_setscene\n')
+ f.write('ERROR_QA:remove = "patch-fuzz"\n')
f.write('DEVTOOL_TEMPDIR = "%s"\n' % tempdir)
f.write('DEVTOOL_DEVBRANCH = "%s"\n' % devbranch)
if not is_kernel_yocto:
@@ -584,6 +590,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
preservestampfile = os.path.join(sstate_manifests, 'preserve-stamps')
with open(preservestampfile, 'w') as f:
f.write(d.getVar('STAMP'))
+ tinfoil.modified_files()
try:
if is_kernel_yocto:
# We need to generate the kernel config
@@ -646,9 +653,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
shutil.rmtree(workshareddir)
- oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
elif not os.path.exists(workshareddir):
- oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
@@ -656,13 +663,13 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if sync:
bb.process.run('git fetch file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
- # Move oe-local-files directory to srctree
- # As the oe-local-files is not part of the constructed git tree,
- # remove them directly during the synchrounizating might surprise
- # the users. Instead, we move it to oe-local-files.bak and remind
- # user in the log message.
+ # Move the oe-local-files directory to srctree.
+ # As oe-local-files is not part of the constructed git tree,
+ # removing it directly during the synchronization might surprise
+ # the user. Instead, we move it to oe-local-files.bak and remind
+ # the user in the log message.
if os.path.exists(srctree_localdir + '.bak'):
- shutil.rmtree(srctree_localdir, srctree_localdir + '.bak')
+ shutil.rmtree(srctree_localdir + '.bak')
if os.path.exists(srctree_localdir):
logger.info('Backing up current local file directory %s' % srctree_localdir)
@@ -678,7 +685,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
- symlink_oelocal_files_srctree(d,srctree)
+ symlink_oelocal_files_srctree(d, srctree)
if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
@@ -751,7 +758,7 @@ def get_staging_kver(srcdir):
kerver = []
staging_kerVer=""
if os.path.exists(srcdir) and os.listdir(srcdir):
- with open(os.path.join(srcdir,"Makefile")) as f:
+ with open(os.path.join(srcdir, "Makefile")) as f:
version = [next(f) for x in range(5)][1:4]
for word in version:
kerver.append(word.split('= ')[1].split('\n')[0])
@@ -761,10 +768,20 @@ def get_staging_kver(srcdir):
def get_staging_kbranch(srcdir):
staging_kbranch = ""
if os.path.exists(srcdir) and os.listdir(srcdir):
- (branch, _) = bb.process.run('git branch | grep \* | cut -d \' \' -f2', cwd=srcdir)
+ (branch, _) = bb.process.run('git branch | grep \\* | cut -d \' \' -f2', cwd=srcdir)
staging_kbranch = "".join(branch.split('\n')[0])
return staging_kbranch
+def get_real_srctree(srctree, s, workdir):
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(s)
+ workdir = os.path.abspath(workdir)
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+ return srctree
+
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
@@ -809,8 +826,8 @@ def modify(args, config, basepath, workspace):
_check_compatible_recipe(pn, rd)
- initial_rev = None
- commits = []
+ initial_revs = {}
+ commits = {}
check_commits = False
if bb.data.inherits_class('kernel-yocto', rd):
@@ -822,10 +839,10 @@ def modify(args, config, basepath, workspace):
staging_kerVer = get_staging_kver(srcdir)
staging_kbranch = get_staging_kbranch(srcdir)
if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
- oe.path.copyhardlinktree(srcdir,srctree)
+ oe.path.copyhardlinktree(srcdir, srctree)
workdir = rd.getVar('WORKDIR')
srcsubdir = rd.getVar('S')
- localfilesdir = os.path.join(srctree,'oe-local-files')
+ localfilesdir = os.path.join(srctree, 'oe-local-files')
# Move local source files into separate subdir
recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
local_files = oe.recipeutils.get_recipe_local_files(rd)
@@ -849,9 +866,9 @@ def modify(args, config, basepath, workspace):
for fname in local_files:
_move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
- f.write('# Ignore local files, by default. Remove this file ''if you want to commit the directory to Git\n*\n')
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n*\n')
- symlink_oelocal_files_srctree(rd,srctree)
+ symlink_oelocal_files_srctree(rd, srctree)
task = 'do_configure'
res = tinfoil.build_targets(pn, task, handle_events=True)
@@ -859,22 +876,30 @@ def modify(args, config, basepath, workspace):
# Copy .config to workspace
kconfpath = rd.getVar('B')
logger.info('Copying kernel config to workspace')
- shutil.copy2(os.path.join(kconfpath, '.config'),srctree)
+ shutil.copy2(os.path.join(kconfpath, '.config'), srctree)
# Set this to true, we still need to get initial_rev
# by parsing the git repo
args.no_extract = True
if not args.no_extract:
- initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- if not initial_rev:
+ initial_revs["."], _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ if not initial_revs["."]:
return 1
logger.info('Source tree extracted to %s' % srctree)
+
if os.path.exists(os.path.join(srctree, '.git')):
# Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_revs["."], cwd=srctree)
+ commits["."] = stdout.split()
check_commits = True
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse devtool-base` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule_path) = line.split()
+ submodule = os.path.relpath(submodule_path, srctree)
+ initial_revs[submodule] = rev
+ (stdout, _) = bb.process.run('git rev-list --reverse devtool-base..HEAD', cwd=submodule_path)
+ commits[submodule] = stdout.split()
else:
if os.path.exists(os.path.join(srctree, '.git')):
# Check if it's a tree previously extracted by us. This is done
@@ -891,11 +916,11 @@ def modify(args, config, basepath, workspace):
for line in stdout.splitlines():
if line.startswith('*'):
(stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
- initial_rev = stdout.rstrip()
- if not initial_rev:
+ initial_revs["."] = stdout.rstrip()
+ if "." not in initial_revs:
# Otherwise, just grab the head revision
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_revs["."] = stdout.rstrip()
branch_patches = {}
if check_commits:
@@ -912,28 +937,40 @@ def modify(args, config, basepath, workspace):
seen_patches = []
for branch in branches:
branch_patches[branch] = []
- (stdout, _) = bb.process.run('git log devtool-base..%s' % branch, cwd=srctree)
- for line in stdout.splitlines():
- line = line.strip()
- if line.startswith(oe.patch.GitApplyTree.patch_line_prefix):
- origpatch = line[len(oe.patch.GitApplyTree.patch_line_prefix):].split(':', 1)[-1].strip()
- if not origpatch in seen_patches:
- seen_patches.append(origpatch)
- branch_patches[branch].append(origpatch)
+ (stdout, _) = bb.process.run('git rev-list devtool-base..%s' % branch, cwd=srctree)
+ for sha1 in stdout.splitlines():
+ notes = oe.patch.GitApplyTree.getNotes(srctree, sha1.strip())
+ origpatch = notes.get(oe.patch.GitApplyTree.original_patch)
+ if origpatch and origpatch not in seen_patches:
+ seen_patches.append(origpatch)
+ branch_patches[branch].append(origpatch)
# Need to grab this here in case the source is within a subdirectory
srctreebase = srctree
-
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree = os.path.join(srctree, srcsubdir)
+ srctree = get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
bb.utils.mkdirhier(os.path.dirname(appendfile))
with open(appendfile, 'w') as f:
+ # if not present, add type=git-dependency to the secondary sources
+ # (non local files) so they can be extracted correctly when building a recipe after
+ # doing a devtool modify on it
+ src_uri = rd.getVar('SRC_URI').split()
+ src_uri_append = []
+ src_uri_remove = []
+
+ # Assume first entry is main source extracted in ${S} so skip it
+ src_uri = src_uri[1::]
+
+ # Add "type=git-dependency" to all non local sources
+ for url in src_uri:
+ if not url.startswith('file://') and not 'type=' in url:
+ src_uri_remove.append(url)
+ src_uri_append.append('%s;type=git-dependency' % url)
+
+ if src_uri_remove:
+ f.write('SRC_URI:remove = "%s"\n' % ' '.join(src_uri_remove))
+ f.write('SRC_URI:append = " %s"\n\n' % ' '.join(src_uri_append))
+
f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n')
# Local files can be modified/tracked in separate subdir under srctree
# Mostly useful for packages with S != WORKDIR
@@ -962,17 +999,19 @@ def modify(args, config, basepath, workspace):
' mv ${S}/.config ${S}/.config.old\n'
' fi\n'
'}\n')
- if rd.getVarFlag('do_menuconfig','task'):
+ if rd.getVarFlag('do_menuconfig', 'task'):
f.write('\ndo_configure:append() {\n'
- ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
- ' cp ${B}/.config ${S}/.config.baseline\n'
- ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ ' if [ ${@oe.types.boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG"))} = True ]; then\n'
+ ' cp ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.new\n'
' fi\n'
'}\n')
- if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
- for commit in commits:
- f.write('# commit: %s\n' % commit)
+ if initial_revs:
+ for name, rev in initial_revs.items():
+ f.write('\n# initial_rev %s: %s\n' % (name, rev))
+ if name in commits:
+ for commit in commits[name]:
+ f.write('# commit %s: %s\n' % (name, commit))
if branch_patches:
for branch in branch_patches:
if branch == args.branch:
@@ -1195,44 +1234,56 @@ def _get_patchset_revs(srctree, recipe_path, initial_rev=None, force_patch_refre
branchname = stdout.rstrip()
# Parse initial rev from recipe if not specified
- commits = []
+ commits = {}
patches = []
+ initial_revs = {}
with open(recipe_path, 'r') as f:
for line in f:
- if line.startswith('# initial_rev:'):
- if not initial_rev:
- initial_rev = line.split(':')[-1].strip()
- elif line.startswith('# commit:') and not force_patch_refresh:
- commits.append(line.split(':')[-1].strip())
- elif line.startswith('# patches_%s:' % branchname):
- patches = line.split(':')[-1].strip().split(',')
-
- update_rev = initial_rev
- changed_revs = None
- if initial_rev:
+ pattern = r'^#\s.*\s(.*):\s([0-9a-fA-F]+)$'
+ match = re.search(pattern, line)
+ if match:
+ name = match.group(1)
+ rev = match.group(2)
+ if line.startswith('# initial_rev'):
+ if not (name == "." and initial_rev):
+ initial_revs[name] = rev
+ elif line.startswith('# commit') and not force_patch_refresh:
+ if name not in commits:
+ commits[name] = [rev]
+ else:
+ commits[name].append(rev)
+ elif line.startswith('# patches_%s:' % branchname):
+ patches = line.split(':')[-1].strip().split(',')
+
+ update_revs = dict(initial_revs)
+ changed_revs = {}
+ for name, rev in initial_revs.items():
# Find first actually changed revision
stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
- initial_rev, cwd=srctree)
+ rev, cwd=os.path.join(srctree, name))
newcommits = stdout.split()
- for i in range(min(len(commits), len(newcommits))):
- if newcommits[i] == commits[i]:
- update_rev = commits[i]
+ if name in commits:
+ for i in range(min(len(commits[name]), len(newcommits))):
+ if newcommits[i] == commits[name][i]:
+ update_revs[name] = commits[name][i]
try:
stdout, _ = bb.process.run('git cherry devtool-patched',
- cwd=srctree)
+ cwd=os.path.join(srctree, name))
except bb.process.ExecutionError as err:
stdout = None
if stdout is not None and not force_patch_refresh:
- changed_revs = []
for line in stdout.splitlines():
if line.startswith('+ '):
rev = line.split()[1]
if rev in newcommits:
- changed_revs.append(rev)
+ if name not in changed_revs:
+ changed_revs[name] = [rev]
+ else:
+ changed_revs[name].append(rev)
- return initial_rev, update_rev, changed_revs, patches
+ return initial_revs, update_revs, changed_revs, patches
def _remove_file_entries(srcuri, filelist):
"""Remove file:// entries from SRC_URI"""
@@ -1287,14 +1338,17 @@ def _remove_source_files(append, files, destpath, no_report_remove=False, dry_ru
raise
-def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
+def _export_patches(srctree, rd, start_revs, destdir, changed_revs=None):
"""Export patches from srctree to given location.
Returns three-tuple of dicts:
1. updated - patches that already exist in SRCURI
2. added - new patches that don't exist in SRCURI
3 removed - patches that exist in SRCURI but not in exported patches
- In each dict the key is the 'basepath' of the URI and value is the
- absolute path to the existing file in recipe space (if any).
+ In each dict the key is the 'basepath' of the URI and value is:
+ - for updated and added dicts, a dict with 2 optional keys:
+ - 'path': the absolute path to the existing file in recipe space (if any)
+ - 'patchdir': the directory in which the patch should be applied (if any)
+ - for removed dict, the absolute path to the existing file in recipe space
"""
import oe.recipeutils
from oe.patch import GitApplyTree
@@ -1308,54 +1362,60 @@ def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
# Generate patches from Git, exclude local files directory
patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
- GitApplyTree.extractPatches(srctree, start_rev, destdir, patch_pathspec)
-
- new_patches = sorted(os.listdir(destdir))
- for new_patch in new_patches:
- # Strip numbering from patch names. If it's a git sequence named patch,
- # the numbers might not match up since we are starting from a different
- # revision This does assume that people are using unique shortlog
- # values, but they ought to be anyway...
- new_basename = seqpatch_re.match(new_patch).group(2)
- match_name = None
- for old_patch in existing_patches:
- old_basename = seqpatch_re.match(old_patch).group(2)
- old_basename_splitext = os.path.splitext(old_basename)
- if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
- old_patch_noext = os.path.splitext(old_patch)[0]
- match_name = old_patch_noext
- break
- elif new_basename == old_basename:
- match_name = old_patch
- break
- if match_name:
- # Rename patch files
- if new_patch != match_name:
- bb.utils.rename(os.path.join(destdir, new_patch),
- os.path.join(destdir, match_name))
- # Need to pop it off the list now before checking changed_revs
- oldpath = existing_patches.pop(old_patch)
- if changed_revs is not None:
- # Avoid updating patches that have not actually changed
- with open(os.path.join(destdir, match_name), 'r') as f:
- firstlineitems = f.readline().split()
- # Looking for "From <hash>" line
- if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
- if not firstlineitems[1] in changed_revs:
- continue
- # Recompress if necessary
- if oldpath.endswith(('.gz', '.Z')):
- bb.process.run(['gzip', match_name], cwd=destdir)
- if oldpath.endswith('.gz'):
- match_name += '.gz'
- else:
- match_name += '.Z'
- elif oldpath.endswith('.bz2'):
- bb.process.run(['bzip2', match_name], cwd=destdir)
- match_name += '.bz2'
- updated[match_name] = oldpath
- else:
- added[new_patch] = None
+ GitApplyTree.extractPatches(srctree, start_revs, destdir, patch_pathspec)
+ for dirpath, dirnames, filenames in os.walk(destdir):
+ new_patches = filenames
+ reldirpath = os.path.relpath(dirpath, destdir)
+ for new_patch in new_patches:
+ # Strip numbering from patch names. If it's a git sequence named patch,
+ # the numbers might not match up since we are starting from a different
+ # revision This does assume that people are using unique shortlog
+ # values, but they ought to be anyway...
+ new_basename = seqpatch_re.match(new_patch).group(2)
+ match_name = None
+ for old_patch in existing_patches:
+ old_basename = seqpatch_re.match(old_patch).group(2)
+ old_basename_splitext = os.path.splitext(old_basename)
+ if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+ old_patch_noext = os.path.splitext(old_patch)[0]
+ match_name = old_patch_noext
+ break
+ elif new_basename == old_basename:
+ match_name = old_patch
+ break
+ if match_name:
+ # Rename patch files
+ if new_patch != match_name:
+ bb.utils.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, match_name))
+ # Need to pop it off the list now before checking changed_revs
+ oldpath = existing_patches.pop(old_patch)
+ if changed_revs is not None and dirpath in changed_revs:
+ # Avoid updating patches that have not actually changed
+ with open(os.path.join(dirpath, match_name), 'r') as f:
+ firstlineitems = f.readline().split()
+ # Looking for "From <hash>" line
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if not firstlineitems[1] in changed_revs[dirpath]:
+ continue
+ # Recompress if necessary
+ if oldpath.endswith(('.gz', '.Z')):
+ bb.process.run(['gzip', match_name], cwd=destdir)
+ if oldpath.endswith('.gz'):
+ match_name += '.gz'
+ else:
+ match_name += '.Z'
+ elif oldpath.endswith('.bz2'):
+ bb.process.run(['bzip2', match_name], cwd=destdir)
+ match_name += '.bz2'
+ updated[match_name] = {'path' : oldpath}
+ if reldirpath != ".":
+ updated[match_name]['patchdir'] = reldirpath
+ else:
+ added[new_patch] = {}
+ if reldirpath != ".":
+ added[new_patch]['patchdir'] = reldirpath
+
return (updated, added, existing_patches)
@@ -1406,6 +1466,18 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
updated = OrderedDict()
added = OrderedDict()
removed = OrderedDict()
+
+ # Get current branch and return early with empty lists
+ # if on one of the override branches
+ # (local files are provided only for the main branch and processing
+ # them against lists from recipe overrides will result in mismatches
+ # and broken modifications to recipes).
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+ if branchname.startswith(override_branch_prefix):
+ return (updated, added, removed)
+
local_files_dir = os.path.join(srctreebase, 'oe-local-files')
git_files = _git_ls_tree(srctree)
if 'oe-local-files' in git_files:
@@ -1511,6 +1583,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
recipedir = os.path.basename(recipefile)
logger.info('Updating SRCREV in recipe %s%s' % (recipedir, dry_run_suffix))
+ # Get original SRCREV
+ old_srcrev = rd.getVar('SRCREV') or ''
+ if old_srcrev == "INVALID":
+ raise DevtoolError('Update mode srcrev is only valid for recipe fetched from an SCM repository')
+ old_srcrev = {'.': old_srcrev}
+
# Get HEAD revision
try:
stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
@@ -1537,13 +1615,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
if not no_remove:
# Find list of existing patches in recipe file
patches_dir = tempfile.mkdtemp(dir=tempdir)
- old_srcrev = rd.getVar('SRCREV') or ''
upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
patches_dir)
logger.debug('Patches: update %s, new %s, delete %s' % (dict(upd_p), dict(new_p), dict(del_p)))
# Remove deleted local files and "overlapping" patches
- remove_files = list(del_f.values()) + list(upd_p.values()) + list(del_p.values())
+ remove_files = list(del_f.values()) + [value["path"] for value in upd_p.values() if "path" in value] + [value["path"] for value in del_p.values() if "path" in value]
if remove_files:
removedentries = _remove_file_entries(srcuri, remove_files)[0]
update_srcuri = True
@@ -1557,11 +1634,10 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
patchfields['SRC_URI'] = '\\\n '.join(srcuri)
if dry_run_outdir:
logger.info('Creating bbappend (dry-run)')
- else:
- appendfile, destpath = oe.recipeutils.bbappend_recipe(
- rd, appendlayerdir, files, wildcardver=wildcard_version,
- extralines=patchfields, removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
+ extralines=patchfields, removevalues=removevalues,
+ redirect_output=dry_run_outdir)
else:
files_dir = _determine_files_dir(rd)
for basepath, path in upd_f.items():
@@ -1613,15 +1689,15 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
else:
patchdir_params = {'patchdir': relpatchdir}
- def srcuri_entry(fname):
+ def srcuri_entry(basepath, patchdir_params):
if patchdir_params:
paramstr = ';' + ';'.join('%s=%s' % (k,v) for k,v in patchdir_params.items())
else:
paramstr = ''
return 'file://%s%s' % (basepath, paramstr)
- initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
- if not initial_rev:
+ initial_revs, update_revs, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
+ if not initial_revs:
raise DevtoolError('Unable to find initial revision - please specify '
'it with --initial-rev')
@@ -1635,47 +1711,54 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- if filter_patches:
- upd_f = {}
- new_f = {}
- del_f = {}
- else:
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
-
- remove_files = []
- if not no_remove:
- # Get all patches from source tree and check if any should be removed
- all_patches_dir = tempfile.mkdtemp(dir=tempdir)
- _, _, del_p = _export_patches(srctree, rd, initial_rev,
- all_patches_dir)
- # Remove deleted local files and patches
- remove_files = list(del_f.values()) + list(del_p.values())
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
- upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
+ upd_p, new_p, _ = _export_patches(srctree, rd, update_revs,
patches_dir, changed_revs)
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ _, _, del_p = _export_patches(srctree, rd, initial_revs,
+ all_patches_dir)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
new_p = OrderedDict()
upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
- remove_files = [f for f in remove_files if f in filter_patches]
+ del_p = OrderedDict((k,v) for k,v in del_p.items() if k in filter_patches)
+ remove_files = []
+ if not no_remove:
+ # Remove deleted local files and patches
+ remove_files = list(del_f.values()) + list(del_p.values())
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
+
if appendlayerdir:
files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
+
+ params = []
+ for file, param in files.items():
+ patchdir_param = dict(patchdir_params)
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ params.append(patchdir_param)
+
if files or remove_files:
removevalues = None
if remove_files:
removedentries, remaining = _remove_file_entries(
srcuri, remove_files)
if removedentries or remaining:
- remaining = [srcuri_entry(os.path.basename(item)) for
+ remaining = [srcuri_entry(os.path.basename(item), patchdir_params) for
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
appendfile, destpath = oe.recipeutils.bbappend_recipe(
@@ -1683,7 +1766,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
wildcardver=wildcard_version,
removevalues=removevalues,
redirect_output=dry_run_outdir,
- params=[patchdir_params] * len(files))
+ params=params)
else:
logger.info('No patches or local source files needed updating')
else:
@@ -1700,14 +1783,22 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
_move_file(os.path.join(local_files_dir, basepath), path,
dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
- for basepath, path in upd_p.items():
- patchfn = os.path.join(patches_dir, basepath)
+ for basepath, param in upd_p.items():
+ path = param['path']
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ patchdir_param = dict(patchdir_params)
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ patchfn = os.path.join(patches_dir, patchdir, basepath)
if os.path.dirname(path) + '/' == dl_dir:
# This is a a downloaded patch file - we now need to
# replace the entry in SRC_URI with our local version
logger.info('Replacing remote patch %s with updated local version' % basepath)
path = os.path.join(files_dir, basepath)
- _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath))
+ _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath, patchdir_param))
updaterecipe = True
else:
logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
@@ -1721,15 +1812,23 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append(srcuri_entry(basepath))
+ srcuri.append(srcuri_entry(basepath, patchdir_params))
updaterecipe = True
- for basepath, path in new_p.items():
+ for basepath, param in new_p.items():
+ patchdir = param.get('patchdir', ".")
logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
- _move_file(os.path.join(patches_dir, basepath),
+ _move_file(os.path.join(patches_dir, patchdir, basepath),
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append(srcuri_entry(basepath))
+ params = dict(patchdir_params)
+ if patchdir != "." :
+ if params:
+ params['patchdir'] += patchdir
+ else:
+ params['patchdir'] = patchdir
+
+ srcuri.append(srcuri_entry(basepath, params))
updaterecipe = True
# Update recipe, if needed
if _remove_file_entries(srcuri, remove_files)[0]:
@@ -1786,6 +1885,8 @@ def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_ver
for line in stdout.splitlines():
branchname = line[2:]
if line.startswith('* '):
+ if 'HEAD' in line:
+ raise DevtoolError('Detached HEAD - please check out a branch, e.g., "devtool"')
startbranch = branchname
if branchname.startswith(override_branch_prefix):
override_branches.append(branchname)
@@ -1977,7 +2078,7 @@ def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
# We don't want to risk wiping out any work in progress
if srctreebase.startswith(os.path.join(config.workspace_path, 'sources')):
from datetime import datetime
- preservesrc = os.path.join(config.workspace_path, 'attic', 'sources', "{}.{}".format(pn,datetime.now().strftime("%Y%m%d%H%M%S")))
+ preservesrc = os.path.join(config.workspace_path, 'attic', 'sources', "{}.{}".format(pn, datetime.now().strftime("%Y%m%d%H%M%S")))
logger.info('Preserving source tree in %s\nIf you no '
'longer need it then please delete it manually.\n'
'It is also possible to reuse it via devtool source tree argument.'
@@ -2247,6 +2348,7 @@ def register_commands(subparsers, context):
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
parser_add.add_argument('--npm-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--no-pypi', help='Do not inherit pypi class', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
group = parser_add.add_mutually_exclusive_group()
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index 39a1910a49..fa5b8ef3c7 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -35,6 +35,8 @@ def _get_srctree(tmpdir):
dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
+ else:
+ raise DevtoolError("Cannot determine where the source tree is after unpacking in {}: {}".format(tmpdir,dirs))
return srctree
def _copy_source_code(orig, dest):
@@ -88,7 +90,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
-def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
+def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, revs, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
@@ -104,6 +106,11 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH:prepend := "%s:"\n' %
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
f.write('inherit externalsrc\n')
f.write(('# NOTE: We use pn- overrides here to avoid affecting'
'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
@@ -112,19 +119,17 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
if b_is_s:
f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
f.write('\n')
- if rev:
- f.write('# initial_rev: %s\n' % rev)
+ if revs:
+ for name, rev in revs.items():
+ f.write('# initial_rev %s: %s\n' % (name, rev))
if copied:
f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
f.write('# original_files: %s\n' % ' '.join(copied))
return af
def _cleanup_on_error(rd, srctree):
- rdp = os.path.split(rd)[0] # recipes folder
if os.path.exists(rd):
shutil.rmtree(rd)
- if not len(os.listdir(rdp)):
- os.rmdir(rdp)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
@@ -178,12 +183,16 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
uri, rev = _get_uri(crd)
if srcrev:
rev = srcrev
+ paths = [srctree]
if uri.startswith('git://') or uri.startswith('gitsm://'):
__run('git fetch')
__run('git checkout %s' % rev)
__run('git tag -f devtool-base-new')
- md5 = None
- sha256 = None
+ __run('git submodule update --recursive')
+ __run('git submodule foreach \'git tag -f devtool-base-new\'')
+ (stdout, _) = __run('git submodule --quiet foreach \'echo $sm_path\'')
+ paths += [os.path.join(srctree, p) for p in stdout.splitlines()]
+ checksums = {}
_, _, _, _, _, params = bb.fetch2.decodeurl(uri)
srcsubdir_rel = params.get('destsuffix', 'git')
if not srcbranch:
@@ -216,9 +225,6 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
if ftmpdir and keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- md5 = checksums['md5sum']
- sha256 = checksums['sha256sum']
-
tmpsrctree = _get_srctree(tmpdir)
srctree = os.path.abspath(srctree)
srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
@@ -252,29 +258,50 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
__run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
__run('git tag -f devtool-base-%s' % newpv)
- (stdout, _) = __run('git rev-parse HEAD')
- rev = stdout.rstrip()
+ revs = {}
+ for path in paths:
+ (stdout, _) = _run('git rev-parse HEAD', cwd=path)
+ revs[os.path.relpath(path, srctree)] = stdout.rstrip()
if no_patch:
patches = oe.recipeutils.get_recipe_patches(crd)
if patches:
logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
- __run('git checkout devtool-patched -b %s' % branch)
- (stdout, _) = __run('git branch --list devtool-override-*')
- branches_to_rebase = [branch] + stdout.split()
- for b in branches_to_rebase:
- logger.info("Rebasing {} onto {}".format(b, rev))
- __run('git checkout %s' % b)
- try:
- __run('git rebase %s' % rev)
- except bb.process.ExecutionError as e:
- if 'conflict' in e.stdout:
- logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
- __run('git rebase --abort')
- else:
- logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
- __run('git checkout %s' % branch)
+ for path in paths:
+ _run('git checkout devtool-patched -b %s' % branch, cwd=path)
+ (stdout, _) = _run('git branch --list devtool-override-*', cwd=path)
+ branches_to_rebase = [branch] + stdout.split()
+ target_branch = revs[os.path.relpath(path, srctree)]
+
+ # There is a bug (or feature?) in git rebase where if a commit with
+ # a note is fully rebased away by being part of an old commit, the
+ # note is still attached to the old commit. Avoid this by making
+ # sure all old devtool related commits have a note attached to them
+ # (this assumes git config notes.rewriteMode is set to ignore).
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ if not oe.patch.GitApplyTree.getNotes(path, rev):
+ oe.patch.GitApplyTree.addNote(path, rev, "dummy")
+
+ for b in branches_to_rebase:
+ logger.info("Rebasing {} onto {}".format(b, target_branch))
+ _run('git checkout %s' % b, cwd=path)
+ try:
+ _run('git rebase %s' % target_branch, cwd=path)
+ except bb.process.ExecutionError as e:
+ if 'conflict' in e.stdout:
+ logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ _run('git rebase --abort', cwd=path)
+ else:
+ logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+
+ # Remove any dummy notes added above.
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ oe.patch.GitApplyTree.removeNote(path, rev, "dummy")
+
+ _run('git checkout %s' % branch, cwd=path)
if tmpsrctree:
if keep_temp:
@@ -284,7 +311,7 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
if tmpdir != tmpsrctree:
shutil.rmtree(tmpdir)
- return (rev, md5, sha256, srcbranch, srcsubdir_rel)
+ return (revs, checksums, srcbranch, srcsubdir_rel)
def _add_license_diff_to_recipe(path, diff):
notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
@@ -305,7 +332,7 @@ def _add_license_diff_to_recipe(path, diff):
f.write("\n#\n\n".encode())
f.write(orig_content)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
+def _create_new_recipe(newpv, checksums, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
@@ -377,30 +404,39 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
addnames.append(params['name'])
# Find what's been set in the original recipe
oldnames = []
+ oldsums = []
noname = False
for varflag in rd.getVarFlags('SRC_URI'):
- if varflag.endswith(('.md5sum', '.sha256sum')):
- name = varflag.rsplit('.', 1)[0]
- if name not in oldnames:
- oldnames.append(name)
- elif varflag in ['md5sum', 'sha256sum']:
- noname = True
+ for checksum in checksums:
+ if varflag.endswith('.' + checksum):
+ name = varflag.rsplit('.', 1)[0]
+ if name not in oldnames:
+ oldnames.append(name)
+ oldsums.append(checksum)
+ elif varflag == checksum:
+ noname = True
+ oldsums.append(checksum)
# Even if SRC_URI has named entries it doesn't have to actually use the name
if noname and addnames and addnames[0] not in oldnames:
addnames = []
# Drop any old names (the name actually might include ${PV})
for name in oldnames:
if name not in newnames:
- newvalues['SRC_URI[%s.md5sum]' % name] = None
- newvalues['SRC_URI[%s.sha256sum]' % name] = None
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s.%s]' % (name, checksum)] = None
- if sha256:
- if addnames:
- nameprefix = '%s.' % addnames[0]
- else:
- nameprefix = ''
+ nameprefix = '%s.' % addnames[0] if addnames else ''
+
+ # md5sum is deprecated, remove any traces of it. If it was the only old
+ # checksum, then replace it with the default checksums.
+ if 'md5sum' in oldsums:
newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
- newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
+ oldsums.remove('md5sum')
+ if not oldsums:
+ oldsums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
+
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]
if srcsubdir_new != srcsubdir_old:
s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
@@ -425,6 +461,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
+ tinfoil.modified_files()
try:
rd = tinfoil.parse_recipe_file(fullpath, False)
except bb.tinfoil.TinfoilCommandFailed as e:
@@ -437,7 +474,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
def _check_git_config():
def getconfig(name):
try:
- value = bb.process.run('git config --global %s' % name)[0].strip()
+ value = bb.process.run('git config %s' % name)[0].strip()
except bb.process.ExecutionError as e:
if e.exitcode == 1:
value = None
@@ -524,14 +561,7 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- srctree_s = srctree
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree_s = os.path.join(srctree, srcsubdir)
+ srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
@@ -564,18 +594,18 @@ def upgrade(args, config, basepath, workspace):
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
old_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
logger.info('Extracting upgraded version source...')
- rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
+ rev2, checksums, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
+ rf, copied = _create_new_recipe(args.version, checksums, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
except (bb.process.CmdError, DevtoolError) as e:
recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN'))
_upgrade_error(e, recipedir, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
- af = _write_append(rf, srctree_s, args.same_dir, args.no_same_dir, rev2,
+ af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
@@ -619,7 +649,7 @@ def check_upgrade_status(args, config, basepath, workspace):
for result in results:
# pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
if args.all or result[1] != 'MATCH':
- logger.info("{:25} {:15} {:15} {} {} {}".format( result[0],
+ print("{:25} {:15} {:15} {} {} {}".format( result[0],
result[2],
result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
result[4],
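
For reference, the checksum handling that _create_new_recipe gains above (drop the deprecated md5sum and fall back to the fetcher's default checksum list when md5 was the only one set) can be read in isolation. The sketch below is illustrative only and not part of the patch; SHOWN_CHECKSUM_LIST is stubbed because bb.fetch2 is only importable inside a BitBake environment.

    # Illustrative sketch of the oldsums handling in _create_new_recipe.
    SHOWN_CHECKSUM_LIST = ("sha256",)   # stand-in for bb.fetch2.SHOWN_CHECKSUM_LIST

    def select_checksum_values(oldsums, checksums, nameprefix=""):
        """Drop deprecated md5sum entries and return the SRC_URI[...] checksum assignments."""
        newvalues = {}
        oldsums = list(oldsums)
        if 'md5sum' in oldsums:
            newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
            oldsums.remove('md5sum')
            if not oldsums:
                # md5sum was the only old checksum: use the default list instead
                oldsums = ["%ssum" % s for s in SHOWN_CHECKSUM_LIST]
        for checksum in oldsums:
            newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]
        return newvalues

    # A recipe that previously carried only an md5sum ends up with the default checksum:
    print(select_checksum_values(['md5sum'], {'sha256sum': 'deadbeef'}))
    # -> {'SRC_URI[md5sum]': None, 'SRC_URI[sha256sum]': 'deadbeef'}
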
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
index 88ed8c5f01..341e893305 100644
--- a/scripts/lib/recipetool/append.py
+++ b/scripts/lib/recipetool/append.py
@@ -18,6 +18,7 @@ import shutil
import scriptutils
import errno
from collections import defaultdict
+import difflib
logger = logging.getLogger('recipetool')
@@ -299,7 +300,10 @@ def appendfile(args):
if st.st_mode & stat.S_IXUSR:
perms = '0755'
install = {args.newfile: (args.targetpath, perms)}
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ if sourcepath:
+ sourcepath = os.path.basename(sourcepath)
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: {'newname' : sourcepath}}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ tinfoil.modified_files()
return 0
else:
if alternative_pns:
@@ -327,6 +331,7 @@ def appendsrc(args, files, rd, extralines=None):
copyfiles = {}
extralines = extralines or []
+ params = []
for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
@@ -337,25 +342,46 @@ def appendsrc(args, files, rd, extralines=None):
src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
src_destdir = os.path.normpath(src_destdir)
- source_uri = 'file://{0}'.format(os.path.basename(srcfile))
if src_destdir and src_destdir != '.':
- source_uri += ';subdir={0}'.format(src_destdir)
-
- simple = bb.fetch.URI(source_uri)
- simple.params = {}
- simple_str = str(simple)
- if simple_str in simplified:
- existing = simplified[simple_str]
- if source_uri != existing:
- logger.warning('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
- else:
- logger.warning('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ params.append({'subdir': src_destdir})
else:
- extralines.append('SRC_URI += {0}'.format(source_uri))
- copyfiles[newfile] = srcfile
-
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
-
+ params.append({})
+
+ copyfiles[newfile] = {'newname' : os.path.basename(srcfile)}
+
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ import tempfile
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+
+ appendfile, _ = oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines, params=params,
+ redirect_output=dry_run_outdir, update_original_recipe=args.update_recipe)
+ if not appendfile:
+ return
+ if args.dry_run:
+ output = ''
+ appendfilename = os.path.basename(appendfile)
+ newappendfile = appendfile
+ if appendfile and os.path.exists(appendfile):
+ with open(appendfile, 'r') as f:
+ oldlines = f.readlines()
+ else:
+ appendfile = '/dev/null'
+ oldlines = []
+
+ with open(os.path.join(dry_run_outdir, appendfilename), 'r') as f:
+ newlines = f.readlines()
+ diff = difflib.unified_diff(oldlines, newlines, appendfile, newappendfile)
+ difflines = list(diff)
+ if difflines:
+ output += ''.join(difflines)
+ if output:
+ logger.info('Diff of changed files:\n%s' % output)
+ else:
+ logger.info('No changed files')
+ tinfoil.modified_files()
def appendsrcfiles(parser, args):
recipedata = _parse_recipe(args.recipe, tinfoil)
@@ -435,6 +461,8 @@ def register_commands(subparsers):
help='Create/update a bbappend to add or replace source files',
description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
@@ -442,6 +470,8 @@ def register_commands(subparsers):
parents=[common_src],
help='Create/update a bbappend to add or replace a source file',
description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
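
The dry-run support added to appendsrc() above writes the candidate bbappend into a temporary directory and then prints a unified diff against whatever already exists on disk. A minimal standalone sketch of that pattern (the paths are hypothetical examples, not taken from the patch):

    import difflib
    import os

    def diff_against_existing(existing_path, generated_path):
        """Unified diff between an existing file (which may be absent) and a freshly generated one."""
        if os.path.exists(existing_path):
            with open(existing_path) as f:
                oldlines = f.readlines()
            fromfile = existing_path
        else:
            oldlines = []
            fromfile = '/dev/null'
        with open(generated_path) as f:
            newlines = f.readlines()
        return ''.join(difflib.unified_diff(oldlines, newlines, fromfile, existing_path))

    # e.g. print(diff_against_existing('foo.bbappend', '/tmp/devtoolXYZ/foo.bbappend'))
    # An empty result means the append file would not change.
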
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 824ac6350d..8e9ff38db6 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -423,6 +423,36 @@ def create_recipe(args):
storeTagName = ''
pv_srcpv = False
+ handled = []
+ classes = []
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading recipe handlers')
+ raw_handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_recipe_handlers'):
+ plugin.register_recipe_handlers(raw_handlers)
+ # Sort handlers by priority
+ handlers = []
+ for i, handler in enumerate(raw_handlers):
+ if isinstance(handler, tuple):
+ handlers.append((handler[0], handler[1], i))
+ else:
+ handlers.append((handler, 0, i))
+ handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
+ for handler, priority, _ in handlers:
+ logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+ setattr(handler, '_devtool', args.devtool)
+ handlers = [item[0] for item in handlers]
+
+ fetchuri = None
+ for handler in handlers:
+ if hasattr(handler, 'process_url'):
+ ret = handler.process_url(args, classes, handled, extravalues)
+ if 'url' in handled and ret:
+ fetchuri = ret
+ break
+
if os.path.isfile(source):
source = 'file://%s' % os.path.abspath(source)
@@ -431,7 +461,8 @@ def create_recipe(args):
if re.match(r'https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
- fetchuri = reformat_git_uri(urldefrag(source)[0])
+ if not fetchuri:
+ fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
@@ -638,8 +669,6 @@ def create_recipe(args):
# We'll come back and replace this later in handle_license_vars()
lines_before.append('##LICENSE_PLACEHOLDER##')
- handled = []
- classes = []
# FIXME This is kind of a hack, we probably ought to be using bitbake to do this
pn = None
@@ -677,8 +706,10 @@ def create_recipe(args):
if not srcuri:
lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
lines_before.append('SRC_URI = "%s"' % srcuri)
+ shown_checksums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
for key, value in sorted(checksums.items()):
- lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
+ if key in shown_checksums:
+ lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
if srcuri and supports_srcrev(srcuri):
lines_before.append('')
lines_before.append('# Modify these as desired')
@@ -690,7 +721,7 @@ def create_recipe(args):
srcpvprefix = 'svnr'
else:
srcpvprefix = scheme
- lines_before.append('PV = "%s+%s${SRCPV}"' % (realpv or '1.0', srcpvprefix))
+ lines_before.append('PV = "%s+%s"' % (realpv or '1.0', srcpvprefix))
pv_srcpv = True
if not args.autorev and srcrev == '${AUTOREV}':
if os.path.exists(os.path.join(srctree, '.git')):
@@ -718,25 +749,6 @@ def create_recipe(args):
if args.npm_dev:
extravalues['NPM_INSTALL_DEV'] = 1
- # Find all plugins that want to register handlers
- logger.debug('Loading recipe handlers')
- raw_handlers = []
- for plugin in plugins:
- if hasattr(plugin, 'register_recipe_handlers'):
- plugin.register_recipe_handlers(raw_handlers)
- # Sort handlers by priority
- handlers = []
- for i, handler in enumerate(raw_handlers):
- if isinstance(handler, tuple):
- handlers.append((handler[0], handler[1], i))
- else:
- handlers.append((handler, 0, i))
- handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
- for handler, priority, _ in handlers:
- logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
- setattr(handler, '_devtool', args.devtool)
- handlers = [item[0] for item in handlers]
-
# Apply the handlers
if args.binary:
classes.append('bin_package')
@@ -745,6 +757,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
@@ -869,8 +885,10 @@ def create_recipe(args):
outlines.append('')
outlines.extend(lines_after)
+ outlines = [line.rstrip('\n') + "\n" for line in outlines]
+
if extravalues:
- _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
+ _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=True)
if args.extract_to:
scriptutils.git_convert_standalone_clone(srctree)
@@ -886,7 +904,7 @@ def create_recipe(args):
log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
if outfile == '-':
- sys.stdout.write('\n'.join(outlines) + '\n')
+ sys.stdout.write(''.join(outlines) + '\n')
else:
with open(outfile, 'w') as f:
lastline = None
@@ -894,9 +912,10 @@ def create_recipe(args):
if not lastline and not line:
# Skip extra blank lines
continue
- f.write('%s\n' % line)
+ f.write('%s' % line)
lastline = line
log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
+ tinfoil.modified_files()
if tempsrc:
if args.keep_temp:
@@ -1054,54 +1073,18 @@ def get_license_md5sums(d, static_only=False, linenumbers=False):
return md5sums
-def crunch_license(licfile):
+def crunch_known_licenses(d):
'''
- Remove non-material text from a license file and then check
- its md5sum against a known list. This works well for licenses
- which contain a copyright statement, but is also a useful way
- to handle people's insistence upon reformatting the license text
- slightly (with no material difference to the text of the
- license).
+ Calculate the MD5 checksums for the crunched versions of all common
+ licenses. Also add additional known checksums.
'''
-
- import oe.utils
-
- # Note: these are carefully constructed!
- license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
- license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
- copyright_re = re.compile('^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
- disclaimer_re = re.compile('^ *\*? ?All [Rr]ights [Rr]eserved\.$')
- email_re = re.compile('^.*<[\w\.-]*@[\w\.\-]*>$')
- header_re = re.compile('^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
- tag_re = re.compile('^ *@?\(?([Ll]icense|MIT)\)?$')
- url_re = re.compile('^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
-
+
crunched_md5sums = {}
# common licenses
- crunched_md5sums['89f3bf322f30a1dcfe952e09945842f0'] = 'Apache-2.0'
- crunched_md5sums['13b6fe3075f8f42f2270a748965bf3a1'] = '0BSD'
- crunched_md5sums['ba87a7d7c20719c8df4b8beed9b78c43'] = 'BSD-2-Clause'
- crunched_md5sums['7f8892c03b72de419c27be4ebfa253f8'] = 'BSD-3-Clause'
- crunched_md5sums['21128c0790b23a8a9f9e260d5f6b3619'] = 'BSL-1.0'
- crunched_md5sums['975742a59ae1b8abdea63a97121f49f4'] = 'EDL-1.0'
- crunched_md5sums['5322cee4433d84fb3aafc9e253116447'] = 'EPL-1.0'
- crunched_md5sums['6922352e87de080f42419bed93063754'] = 'EPL-2.0'
- crunched_md5sums['793475baa22295cae1d3d4046a3a0ceb'] = 'GPL-2.0-only'
- crunched_md5sums['ff9047f969b02c20f0559470df5cb433'] = 'GPL-2.0-or-later'
- crunched_md5sums['ea6de5453fcadf534df246e6cdafadcd'] = 'GPL-3.0-only'
- crunched_md5sums['b419257d4d153a6fde92ddf96acf5b67'] = 'GPL-3.0-or-later'
- crunched_md5sums['228737f4c49d3ee75b8fb3706b090b84'] = 'ISC'
- crunched_md5sums['c6a782e826ca4e85bf7f8b89435a677d'] = 'LGPL-2.0-only'
- crunched_md5sums['32d8f758a066752f0db09bd7624b8090'] = 'LGPL-2.0-or-later'
- crunched_md5sums['4820937eb198b4f84c52217ed230be33'] = 'LGPL-2.1-only'
- crunched_md5sums['db13fe9f3a13af7adab2dc7a76f9e44a'] = 'LGPL-2.1-or-later'
- crunched_md5sums['d7a0f2e4e0950e837ac3eabf5bd1d246'] = 'LGPL-3.0-only'
- crunched_md5sums['abbf328e2b434f9153351f06b9f79d02'] = 'LGPL-3.0-or-later'
- crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
- crunched_md5sums['b218b0e94290b9b818c4be67c8e1cc82'] = 'MIT-0'
- crunched_md5sums['ddc18131d6748374f0f35a621c245b49'] = 'Unlicense'
- crunched_md5sums['51f9570ff32571fc0a443102285c5e33'] = 'WTFPL'
+ crunched_md5sums['ad4e9d34a2e966dfe9837f18de03266d'] = 'GFDL-1.1-only'
+ crunched_md5sums['d014fb11a34eb67dc717fdcfc97e60ed'] = 'GFDL-1.2-only'
+ crunched_md5sums['e020ca655b06c112def28e597ab844f1'] = 'GFDL-1.3-only'
# The following two were gleaned from the "forever" npm package
crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
@@ -1157,6 +1140,39 @@ def crunch_license(licfile):
# https://raw.githubusercontent.com/stackgl/gl-mat3/v2.0.0/LICENSE.md
crunched_md5sums['75512892d6f59dddb6d1c7e191957e9c'] = 'Zlib'
+ commonlicdir = d.getVar('COMMON_LICENSE_DIR')
+ for fn in sorted(os.listdir(commonlicdir)):
+ md5value, lictext = crunch_license(os.path.join(commonlicdir, fn))
+ if md5value not in crunched_md5sums:
+ crunched_md5sums[md5value] = fn
+ elif fn != crunched_md5sums[md5value]:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s' rather than '%s'" % (md5value, crunched_md5sums[md5value], fn))
+ else:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s'" % (md5value, crunched_md5sums[md5value]))
+
+ return crunched_md5sums
+
+def crunch_license(licfile):
+ '''
+ Remove non-material text from a license file and then calculate its
+ md5sum. This works well for licenses that contain a copyright statement,
+ but is also a useful way to handle people's insistence upon reformatting
+ the license text slightly (with no material difference to the text of the
+ license).
+ '''
+
+ import oe.utils
+
+ # Note: these are carefully constructed!
+ license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
+ license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
+ disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$')
+ email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$')
+ header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
+ tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$')
+ url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
+
lictext = []
with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
@@ -1198,16 +1214,17 @@ def crunch_license(licfile):
except UnicodeEncodeError:
md5val = None
lictext = ''
- license = crunched_md5sums.get(md5val, None)
- return license, md5val, lictext
+ return md5val, lictext
def guess_license(srctree, d):
import bb
md5sums = get_license_md5sums(d)
+ crunched_md5sums = crunch_known_licenses(d)
+
licenses = []
licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
- skip_extensions = (".html", ".js", ".json", ".svg", ".ts")
+ skip_extensions = (".html", ".js", ".json", ".svg", ".ts", ".go")
licfiles = []
for root, dirs, files in os.walk(srctree):
for fn in files:
@@ -1222,7 +1239,8 @@ def guess_license(srctree, d):
md5value = bb.utils.md5_file(licfile)
license = md5sums.get(md5value, None)
if not license:
- license, crunched_md5, lictext = crunch_license(licfile)
+ crunched_md5, lictext = crunch_license(licfile)
+ license = crunched_md5sums.get(crunched_md5, None)
if lictext and not license:
license = 'Unknown'
logger.info("Please add the following line for '%s' to a 'lib/recipetool/licenses.csv' " \
@@ -1396,6 +1414,7 @@ def register_commands(subparsers):
parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
parser_create.add_argument('--npm-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--no-pypi', action="store_true", help='Do not inherit pypi class')
parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
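
The handler registration block hoisted to the top of create_recipe() above keeps its original semantics: plugins may register either a bare handler or a (handler, priority) tuple, higher priorities run first, and registration order breaks ties. A small illustration with made-up handler names:

    # Illustrative only: mimics the (priority, -index) sort used for recipe handlers.
    raw_handlers = ["npm", ("go", 10), ("pypi", 10), "autotools"]

    handlers = []
    for i, handler in enumerate(raw_handlers):
        if isinstance(handler, tuple):
            handlers.append((handler[0], handler[1], i))
        else:
            handlers.append((handler, 0, i))

    # Higher priority first; among equal priorities, the earlier registration wins.
    handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
    print([h for h, _, _ in handlers])   # ['go', 'pypi', 'npm', 'autotools']
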
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index 5015634476..ec9d510e23 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -5,9 +5,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
import re
import logging
-import glob
from recipetool.create import RecipeHandler, validate_pv
logger = logging.getLogger('recipetool')
@@ -137,15 +137,15 @@ class CmakeRecipeHandler(RecipeHandler):
deps = []
unmappedpkgs = []
- proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE)
- pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
- pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
- findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
- findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
- checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
- include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
- subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
- dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+ proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
def find_cmake_package(pkg):
RecipeHandler.load_devel_filemap(tinfoil.config_data)
@@ -423,16 +423,16 @@ class AutotoolsRecipeHandler(RecipeHandler):
'makeinfo': 'texinfo',
}
- pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
- lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
- libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
- progs_re = re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
- ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
- am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
- define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
- version_re = re.compile('([0-9.]+)')
+ pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile(r'([0-9.]+)')
defines = {}
def subst_defines(value):
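
The create_buildsys.py hunks above only add the r'' prefix to the existing regular expressions; the patterns themselves are unchanged. Presumably this is to avoid the warnings newer Python versions emit for unrecognized escape sequences such as \s in ordinary string literals. A quick illustration (not from the patch):

    import re

    # A plain string needs doubled backslashes to stay warning-free; a raw string
    # passes the backslashes through to the re module as written.
    plain = re.compile('project\\s*\\(([^)]*)\\)', re.IGNORECASE)
    raw = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)

    assert plain.match('PROJECT (foo)') and raw.match('PROJECT (foo)')
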
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index 4675cc68fa..a807dafae5 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -10,7 +10,7 @@ import codecs
import collections
import setuptools.command.build_py
import email
-import imp
+import importlib
import glob
import itertools
import logging
@@ -18,7 +18,11 @@ import os
import re
import sys
import subprocess
+import json
+import urllib.request
from recipetool.create import RecipeHandler
+from urllib.parse import urldefrag
+from recipetool.create import determine_from_url
logger = logging.getLogger('recipetool')
@@ -37,63 +41,8 @@ class PythonRecipeHandler(RecipeHandler):
assume_provided = ['builtins', 'os.path']
# Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
+ excluded_fields = []
- bbvar_map = {
- 'Name': 'PN',
- 'Version': 'PV',
- 'Home-page': 'HOMEPAGE',
- 'Summary': 'SUMMARY',
- 'Description': 'DESCRIPTION',
- 'License': 'LICENSE',
- 'Requires': 'RDEPENDS:${PN}',
- 'Provides': 'RPROVIDES:${PN}',
- 'Obsoletes': 'RREPLACES:${PN}',
- }
- # PN/PV are already set by recipetool core & desc can be extremely long
- excluded_fields = [
- 'Description',
- ]
- setup_parse_map = {
- 'Url': 'Home-page',
- 'Classifiers': 'Classifier',
- 'Description': 'Summary',
- }
- setuparg_map = {
- 'Home-page': 'url',
- 'Classifier': 'classifiers',
- 'Summary': 'description',
- 'Description': 'long-description',
- }
- # Values which are lists, used by the setup.py argument based metadata
- # extraction method, to determine how to process the setup.py output.
- setuparg_list_fields = [
- 'Classifier',
- 'Requires',
- 'Provides',
- 'Obsoletes',
- 'Platform',
- 'Supported-Platform',
- ]
- setuparg_multi_line_values = ['Description']
- replacements = [
- ('License', r' +$', ''),
- ('License', r'^ +', ''),
- ('License', r' ', '-'),
- ('License', r'^GNU-', ''),
- ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
- ('License', r'^UNKNOWN$', ''),
-
- # Remove currently unhandled version numbers from these variables
- ('Requires', r' *\([^)]*\)', ''),
- ('Provides', r' *\([^)]*\)', ''),
- ('Obsoletes', r' *\([^)]*\)', ''),
- ('Install-requires', r'^([^><= ]+).*', r'\1'),
- ('Extras-require', r'^([^><= ]+).*', r'\1'),
- ('Tests-require', r'^([^><= ]+).*', r'\1'),
-
- # Remove unhandled dependency on particular features (e.g. foo[PDF])
- ('Install-requires', r'\[[^\]]+\]$', ''),
- ]
classifier_license_map = {
'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
@@ -166,16 +115,473 @@ class PythonRecipeHandler(RecipeHandler):
def __init__(self):
pass
+ def process_url(self, args, classes, handled, extravalues):
+ """
+ Convert any pypi url https://pypi.org/project/<package>/<version> into https://files.pythonhosted.org/packages/source/...
+ which corresponds to the archive location, and add pypi class
+ """
+
+ if 'url' in handled:
+ return None
+
+ fetch_uri = None
+ source = args.source
+ required_version = args.version if args.version else None
+ match = re.match(r'https?://pypi.org/project/([^/]+)(?:/([^/]+))?/?$', urldefrag(source)[0])
+ if match:
+ package = match.group(1)
+ version = match.group(2) if match.group(2) else required_version
+
+ json_url = f"https://pypi.org/pypi/%s/json" % package
+ response = urllib.request.urlopen(json_url)
+ if response.status == 200:
+ data = json.loads(response.read())
+ if not version:
+ # grab latest version
+ version = data["info"]["version"]
+ pypi_package = data["info"]["name"]
+ for release in reversed(data["releases"][version]):
+ if release["packagetype"] == "sdist":
+ fetch_uri = release["url"]
+ break
+ else:
+ logger.warning("Cannot handle pypi url %s: cannot fetch package information using %s", source, json_url)
+ return None
+ else:
+ match = re.match(r'^https?://files.pythonhosted.org/packages.*/(.*)-.*$', source)
+ if match:
+ fetch_uri = source
+ pypi_package = match.group(1)
+ _, version = determine_from_url(fetch_uri)
+
+ if match and not args.no_pypi:
+ if required_version and version != required_version:
+ raise Exception("Version specified using --version/-V (%s) and version specified in the url (%s) do not match" % (required_version, version))
+ # This is optional if BPN looks like "python-<pypi_package>" or "python3-<pypi_package>" (see pypi.bbclass)
+ # but at this point we cannot know, because the user can specify the output name of the recipe on the command line
+ extravalues["PYPI_PACKAGE"] = pypi_package
+ # If the tarball extension is not 'tar.gz' (default value in pypi.bbclass) we should set PYPI_PACKAGE_EXT in the recipe
+ pypi_package_ext = re.match(r'.*%s-%s\.(.*)$' % (pypi_package, version), fetch_uri)
+ if pypi_package_ext:
+ pypi_package_ext = pypi_package_ext.group(1)
+ if pypi_package_ext != "tar.gz":
+ extravalues["PYPI_PACKAGE_EXT"] = pypi_package_ext
+
+ # Pypi class will handle S and SRC_URI variables, so remove them
+ # TODO: allow oe.recipeutils.patch_recipe_lines() to accept regexp so we can simplify the following to:
+ # extravalues['SRC_URI(?:\[.*?\])?'] = None
+ extravalues['S'] = None
+ extravalues['SRC_URI'] = None
+
+ classes.append('pypi')
+
+ handled.append('url')
+ return fetch_uri
+
+ def handle_classifier_license(self, classifiers, existing_licenses=""):
+
+ licenses = []
+ for classifier in classifiers:
+ if classifier in self.classifier_license_map:
+ license = self.classifier_license_map[classifier]
+ if license == 'Apache' and 'Apache-2.0' in existing_licenses:
+ license = 'Apache-2.0'
+ elif license == 'GPL':
+ if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
+ license = 'GPL-2.0'
+ elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
+ license = 'GPL-3.0'
+ elif license == 'LGPL':
+ if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
+ license = 'LGPL-2.1'
+ elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
+ license = 'LGPL-2.0'
+ elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
+ license = 'LGPL-3.0'
+ licenses.append(license)
+
+ if licenses:
+ return ' & '.join(licenses)
+
+ return None
+
+ def map_info_to_bbvar(self, info, extravalues):
+
+ # Map PKG-INFO & setup.py fields to bitbake variables
+ for field, values in info.items():
+ if field in self.excluded_fields:
+ continue
+
+ if field not in self.bbvar_map:
+ continue
+
+ if isinstance(values, str):
+ value = values
+ else:
+ value = ' '.join(str(v) for v in values if v)
+
+ bbvar = self.bbvar_map[field]
+ if bbvar == "PN":
+ # by convention python recipes start with "python3-"
+ if not value.startswith('python'):
+ value = 'python3-' + value
+
+ if bbvar not in extravalues and value:
+ extravalues[bbvar] = value
+
+ def apply_info_replacements(self, info):
+ if not self.replacements:
+ return
+
+ for variable, search, replace in self.replacements:
+ if variable not in info:
+ continue
+
+ def replace_value(search, replace, value):
+ if replace is None:
+ if re.search(search, value):
+ return None
+ else:
+ new_value = re.sub(search, replace, value)
+ if value != new_value:
+ return new_value
+ return value
+
+ value = info[variable]
+ if isinstance(value, str):
+ new_value = replace_value(search, replace, value)
+ if new_value is None:
+ del info[variable]
+ elif new_value != value:
+ info[variable] = new_value
+ elif hasattr(value, 'items'):
+ for dkey, dvalue in list(value.items()):
+ new_list = []
+ for pos, a_value in enumerate(dvalue):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
+
+ if value != new_list:
+ value[dkey] = new_list
+ else:
+ new_list = []
+ for pos, a_value in enumerate(value):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
+
+ if value != new_list:
+ info[variable] = new_list
+
+
+ def scan_python_dependencies(self, paths):
+ deps = set()
+ try:
+ dep_output = self.run_command(['pythondeps', '-d'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ for line in dep_output.splitlines():
+ line = line.rstrip()
+ dep, filename = line.split('\t', 1)
+ if filename.endswith('/setup.py'):
+ continue
+ deps.add(dep)
+
+ try:
+ provides_output = self.run_command(['pythondeps', '-p'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ provides_lines = (l.rstrip() for l in provides_output.splitlines())
+ provides = set(l for l in provides_lines if l and l != 'setup')
+ deps -= provides
+
+ return deps
+
+ def parse_pkgdata_for_python_packages(self):
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+
+ ldata = tinfoil.config_data.createCopy()
+ bb.parse.handle('classes-recipe/python3-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+
+ dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
+ python_dirs = [python_sitedir + os.sep,
+ os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
+ os.path.dirname(python_sitedir) + os.sep]
+ packages = {}
+ for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
+ files_info = None
+ with open(pkgdatafile, 'r') as f:
+ for line in f.readlines():
+ field, value = line.split(': ', 1)
+ if field.startswith('FILES_INFO'):
+ files_info = ast.literal_eval(value)
+ break
+ else:
+ continue
+
+ for fn in files_info:
+ for suffix in importlib.machinery.all_suffixes():
+ if fn.endswith(suffix):
+ break
+ else:
+ continue
+
+ if fn.startswith(dynload_dir + os.sep):
+ if '/.debug/' in fn:
+ continue
+ base = os.path.basename(fn)
+ provided = base.split('.', 1)[0]
+ packages[provided] = os.path.basename(pkgdatafile)
+ continue
+
+ for python_dir in python_dirs:
+ if fn.startswith(python_dir):
+ relpath = fn[len(python_dir):]
+ relstart, _, relremaining = relpath.partition(os.sep)
+ if relstart.endswith('.egg'):
+ relpath = relremaining
+ base, _ = os.path.splitext(relpath)
+
+ if '/.debug/' in base:
+ continue
+ if os.path.basename(base) == '__init__':
+ base = os.path.dirname(base)
+ base = base.replace(os.sep + os.sep, os.sep)
+ provided = base.replace(os.sep, '.')
+ packages[provided] = os.path.basename(pkgdatafile)
+ return packages
+
+ @classmethod
+ def run_command(cls, cmd, **popenargs):
+ if 'stderr' not in popenargs:
+ popenargs['stderr'] = subprocess.STDOUT
+ try:
+ return subprocess.check_output(cmd, **popenargs).decode('utf-8')
+ except OSError as exc:
+ logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
+ raise
+ except subprocess.CalledProcessError as exc:
+ logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc.output)
+ raise
+
+class PythonSetupPyRecipeHandler(PythonRecipeHandler):
+ bbvar_map = {
+ 'Name': 'PN',
+ 'Version': 'PV',
+ 'Home-page': 'HOMEPAGE',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Requires': 'RDEPENDS:${PN}',
+ 'Provides': 'RPROVIDES:${PN}',
+ 'Obsoletes': 'RREPLACES:${PN}',
+ }
+ # PN/PV are already set by recipetool core & desc can be extremely long
+ excluded_fields = [
+ 'Description',
+ ]
+ setup_parse_map = {
+ 'Url': 'Home-page',
+ 'Classifiers': 'Classifier',
+ 'Description': 'Summary',
+ }
+ setuparg_map = {
+ 'Home-page': 'url',
+ 'Classifier': 'classifiers',
+ 'Summary': 'description',
+ 'Description': 'long-description',
+ }
+ # Values which are lists, used by the setup.py argument based metadata
+ # extraction method, to determine how to process the setup.py output.
+ setuparg_list_fields = [
+ 'Classifier',
+ 'Requires',
+ 'Provides',
+ 'Obsoletes',
+ 'Platform',
+ 'Supported-Platform',
+ ]
+ setuparg_multi_line_values = ['Description']
+
+ replacements = [
+ ('License', r' +$', ''),
+ ('License', r'^ +', ''),
+ ('License', r' ', '-'),
+ ('License', r'^GNU-', ''),
+ ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
+ ('License', r'^UNKNOWN$', ''),
+
+ # Remove currently unhandled version numbers from these variables
+ ('Requires', r' *\([^)]*\)', ''),
+ ('Provides', r' *\([^)]*\)', ''),
+ ('Obsoletes', r' *\([^)]*\)', ''),
+ ('Install-requires', r'^([^><= ]+).*', r'\1'),
+ ('Extras-require', r'^([^><= ]+).*', r'\1'),
+ ('Tests-require', r'^([^><= ]+).*', r'\1'),
+
+ # Remove unhandled dependency on particular features (e.g. foo[PDF])
+ ('Install-requires', r'\[[^\]]+\]$', ''),
+ ]
+
+ def __init__(self):
+ pass
+
+ def parse_setup_py(self, setupscript='./setup.py'):
+ with codecs.open(setupscript) as f:
+ info, imported_modules, non_literals, extensions = gather_setup_info(f)
+
+ def _map(key):
+ key = key.replace('_', '-')
+ key = key[0].upper() + key[1:]
+ if key in self.setup_parse_map:
+ key = self.setup_parse_map[key]
+ return key
+
+ # Naive mapping of setup() arguments to PKG-INFO field names
+ for d in [info, non_literals]:
+ for key, value in list(d.items()):
+ if key is None:
+ continue
+ new_key = _map(key)
+ if new_key != key:
+ del d[key]
+ d[new_key] = value
+
+ return info, 'setuptools' in imported_modules, non_literals, extensions
+
+ def get_setup_args_info(self, setupscript='./setup.py'):
+ cmd = ['python3', setupscript]
+ info = {}
+ keys = set(self.bbvar_map.keys())
+ keys |= set(self.setuparg_list_fields)
+ keys |= set(self.setuparg_multi_line_values)
+ grouped_keys = itertools.groupby(keys, lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values))
+ for index, keys in grouped_keys:
+ if index == (True, False):
+ # Splitlines output for each arg as a list value
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = [l.rstrip() for l in arg_info.splitlines()]
+ elif index == (False, True):
+ # Entire output for each arg
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = arg_info
+ else:
+ info.update(self.get_setup_byline(list(keys), setupscript))
+ return info
+
+ def get_setup_byline(self, fields, setupscript='./setup.py'):
+ info = {}
+
+ cmd = ['python3', setupscript]
+ cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
+ try:
+ info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ if len(fields) != len(info_lines):
+ logger.error('Mismatch between setup.py output lines and number of fields')
+ sys.exit(1)
+
+ for lineno, line in enumerate(info_lines):
+ line = line.rstrip()
+ info[fields[lineno]] = line
+ return info
+
+ def get_pkginfo(self, pkginfo_fn):
+ msg = email.message_from_file(open(pkginfo_fn, 'r'))
+ msginfo = {}
+ for field in msg.keys():
+ values = msg.get_all(field)
+ if len(values) == 1:
+ msginfo[field] = values[0]
+ else:
+ msginfo[field] = values
+ return msginfo
+
+ def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
+ if 'Package-dir' in setup_info:
+ package_dir = setup_info['Package-dir']
+ else:
+ package_dir = {}
+
+ dist = setuptools.Distribution()
+
+ class PackageDir(setuptools.command.build_py.build_py):
+ def __init__(self, package_dir):
+ self.package_dir = package_dir
+ self.dist = dist
+ super().__init__(self.dist)
+
+ pd = PackageDir(package_dir)
+ to_scan = []
+ if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
+ if 'Py-modules' in setup_info:
+ for module in setup_info['Py-modules']:
+ try:
+ package, module = module.rsplit('.', 1)
+ except ValueError:
+ package, module = '.', module
+ module_path = os.path.join(pd.get_package_dir(package), module + '.py')
+ to_scan.append(module_path)
+
+ if 'Packages' in setup_info:
+ for package in setup_info['Packages']:
+ to_scan.append(pd.get_package_dir(package))
+
+ if 'Scripts' in setup_info:
+ to_scan.extend(setup_info['Scripts'])
+ else:
+ logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
+
+ if not to_scan:
+ to_scan = ['.']
+
+ logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
+ mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
+ for dep in scanned_deps:
+ mapped = provided_packages.get(dep)
+ if mapped:
+ logger.debug('Mapped %s to %s' % (dep, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug('Could not map %s' % dep)
+ unmapped_deps.add(dep)
+ return mapped_deps, unmapped_deps
+
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+
if 'buildsystem' in handled:
return False
+ logger.debug("Trying setup.py parser")
+
# Check for non-zero size setup.py files
setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
for fn in setupfiles:
if os.path.getsize(fn):
break
else:
+ logger.debug("No setup.py found")
return False
# setup.py is always parsed to get at certain required information, such as
@@ -254,51 +660,16 @@ class PythonRecipeHandler(RecipeHandler):
if license_str:
for i, line in enumerate(lines_before):
- if line.startswith('LICENSE = '):
+ if line.startswith('##LICENSE_PLACEHOLDER##'):
lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
break
if 'Classifier' in info:
- existing_licenses = info.get('License', '')
- licenses = []
- for classifier in info['Classifier']:
- if classifier in self.classifier_license_map:
- license = self.classifier_license_map[classifier]
- if license == 'Apache' and 'Apache-2.0' in existing_licenses:
- license = 'Apache-2.0'
- elif license == 'GPL':
- if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
- license = 'GPL-2.0'
- elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
- license = 'GPL-3.0'
- elif license == 'LGPL':
- if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
- license = 'LGPL-2.1'
- elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
- license = 'LGPL-2.0'
- elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
- license = 'LGPL-3.0'
- licenses.append(license)
-
- if licenses:
- info['License'] = ' & '.join(licenses)
-
- # Map PKG-INFO & setup.py fields to bitbake variables
- for field, values in info.items():
- if field in self.excluded_fields:
- continue
+ license = self.handle_classifier_license(info['Classifier'], info.get('License', ''))
+ if license:
+ info['License'] = license
- if field not in self.bbvar_map:
- continue
-
- if isinstance(values, str):
- value = values
- else:
- value = ' '.join(str(v) for v in values if v)
-
- bbvar = self.bbvar_map[field]
- if bbvar not in extravalues and value:
- extravalues[bbvar] = value
+ self.map_info_to_bbvar(info, extravalues)
mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
@@ -355,279 +726,283 @@ class PythonRecipeHandler(RecipeHandler):
handled.append('buildsystem')
- def get_pkginfo(self, pkginfo_fn):
- msg = email.message_from_file(open(pkginfo_fn, 'r'))
- msginfo = {}
- for field in msg.keys():
- values = msg.get_all(field)
- if len(values) == 1:
- msginfo[field] = values[0]
- else:
- msginfo[field] = values
- return msginfo
+class PythonPyprojectTomlRecipeHandler(PythonRecipeHandler):
+ """Base class to support PEP517 and PEP518
+
+ PEP517 https://peps.python.org/pep-0517/#source-trees
+ PEP518 https://peps.python.org/pep-0518/#build-system-table
+ """
+ # bitbake currently supports the following build backends
+ build_backend_map = {
+ "setuptools.build_meta": "python_setuptools_build_meta",
+ "poetry.core.masonry.api": "python_poetry_core",
+ "flit_core.buildapi": "python_flit_core",
+ "hatchling.build": "python_hatchling",
+ "maturin": "python_maturin",
+ "mesonpy": "python_mesonpy",
+ }
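+ # As an illustrative (hypothetical) example, a pyproject.toml containing:
+ #   [build-system]
+ #   requires = ["setuptools>=61", "wheel"]
+ #   build-backend = "setuptools.build_meta"
+ # would be mapped by this table to the python_setuptools_build_meta class.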
- def parse_setup_py(self, setupscript='./setup.py'):
- with codecs.open(setupscript) as f:
- info, imported_modules, non_literals, extensions = gather_setup_info(f)
+ # setuptools.build_meta and flit declare project metadata in the "project" section of pyproject.toml,
+ # according to PEP-621: https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#declaring-project-metadata
+ # while poetry uses the "tool.poetry" section according to its official documentation: https://python-poetry.org/docs/pyproject/
+ # Keys from the "project" and "tool.poetry" sections are almost the same, except for the homepage, which is "homepage" for tool.poetry
+ # and "Homepage" for the "project" section. So keep both.
+ bbvar_map = {
+ "name": "PN",
+ "version": "PV",
+ "Homepage": "HOMEPAGE",
+ "homepage": "HOMEPAGE",
+ "description": "SUMMARY",
+ "license": "LICENSE",
+ "dependencies": "RDEPENDS:${PN}",
+ "requires": "DEPENDS",
+ }
- def _map(key):
- key = key.replace('_', '-')
- key = key[0].upper() + key[1:]
- if key in self.setup_parse_map:
- key = self.setup_parse_map[key]
- return key
+ replacements = [
+ ("license", r" +$", ""),
+ ("license", r"^ +", ""),
+ ("license", r" ", "-"),
+ ("license", r"^GNU-", ""),
+ ("license", r"-[Ll]icen[cs]e(,?-[Vv]ersion)?", ""),
+ ("license", r"^UNKNOWN$", ""),
+ # Remove currently unhandled version numbers from these variables
+ ("requires", r"\[[^\]]+\]$", ""),
+ ("requires", r"^([^><= ]+).*", r"\1"),
+ ("dependencies", r"\[[^\]]+\]$", ""),
+ ("dependencies", r"^([^><= ]+).*", r"\1"),
+ ]
- # Naive mapping of setup() arguments to PKG-INFO field names
- for d in [info, non_literals]:
- for key, value in list(d.items()):
- if key is None:
- continue
- new_key = _map(key)
- if new_key != key:
- del d[key]
- d[new_key] = value
+ excluded_native_pkgdeps = [
+ # already provided by python_setuptools_build_meta.bbclass
+ "python3-setuptools-native",
+ "python3-wheel-native",
+ # already provided by python_poetry_core.bbclass
+ "python3-poetry-core-native",
+ # already provided by python_flit_core.bbclass
+ "python3-flit-core-native",
+ # already provided by python_mesonpy
+ "python3-meson-python-native",
+ ]
- return info, 'setuptools' in imported_modules, non_literals, extensions
+ # add here a list of known and often used packages and the corresponding bitbake package
+ known_deps_map = {
+ "setuptools": "python3-setuptools",
+ "wheel": "python3-wheel",
+ "poetry-core": "python3-poetry-core",
+ "flit_core": "python3-flit-core",
+ "setuptools-scm": "python3-setuptools-scm",
+ "hatchling": "python3-hatchling",
+ "hatch-vcs": "python3-hatch-vcs",
+ "meson-python" : "python3-meson-python",
+ }
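+ # For illustration: a build-system "requires" entry of "setuptools-scm" is mapped
+ # to python3-setuptools-scm via this table and, once the -native suffix is added
+ # during processing, ends up as python3-setuptools-scm-native in DEPENDS.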
- def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python3', setupscript]
- info = {}
- keys = set(self.bbvar_map.keys())
- keys |= set(self.setuparg_list_fields)
- keys |= set(self.setuparg_multi_line_values)
- grouped_keys = itertools.groupby(keys, lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values))
- for index, keys in grouped_keys:
- if index == (True, False):
- # Splitlines output for each arg as a list value
- for key in keys:
- arg = self.setuparg_map.get(key, key.lower())
- try:
- arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- info[key] = [l.rstrip() for l in arg_info.splitlines()]
- elif index == (False, True):
- # Entire output for each arg
- for key in keys:
- arg = self.setuparg_map.get(key, key.lower())
- try:
- arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- info[key] = arg_info
- else:
- info.update(self.get_setup_byline(list(keys), setupscript))
- return info
+ def __init__(self):
+ pass
- def get_setup_byline(self, fields, setupscript='./setup.py'):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
info = {}
+ metadata = {}
- cmd = ['python3', setupscript]
- cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
- try:
- info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- if len(fields) != len(info_lines):
- logger.error('Mismatch between setup.py output lines and number of fields')
- sys.exit(1)
-
- for lineno, line in enumerate(info_lines):
- line = line.rstrip()
- info[fields[lineno]] = line
- return info
-
- def apply_info_replacements(self, info):
- for variable, search, replace in self.replacements:
- if variable not in info:
- continue
-
- def replace_value(search, replace, value):
- if replace is None:
- if re.search(search, value):
- return None
- else:
- new_value = re.sub(search, replace, value)
- if value != new_value:
- return new_value
- return value
-
- value = info[variable]
- if isinstance(value, str):
- new_value = replace_value(search, replace, value)
- if new_value is None:
- del info[variable]
- elif new_value != value:
- info[variable] = new_value
- elif hasattr(value, 'items'):
- for dkey, dvalue in list(value.items()):
- new_list = []
- for pos, a_value in enumerate(dvalue):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- value[dkey] = new_list
- else:
- new_list = []
- for pos, a_value in enumerate(value):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- info[variable] = new_list
-
- def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
- if 'Package-dir' in setup_info:
- package_dir = setup_info['Package-dir']
- else:
- package_dir = {}
-
- dist = setuptools.Distribution()
-
- class PackageDir(setuptools.command.build_py.build_py):
- def __init__(self, package_dir):
- self.package_dir = package_dir
- self.dist = dist
- super().__init__(self.dist)
-
- pd = PackageDir(package_dir)
- to_scan = []
- if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
- if 'Py-modules' in setup_info:
- for module in setup_info['Py-modules']:
- try:
- package, module = module.rsplit('.', 1)
- except ValueError:
- package, module = '.', module
- module_path = os.path.join(pd.get_package_dir(package), module + '.py')
- to_scan.append(module_path)
+ if 'buildsystem' in handled:
+ return False
- if 'Packages' in setup_info:
- for package in setup_info['Packages']:
- to_scan.append(pd.get_package_dir(package))
+ logger.debug("Trying pyproject.toml parser")
- if 'Scripts' in setup_info:
- to_scan.extend(setup_info['Scripts'])
+ # Check for a non-zero size pyproject.toml file
+ setupfiles = RecipeHandler.checkfiles(srctree, ["pyproject.toml"])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
else:
- logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
-
- if not to_scan:
- to_scan = ['.']
-
- logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+ logger.debug("No pyproject.toml found")
+ return False
- provided_packages = self.parse_pkgdata_for_python_packages()
- scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
- mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
- for dep in scanned_deps:
- mapped = provided_packages.get(dep)
- if mapped:
- logger.debug('Mapped %s to %s' % (dep, mapped))
- mapped_deps.add(mapped)
- else:
- logger.debug('Could not map %s' % dep)
- unmapped_deps.add(dep)
- return mapped_deps, unmapped_deps
+ setupscript = os.path.join(srctree, "pyproject.toml")
- def scan_python_dependencies(self, paths):
- deps = set()
try:
- dep_output = self.run_command(['pythondeps', '-d'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- for line in dep_output.splitlines():
- line = line.rstrip()
- dep, filename = line.split('\t', 1)
- if filename.endswith('/setup.py'):
- continue
- deps.add(dep)
+ try:
+ import tomllib
+ except ImportError:
+ try:
+ import tomli as tomllib
+ except ImportError:
+ logger.error("Neither 'tomllib' nor 'tomli' could be imported, cannot scan pyproject.toml.")
+ return False
+
+ try:
+ with open(setupscript, "rb") as f:
+ config = tomllib.load(f)
+ except Exception:
+ logger.exception("Failed to parse pyproject.toml")
+ return False
+
+ build_backend = config["build-system"]["build-backend"]
+ if build_backend in self.build_backend_map:
+ classes.append(self.build_backend_map[build_backend])
+ else:
+ logger.error(
+ "Unsupported build-backend: %s, cannot use pyproject.toml. Will try to use legacy setup.py"
+ % build_backend
+ )
+ return False
- try:
- provides_output = self.run_command(['pythondeps', '-p'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- provides_lines = (l.rstrip() for l in provides_output.splitlines())
- provides = set(l for l in provides_lines if l and l != 'setup')
- deps -= provides
+ licfile = ""
- return deps
+ if build_backend == "poetry.core.masonry.api":
+ if "tool" in config and "poetry" in config["tool"]:
+ metadata = config["tool"]["poetry"]
+ else:
+ if "project" in config:
+ metadata = config["project"]
+
+ if metadata:
+ for field, values in metadata.items():
+ if field == "license":
+ # For setuptools.build_meta and flit, license is a table,
+ # but for poetry license is a string.
+ # For hatchling, both a table (jsonschema) and a string (iniconfig) have been used.
+ if build_backend == "poetry.core.masonry.api":
+ value = values
+ else:
+ value = values.get("text", "")
+ if not value:
+ licfile = values.get("file", "")
+ continue
+ elif field == "dependencies" and build_backend == "poetry.core.masonry.api":
+ # For poetry backend, "dependencies" section looks like:
+ # [tool.poetry.dependencies]
+ # requests = "^2.13.0"
+ # requests = { version = "^2.13.0", source = "private" }
+ # See https://python-poetry.org/docs/master/pyproject/#dependencies-and-dependency-groups for more details
+ # This class doesn't handle versions anyway, so we just get the dependency names here and construct a list
+ value = []
+ for k in values.keys():
+ value.append(k)
+ elif isinstance(values, dict):
+ for k, v in values.items():
+ info[k] = v
+ continue
+ else:
+ value = values
- def parse_pkgdata_for_python_packages(self):
- suffixes = [t[0] for t in imp.get_suffixes()]
- pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ info[field] = value
- ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes-recipe/python3-dir.bbclass', ldata, True)
- python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+ # Grab the license value before applying replacements
+ license_str = info.get("license", "").strip()
- dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
- python_dirs = [python_sitedir + os.sep,
- os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
- os.path.dirname(python_sitedir) + os.sep]
- packages = {}
- for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
- files_info = None
- with open(pkgdatafile, 'r') as f:
- for line in f.readlines():
- field, value = line.split(': ', 1)
- if field.startswith('FILES_INFO'):
- files_info = ast.literal_eval(value)
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith("##LICENSE_PLACEHOLDER##"):
+ lines_before.insert(
+ i, "# NOTE: License in pyproject.toml is: %s" % license_str
+ )
break
- else:
- continue
- for fn in files_info:
- for suffix in suffixes:
- if fn.endswith(suffix):
- break
- else:
- continue
+ info["requires"] = config["build-system"]["requires"]
+
+ self.apply_info_replacements(info)
+
+ if "classifiers" in info:
+ license = self.handle_classifier_license(
+ info["classifiers"], info.get("license", "")
+ )
+ if license:
+ if licfile:
+ lines = []
+ md5value = bb.utils.md5_file(os.path.join(srctree, licfile))
+ lines.append('LICENSE = "%s"' % license)
+ lines.append(
+ 'LIC_FILES_CHKSUM = "file://%s;md5=%s"'
+ % (licfile, md5value)
+ )
+ lines.append("")
+
+ # Replace the placeholder so we get the values in the right place in the recipe file
+ try:
+ pos = lines_before.index("##LICENSE_PLACEHOLDER##")
+ except ValueError:
+ pos = -1
+ if pos == -1:
+ lines_before.extend(lines)
+ else:
+ lines_before[pos : pos + 1] = lines
- if fn.startswith(dynload_dir + os.sep):
- if '/.debug/' in fn:
- continue
- base = os.path.basename(fn)
- provided = base.split('.', 1)[0]
- packages[provided] = os.path.basename(pkgdatafile)
- continue
+ handled.append(("license", [license, licfile, md5value]))
+ else:
+ info["license"] = license
- for python_dir in python_dirs:
- if fn.startswith(python_dir):
- relpath = fn[len(python_dir):]
- relstart, _, relremaining = relpath.partition(os.sep)
- if relstart.endswith('.egg'):
- relpath = relremaining
- base, _ = os.path.splitext(relpath)
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ provided_packages.update(self.known_deps_map)
+ native_mapped_deps, native_unmapped_deps = set(), set()
+ mapped_deps, unmapped_deps = set(), set()
- if '/.debug/' in base:
- continue
- if os.path.basename(base) == '__init__':
- base = os.path.dirname(base)
- base = base.replace(os.sep + os.sep, os.sep)
- provided = base.replace(os.sep, '.')
- packages[provided] = os.path.basename(pkgdatafile)
- return packages
+ if "requires" in info:
+ for require in info["requires"]:
+ mapped = provided_packages.get(require)
- @classmethod
- def run_command(cls, cmd, **popenargs):
- if 'stderr' not in popenargs:
- popenargs['stderr'] = subprocess.STDOUT
- try:
- return subprocess.check_output(cmd, **popenargs).decode('utf-8')
- except OSError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
- raise
- except subprocess.CalledProcessError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc.output)
- raise
+ if mapped:
+ logger.debug("Mapped %s to %s" % (require, mapped))
+ native_mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % require)
+ native_unmapped_deps.add(require)
+
+ info.pop("requires")
+
+ if native_mapped_deps != set():
+ native_mapped_deps = {
+ item + "-native" for item in native_mapped_deps
+ }
+ native_mapped_deps -= set(self.excluded_native_pkgdeps)
+ if native_mapped_deps != set():
+ info["requires"] = " ".join(sorted(native_mapped_deps))
+
+ if native_unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(native_unmapped_deps)
+ )
+
+ if "dependencies" in info:
+ for dependency in info["dependencies"]:
+ mapped = provided_packages.get(dependency)
+ if mapped:
+ logger.debug("Mapped %s to %s" % (dependency, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % dependency)
+ unmapped_deps.add(dependency)
+
+ info.pop("dependencies")
+
+ if mapped_deps != set():
+ info["dependencies"] = " ".join(sorted(mapped_deps))
+
+ if unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# runtime dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(unmapped_deps)
+ )
+
+ self.map_info_to_bbvar(info, extravalues)
+
+ handled.append("buildsystem")
+ except Exception:
+ logger.exception("Failed to correctly handle pyproject.toml, falling back to another method")
+ return False
def gather_setup_info(fileobj):
@@ -743,5 +1118,7 @@ def has_non_literals(value):
def register_recipe_handlers(handlers):
- # We need to make sure this is ahead of the makefile fallback handler
- handlers.append((PythonRecipeHandler(), 70))
+ # We need to make sure these are ahead of the makefile fallback handler
+ # and the pyproject.toml handler ahead of the setup.py handler
+ handlers.append((PythonPyprojectTomlRecipeHandler(), 75))
+ handlers.append((PythonSetupPyRecipeHandler(), 70))
diff --git a/scripts/lib/recipetool/create_go.py b/scripts/lib/recipetool/create_go.py
new file mode 100644
index 0000000000..a85a2f2786
--- /dev/null
+++ b/scripts/lib/recipetool/create_go.py
@@ -0,0 +1,777 @@
+# Recipe creation tool - go support plugin
+#
+# The code is based on golang internals. See the affected
+# methods for further reference and information.
+#
+# Copyright (C) 2023 Weidmueller GmbH & Co KG
+# Author: Lukas Funke <lukas.funke@weidmueller.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+
+from collections import namedtuple
+from enum import Enum
+from html.parser import HTMLParser
+from recipetool.create import RecipeHandler, handle_license_vars
+from recipetool.create import guess_license, tidy_licenses, fixup_license
+from recipetool.create import determine_from_url
+from urllib.error import URLError, HTTPError
+
+import bb.utils
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+import shutil
+import tempfile
+import urllib.parse
+import urllib.request
+
+
+GoImport = namedtuple('GoImport', 'root vcs url suffix')
+logger = logging.getLogger('recipetool')
+CodeRepo = namedtuple(
+ 'CodeRepo', 'path codeRoot codeDir pathMajor pathPrefix pseudoMajor')
+
+tinfoil = None
+
+# Regular expression to parse pseudo semantic version
+# see https://go.dev/ref/mod#pseudo-versions
+re_pseudo_semver = re.compile(
+ r"^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)(?P<utc>\d{14})-(?P<commithash>[A-Za-z0-9]+)(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$")
+# Regular expression to parse semantic version
+re_semver = re.compile(
+ r"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$")
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class GoRecipeHandler(RecipeHandler):
+ """Class to handle the go recipe creation"""
+
+ @staticmethod
+ def __ensure_go():
+ """Check if the 'go' command is available in the recipes"""
+ recipe = "go-native"
+ if not tinfoil.recipes_parsed:
+ tinfoil.parse_recipes()
+ try:
+ rd = tinfoil.parse_recipe(recipe)
+ except bb.providers.NoProvider:
+ bb.error(
+ "Nothing provides '%s' which is required for the build" % (recipe))
+ bb.note(
+ "You will likely need to add a layer that provides '%s'" % (recipe))
+ return None
+
+ bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+ gopath = os.path.join(bindir, 'go')
+
+ if not os.path.exists(gopath):
+ tinfoil.build_targets(recipe, 'addto_recipe_sysroot')
+
+ if not os.path.exists(gopath):
+ logger.error(
+ '%s required to process specified source, but %s did not seem to populate it' % ('go', recipe))
+ return None
+
+ return bindir
+
+ def __resolve_repository_static(self, modulepath):
+ """Resolve the repository in a static manner
+
+ The method is based on the go implementation of
+ `repoRootFromVCSPaths` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+
+ url = urllib.parse.urlparse("https://" + modulepath)
+ req = urllib.request.Request(url.geturl())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ # Some module paths are just redirects to github (or some other vcs
+ # host). Therefore, we check if this module path redirects
+ # somewhere else
+ if resp.geturl() != url.geturl():
+ bb.debug(1, "%s is redirected to %s" %
+ (url.geturl(), resp.geturl()))
+ url = urllib.parse.urlparse(resp.geturl())
+ modulepath = url.netloc + url.path
+
+ except URLError as url_err:
+ # This is probably because the module path
+ # contains the subdir and major path. Thus,
+ # we ignore this error for now
+ logger.debug(
+ "Failed to fetch page from [%s]: %s" % (url, str(url_err)))
+
+ host, _, _ = modulepath.partition('/')
+
+ class vcs(Enum):
+ pathprefix = "pathprefix"
+ regexp = "regexp"
+ type = "type"
+ repo = "repo"
+ check = "check"
+ schemelessRepo = "schemelessRepo"
+
+ # GitHub
+ vcsGitHub = {}
+ vcsGitHub[vcs.pathprefix] = "github.com"
+ vcsGitHub[vcs.regexp] = re.compile(
+ r'^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsGitHub[vcs.type] = "git"
+ vcsGitHub[vcs.repo] = "https://\\g<root>"
+
+ # Bitbucket
+ vcsBitbucket = {}
+ vcsBitbucket[vcs.pathprefix] = "bitbucket.org"
+ vcsBitbucket[vcs.regexp] = re.compile(
+ r'^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsBitbucket[vcs.type] = "git"
+ vcsBitbucket[vcs.repo] = "https://\\g<root>"
+
+ # IBM DevOps Services (JazzHub)
+ vcsIBMDevOps = {}
+ vcsIBMDevOps[vcs.pathprefix] = "hub.jazz.net/git"
+ vcsIBMDevOps[vcs.regexp] = re.compile(
+ r'^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsIBMDevOps[vcs.type] = "git"
+ vcsIBMDevOps[vcs.repo] = "https://\\g<root>"
+
+ # Git at Apache
+ vcsApacheGit = {}
+ vcsApacheGit[vcs.pathprefix] = "git.apache.org"
+ vcsApacheGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsApacheGit[vcs.type] = "git"
+ vcsApacheGit[vcs.repo] = "https://\\g<root>"
+
+ # Git at OpenStack
+ vcsOpenStackGit = {}
+ vcsOpenStackGit[vcs.pathprefix] = "git.openstack.org"
+ vcsOpenStackGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsOpenStackGit[vcs.type] = "git"
+ vcsOpenStackGit[vcs.repo] = "https://\\g<root>"
+
+ # chiselapp.com for fossil
+ vcsChiselapp = {}
+ vcsChiselapp[vcs.pathprefix] = "chiselapp.com"
+ vcsChiselapp[vcs.regexp] = re.compile(
+ r'^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$')
+ vcsChiselapp[vcs.type] = "fossil"
+ vcsChiselapp[vcs.repo] = "https://\\g<root>"
+
+ # General syntax for any server.
+ # Must be last.
+ vcsGeneralServer = {}
+ vcsGeneralServer[vcs.regexp] = re.compile(
+ "(?P<root>(?P<repo>([a-z0-9.\\-]+\\.)+[a-z0-9.\\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\\-]+)+?)\\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?(?P<suffix>[A-Za-z0-9_.\\-]+))*$")
+ vcsGeneralServer[vcs.schemelessRepo] = True
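+ # Illustrative example (hypothetical host): "git.example.org/group/project.git/subpkg"
+ # would match vcsGeneralServer with root "git.example.org/group/project.git",
+ # vcs type "git" and suffix "subpkg".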
+
+ vcsPaths = [vcsGitHub, vcsBitbucket, vcsIBMDevOps,
+ vcsApacheGit, vcsOpenStackGit, vcsChiselapp,
+ vcsGeneralServer]
+
+ if modulepath.startswith("example.net") or modulepath == "rsc.io":
+ logger.warning("Suspicious module path %s" % modulepath)
+ return None
+ if modulepath.startswith("http:") or modulepath.startswith("https:"):
+ logger.warning("Import path should not start with %s %s" %
+ ("http", "https"))
+ return None
+
+ rootpath = None
+ vcstype = None
+ repourl = None
+ suffix = None
+
+ for srv in vcsPaths:
+ m = srv[vcs.regexp].match(modulepath)
+ if vcs.pathprefix in srv:
+ if host == srv[vcs.pathprefix]:
+ rootpath = m.group('root')
+ vcstype = srv[vcs.type]
+ repourl = m.expand(srv[vcs.repo])
+ suffix = m.group('suffix')
+ break
+ elif m and srv[vcs.schemelessRepo]:
+ rootpath = m.group('root')
+ vcstype = m[vcs.type]
+ repourl = m[vcs.repo]
+ suffix = m.group('suffix')
+ break
+
+ return GoImport(rootpath, vcstype, repourl, suffix)
+
+ def __resolve_repository_dynamic(self, modulepath):
+ """Resolve the repository root in a dynamic manner.
+
+ The method is based on the go implementation of
+ `repoRootForImportDynamic` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+ url = urllib.parse.urlparse("https://" + modulepath)
+
+ class GoImportHTMLParser(HTMLParser):
+
+ def __init__(self):
+ super().__init__()
+ self.__srv = {}
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'meta' and list(
+ filter(lambda a: (a[0] == 'name' and a[1] == 'go-import'), attrs)):
+ content = list(
+ filter(lambda a: (a[0] == 'content'), attrs))
+ if content:
+ srv = content[0][1].split()
+ self.__srv[srv[0]] = srv
+
+ def go_import(self, modulepath):
+ if modulepath in self.__srv:
+ srv = self.__srv[modulepath]
+ return GoImport(srv[0], srv[1], srv[2], None)
+ return None
+
+ url = url.geturl() + "?go-get=1"
+ req = urllib.request.Request(url)
+
+ try:
+ body = urllib.request.urlopen(req).read()
+ except HTTPError as http_err:
+ logger.warning(
+ "Unclean status when fetching page from [%s]: %s", url, str(http_err))
+ body = http_err.fp.read()
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ parser = GoImportHTMLParser()
+ parser.feed(body.decode('utf-8'))
+ parser.close()
+
+ return parser.go_import(modulepath)
+
+ def __resolve_from_golang_proxy(self, modulepath, version):
+ """
+ Resolves repository data from golang proxy
+ """
+ url = urllib.parse.urlparse("https://proxy.golang.org/"
+ + modulepath
+ + "/@v/"
+ + version
+ + ".info")
+
+ # Transform url to lower case, golang proxy doesn't like mixed case
+ req = urllib.request.Request(url.geturl().lower())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ golang_proxy_res = resp.read().decode('utf-8')
+ modinfo = json.loads(golang_proxy_res)
+
+ if modinfo and 'Origin' in modinfo:
+ origin = modinfo['Origin']
+ _root_url = urllib.parse.urlparse(origin['URL'])
+
+ # We normalize the repo URL since we don't want the scheme in it
+ _subdir = origin['Subdir'] if 'Subdir' in origin else None
+ _root, _, _ = self.__split_path_version(modulepath)
+ if _subdir:
+ _root = _root[:-len(_subdir)].strip('/')
+
+ _commit = origin['Hash']
+ _vcs = origin['VCS']
+ return (GoImport(_root, _vcs, _root_url.geturl(), None), _commit)
+
+ return None
+
+ def __resolve_repository(self, modulepath):
+ """
+ Resolves src uri from go module-path
+ """
+ repodata = self.__resolve_repository_static(modulepath)
+ if not repodata or not repodata.url:
+ repodata = self.__resolve_repository_dynamic(modulepath)
+ if not repodata or not repodata.url:
+ logger.error(
+ "Could not resolve repository for module path '%s'" % modulepath)
+ # There is no way to recover from this
+ sys.exit(14)
+ if repodata:
+ logger.debug(1, "Resolved download path for import '%s' => %s" % (
+ modulepath, repodata.url))
+ return repodata
+
+ def __split_path_version(self, path):
+ i = len(path)
+ dot = False
+ for j in range(i, 0, -1):
+ if path[j - 1] < '0' or path[j - 1] > '9':
+ break
+ if path[j - 1] == '.':
+ dot = True
+ break
+ i = j - 1
+
+ if i <= 1 or i == len(
+ path) or path[i - 1] != 'v' or path[i - 2] != '/':
+ return path, "", True
+
+ prefix, pathMajor = path[:i - 2], path[i - 2:]
+ if dot or len(
+ pathMajor) <= 2 or pathMajor[2] == '0' or pathMajor == "/v1":
+ return path, "", False
+
+ return prefix, pathMajor, True
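+ # Illustrative example: __split_path_version("example.com/mod/v2") returns
+ # ("example.com/mod", "/v2", True), while a path without a major-version
+ # suffix is returned unchanged with an empty pathMajor.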
+
+ def __get_path_major(self, pathMajor):
+ if not pathMajor:
+ return ""
+
+ if pathMajor[0] != '/' and pathMajor[0] != '.':
+ logger.error(
+ "pathMajor suffix %s passed to PathMajorPrefix lacks separator", pathMajor)
+
+ if pathMajor.startswith(".v") and pathMajor.endswith("-unstable"):
+ pathMajor = pathMajor[:len("-unstable") - 2]
+
+ return pathMajor[1:]
+
+ def __build_coderepo(self, repo, path):
+ codedir = ""
+ pathprefix, pathMajor, _ = self.__split_path_version(path)
+ if repo.root == path:
+ pathprefix = path
+ elif path.startswith(repo.root):
+ codedir = pathprefix[len(repo.root):].strip('/')
+
+ pseudoMajor = self.__get_path_major(pathMajor)
+
+ logger.debug("root='%s', codedir='%s', prefix='%s', pathMajor='%s', pseudoMajor='%s'",
+ repo.root, codedir, pathprefix, pathMajor, pseudoMajor)
+
+ return CodeRepo(path, repo.root, codedir,
+ pathMajor, pathprefix, pseudoMajor)
+
+ def __resolve_version(self, repo, path, version):
+ hash = None
+ coderoot = self.__build_coderepo(repo, path)
+
+ def vcs_fetch_all():
+ tmpdir = tempfile.mkdtemp()
+ clone_cmd = "%s clone --bare %s %s" % ('git', repo.url, tmpdir)
+ bb.process.run(clone_cmd)
+ log_cmd = "git log --all --pretty='%H %d' --decorate=short"
+ output, _ = bb.process.run(
+ log_cmd, shell=True, stderr=subprocess.PIPE, cwd=tmpdir)
+ bb.utils.prunedir(tmpdir)
+ return output.strip().split('\n')
+
+ def vcs_fetch_remote(tag):
+ # add * to grab ^{}
+ refs = {}
+ ls_remote_cmd = "git ls-remote -q --tags {} {}*".format(
+ repo.url, tag)
+ output, _ = bb.process.run(ls_remote_cmd)
+ output = output.strip().split('\n')
+ for line in output:
+ f = line.split(maxsplit=1)
+ if len(f) != 2:
+ continue
+
+ for prefix in ["HEAD", "refs/heads/", "refs/tags/"]:
+ if f[1].startswith(prefix):
+ refs[f[1][len(prefix):]] = f[0]
+
+ for key, hash in refs.items():
+ if key.endswith(r"^{}"):
+ refs[key.strip(r"^{}")] = hash
+
+ return refs[tag]
+
+ m_pseudo_semver = re_pseudo_semver.match(version)
+
+ if m_pseudo_semver:
+ remote_refs = vcs_fetch_all()
+ short_commit = m_pseudo_semver.group('commithash')
+ for l in remote_refs:
+ r = l.split(maxsplit=1)
+ sha1 = r[0] if len(r) else None
+ if not sha1:
+ logger.error(
+ "Ups: could not resolve abbref commit for %s" % short_commit)
+
+ elif sha1.startswith(short_commit):
+ hash = sha1
+ break
+ else:
+ m_semver = re_semver.match(version)
+ if m_semver:
+
+ def get_sha1_remote(re):
+ rsha1 = None
+ for line in remote_refs:
+ # Split lines of the following format:
+ # 22e90d9b964610628c10f673ca5f85b8c2a2ca9a (tag: sometag)
+ lineparts = line.split(maxsplit=1)
+ sha1 = lineparts[0] if len(lineparts) else None
+ refstring = lineparts[1] if len(
+ lineparts) == 2 else None
+ if refstring:
+ # Normalize tag string and split in case of multiple
+ # regs e.g. (tag: speech/v1.10.0, tag: orchestration/v1.5.0 ...)
+ refs = refstring.strip('(), ').split(',')
+ for ref in refs:
+ if re.match(ref.strip()):
+ rsha1 = sha1
+ return rsha1
+
+ semver = "v" + m_semver.group('major') + "."\
+ + m_semver.group('minor') + "."\
+ + m_semver.group('patch') \
+ + (("-" + m_semver.group('prerelease'))
+ if m_semver.group('prerelease') else "")
+
+ tag = os.path.join(
+ coderoot.codeDir, semver) if coderoot.codeDir else semver
+
+ # probe tag using 'ls-remote', which is faster than fetching
+ # complete history
+ hash = vcs_fetch_remote(tag)
+ if not hash:
+ # backup: fetch complete history
+ remote_refs = vcs_fetch_all()
+ hash = get_sha1_remote(
+ re.compile(fr"(tag:|HEAD ->) ({tag})"))
+
+ logger.debug(
+ "Resolving commit for tag '%s' -> '%s'", tag, hash)
+ return hash
+
+ def __generate_srcuri_inline_fcn(self, path, version, replaces=None):
+ """Generate SRC_URI functions for go imports"""
+
+ logger.info("Resolving repository for module %s", path)
+ # First try to resolve repo and commit from golang proxy
+ # Most info is already there and we don't have to go through the
+ # repository or even perform the version resolve magic
+ golang_proxy_info = self.__resolve_from_golang_proxy(path, version)
+ if golang_proxy_info:
+ repo = golang_proxy_info[0]
+ commit = golang_proxy_info[1]
+ else:
+ # Fallback
+ # Resolve repository by 'hand'
+ repo = self.__resolve_repository(path)
+ commit = self.__resolve_version(repo, path, version)
+
+ url = urllib.parse.urlparse(repo.url)
+ repo_url = url.netloc + url.path
+
+ coderoot = self.__build_coderepo(repo, path)
+
+ inline_fcn = "${@go_src_uri("
+ inline_fcn += f"'{repo_url}','{version}'"
+ if repo_url != path:
+ inline_fcn += f",path='{path}'"
+ if coderoot.codeDir:
+ inline_fcn += f",subdir='{coderoot.codeDir}'"
+ if repo.vcs != 'git':
+ inline_fcn += f",vcs='{repo.vcs}'"
+ if replaces:
+ inline_fcn += f",replaces='{replaces}'"
+ if coderoot.pathMajor:
+ inline_fcn += f",pathmajor='{coderoot.pathMajor}'"
+ inline_fcn += ")}"
+
+ return inline_fcn, commit
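+ # For illustration (hypothetical module): a dependency github.com/example/mod at
+ # v1.2.3 whose repository URL equals its module path would typically yield
+ # ${@go_src_uri('github.com/example/mod','v1.2.3')} plus the resolved commit hash.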
+
+ def __go_handle_dependencies(self, go_mod, srctree, localfilesdir, extravalues, d):
+
+ import re
+ src_uris = []
+ src_revs = []
+
+ def generate_src_rev(path, version, commithash):
+ src_rev = f"# {path}@{version} => {commithash}\n"
+ # Oops... maybe someone manipulated the source repository and the
+ # version or commit could not be resolved. This is a sign that either
+ # a) the supply chain was manipulated (bad), or
+ # b) the implementation of the version resolving no longer works
+ # (less bad)
+ if not commithash:
+ src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += f"#!!! Could not resolve version !!!\n"
+ src_rev += f"#!!! Possible supply chain attack !!!\n"
+ src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += f"SRCREV_{path.replace('/', '.')} = \"{commithash}\""
+
+ return src_rev
+
+ # We first go over the replacement list, because we are essentially
+ # interested only in the replaced path
+ if go_mod['Replace']:
+ for replacement in go_mod['Replace']:
+ oldpath = replacement['Old']['Path']
+ path = replacement['New']['Path']
+ version = ''
+ if 'Version' in replacement['New']:
+ version = replacement['New']['Version']
+
+ if os.path.exists(os.path.join(srctree, path)):
+ # the module refers to the local path, remove it from requirement list
+ # because it's a local module
+ go_mod['Require'][:] = [v for v in go_mod['Require'] if v.get('Path') != oldpath]
+ else:
+ # Replace the path and the version, so we don't iterate replacement list anymore
+ for require in go_mod['Require']:
+ if require['Path'] == oldpath:
+ require.update({'Path': path, 'Version': version})
+ break
+
+ for require in go_mod['Require']:
+ path = require['Path']
+ version = require['Version']
+
+ inline_fcn, commithash = self.__generate_srcuri_inline_fcn(
+ path, version)
+ src_uris.append(inline_fcn)
+ src_revs.append(generate_src_rev(path, version, commithash))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ go_mods_basename = "%s-modules.inc" % pn
+
+ go_mods_filename = os.path.join(localfilesdir, go_mods_basename)
+ with open(go_mods_filename, "w") as f:
+ # We introduce this indirection to make the tests a little easier
+ f.write("SRC_URI += \"${GO_DEPENDENCIES_SRC_URI}\"\n")
+ f.write("GO_DEPENDENCIES_SRC_URI = \"\\\n")
+ for uri in src_uris:
+ f.write(" " + uri + " \\\n")
+ f.write("\"\n\n")
+ for rev in src_revs:
+ f.write(rev + "\n")
+
+ extravalues['extrafiles'][go_mods_basename] = go_mods_filename
+
+ def __go_run_cmd(self, cmd, cwd, d):
+ return bb.process.run(cmd, env=dict(os.environ, PATH=d.getVar('PATH')),
+ shell=True, cwd=cwd)
+
+ def __go_native_version(self, d):
+ stdout, _ = self.__go_run_cmd("go version", None, d)
+ m = re.match(r".*\sgo((\d+).(\d+).(\d+))\s([\w\/]*)", stdout)
+ major = int(m.group(2))
+ minor = int(m.group(3))
+ patch = int(m.group(4))
+
+ return major, minor, patch
+
+ def __go_mod_patch(self, srctree, localfilesdir, extravalues, d):
+
+ patchfilename = "go.mod.patch"
+ go_native_version_major, go_native_version_minor, _ = self.__go_native_version(
+ d)
+ self.__go_run_cmd("go mod tidy -go=%d.%d" %
+ (go_native_version_major, go_native_version_minor), srctree, d)
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ # Create patch in order to upgrade go version
+ self.__go_run_cmd("git diff go.mod > %s" % (patchfilename), srctree, d)
+ # Restore original state
+ self.__go_run_cmd("git checkout HEAD go.mod go.sum", srctree, d)
+
+ go_mod = json.loads(stdout)
+ tmpfile = os.path.join(localfilesdir, patchfilename)
+ shutil.move(os.path.join(srctree, patchfilename), tmpfile)
+
+ extravalues['extrafiles'][patchfilename] = tmpfile
+
+ return go_mod, patchfilename
+
+ def __go_mod_vendor(self, go_mod, srctree, localfilesdir, extravalues, d):
+ # Perform vendoring to retrieve the correct modules.txt
+ tmp_vendor_dir = tempfile.mkdtemp()
+
+ # -v causes to go to print modules.txt to stderr
+ _, stderr = self.__go_run_cmd(
+ "go mod vendor -v -o %s" % (tmp_vendor_dir), srctree, d)
+
+ modules_txt_basename = "modules.txt"
+ modules_txt_filename = os.path.join(localfilesdir, modules_txt_basename)
+ with open(modules_txt_filename, "w") as f:
+ f.write(stderr)
+
+ extravalues['extrafiles'][modules_txt_basename] = modules_txt_filename
+
+ licenses = []
+ lic_files_chksum = []
+ licvalues = guess_license(tmp_vendor_dir, d)
+ shutil.rmtree(tmp_vendor_dir)
+
+ if licvalues:
+ for licvalue in licvalues:
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if len(lics):
+ licenses.extend(lics)
+ lic_files_chksum.append(
+ 'file://src/${GO_IMPORT}/vendor/%s;md5=%s' % (licvalue[1], licvalue[2]))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ licenses_basename = "%s-licenses.inc" % pn
+
+ licenses_filename = os.path.join(localfilesdir, licenses_basename)
+ with open(licenses_filename, "w") as f:
+ f.write("GO_MOD_LICENSES = \"%s\"\n\n" %
+ ' & '.join(sorted(licenses, key=str.casefold)))
+ # We introduce this indirection to make the tests a little easier
+ f.write("LIC_FILES_CHKSUM += \"${VENDORED_LIC_FILES_CHKSUM}\"\n")
+ f.write("VENDORED_LIC_FILES_CHKSUM = \"\\\n")
+ for lic in lic_files_chksum:
+ f.write(" " + lic + " \\\n")
+ f.write("\"\n")
+
+ extravalues['extrafiles'][licenses_basename] = licenses_filename
+
+ def process(self, srctree, classes, lines_before,
+ lines_after, handled, extravalues):
+
+ if 'buildsystem' in handled:
+ return False
+
+ files = RecipeHandler.checkfiles(srctree, ['go.mod'])
+ if not files:
+ return False
+
+ d = bb.data.createCopy(tinfoil.config_data)
+ go_bindir = self.__ensure_go()
+ if not go_bindir:
+ sys.exit(14)
+
+ d.prependVar('PATH', '%s:' % go_bindir)
+ handled.append('buildsystem')
+ classes.append("go-vendor")
+
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ go_mod = json.loads(stdout)
+ go_import = go_mod['Module']['Path']
+ go_version_match = re.match("([0-9]+).([0-9]+)", go_mod['Go'])
+ go_version_major = int(go_version_match.group(1))
+ go_version_minor = int(go_version_match.group(2))
+ src_uris = []
+
+ localfilesdir = tempfile.mkdtemp(prefix='recipetool-go-')
+ extravalues.setdefault('extrafiles', {})
+
+ # Use an explicit name determined from the module name because it
+ # might differ from the actual URL for replaced modules
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+
+ # go.mod files with version < 1.17 may not include all indirect
+ # dependencies. Thus, we have to upgrade the go version.
+ if go_version_major == 1 and go_version_minor < 17:
+ logger.warning(
+ "go.mod files generated by Go < 1.17 might have incomplete indirect dependencies.")
+ go_mod, patchfilename = self.__go_mod_patch(srctree, localfilesdir,
+ extravalues, d)
+ src_uris.append(
+ "file://%s;patchdir=src/${GO_IMPORT}" % (patchfilename))
+
+ # Check whether the module is vendored. If so, we have nothing to do.
+ # Otherwise we gather all dependencies and add them to the recipe
+ if not os.path.exists(os.path.join(srctree, "vendor")):
+
+ # Write additional $BPN-modules.inc file
+ self.__go_mod_vendor(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("LICENSE += \" & ${GO_MOD_LICENSES}\"")
+ lines_before.append("require %s-licenses.inc" % (pn))
+
+ self.__rewrite_src_uri(lines_before, ["file://modules.txt"])
+
+ self.__go_handle_dependencies(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("require %s-modules.inc" % (pn))
+
+ # Do generic license handling
+ handle_license_vars(srctree, lines_before, handled, extravalues, d)
+ self.__rewrite_lic_uri(lines_before)
+
+ lines_before.append("GO_IMPORT = \"{}\"".format(baseurl))
+ lines_before.append("SRCREV_FORMAT = \"${BPN}\"")
+
+ def __update_lines_before(self, updated, newlines, lines_before):
+ if updated:
+ del lines_before[:]
+ for line in newlines:
+ # Hack to avoid newlines that edit_metadata inserts
+ if line.endswith('\n'):
+ line = line[:-1]
+ lines_before.append(line)
+ return updated
+
+ def __rewrite_lic_uri(self, lines_before):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'LIC_FILES_CHKSUM':
+ new_licenses = []
+ licenses = origvalue.split('\\')
+ for license in licenses:
+ if not license:
+ logger.warning("No license file was detected for the main module!")
+ # the license list of the main recipe must be empty
+ # this can happen for example in case of CLOSED license
+ # Fall through to complete recipe generation
+ continue
+ license = license.strip()
+ uri, chksum = license.split(';', 1)
+ url = urllib.parse.urlparse(uri)
+ new_uri = os.path.join(
+ url.scheme + "://", "src", "${GO_IMPORT}", url.netloc + url.path) + ";" + chksum
+ new_licenses.append(new_uri)
+
+ return new_licenses, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(
+ lines_before, ['LIC_FILES_CHKSUM'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+ def __rewrite_src_uri(self, lines_before, additional_uris = []):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'SRC_URI':
+ src_uri = ["git://${GO_IMPORT};destsuffix=git/src/${GO_IMPORT};nobranch=1;name=${BPN};protocol=https"]
+ src_uri.extend(additional_uris)
+ return src_uri, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+
+def register_recipe_handlers(handlers):
+ handlers.append((GoRecipeHandler(), 60))
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
index 3394a89970..113a89f6a6 100644
--- a/scripts/lib/recipetool/create_npm.py
+++ b/scripts/lib/recipetool/create_npm.py
@@ -13,6 +13,7 @@ import sys
import tempfile
import bb
from bb.fetch2.npm import NpmEnvironment
+from bb.fetch2.npm import npm_package
from bb.fetch2.npmsw import foreach_dependencies
from recipetool.create import RecipeHandler
from recipetool.create import get_license_md5sums
@@ -31,15 +32,6 @@ class NpmRecipeHandler(RecipeHandler):
"""Class to handle the npm recipe creation"""
@staticmethod
- def _npm_name(name):
- """Generate a Yocto friendly npm name"""
- name = re.sub("/", "-", name)
- name = name.lower()
- name = re.sub(r"[^\-a-z0-9]", "", name)
- name = name.strip("-")
- return name
-
- @staticmethod
def _get_registry(lines):
"""Get the registry value from the 'npm://registry' url"""
registry = None
@@ -142,11 +134,10 @@ class NpmRecipeHandler(RecipeHandler):
licfiles.append(os.path.relpath(readme, srctree))
# Handle the dependencies
- def _handle_dependency(name, params, deptree):
- suffix = "-".join([self._npm_name(dep) for dep in deptree])
- destdirs = [os.path.join("node_modules", dep) for dep in deptree]
- destdir = os.path.join(*destdirs)
- packages["${PN}-" + suffix] = destdir
+ def _handle_dependency(name, params, destdir):
+ deptree = destdir.split('node_modules/')
+ suffix = "-".join([npm_package(dep) for dep in deptree])
+ packages["${PN}" + suffix] = destdir
_licfiles_append_fallback_readme_files(destdir)
with open(shrinkwrap_file, "r") as f:
@@ -155,6 +146,23 @@ class NpmRecipeHandler(RecipeHandler):
foreach_dependencies(shrinkwrap, _handle_dependency, dev)
return licfiles, packages
+
+ # Handle the peer dependencies
+ def _handle_peer_dependency(self, shrinkwrap_file):
+ """Check if package has peer dependencies and show warning if it is the case"""
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ packages = shrinkwrap.get("packages", {})
+ peer_deps = packages.get("", {}).get("peerDependencies", {})
+
+ for peer_dep in peer_deps:
+ peer_dep_yocto_name = npm_package(peer_dep)
+ bb.warn(peer_dep + " is a peer dependency of the current package. " +
+ "Please add this peer dependency to the RDEPENDS variable as %s and generate its recipe with devtool"
+ % peer_dep_yocto_name)
+
+
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
"""Handle the npm recipe creation"""
@@ -173,7 +181,7 @@ class NpmRecipeHandler(RecipeHandler):
if "name" not in data or "version" not in data:
return False
- extravalues["PN"] = self._npm_name(data["name"])
+ extravalues["PN"] = npm_package(data["name"])
extravalues["PV"] = data["version"]
if "description" in data:
@@ -242,7 +250,7 @@ class NpmRecipeHandler(RecipeHandler):
value = origvalue.replace("version=" + data["version"], "version=${PV}")
value = value.replace("version=latest", "version=${PV}")
values = [line.strip() for line in value.strip('\n').splitlines()]
- if "dependencies" in shrinkwrap:
+ if "dependencies" in shrinkwrap.get("packages", {}).get("", {}):
values.append(url_recipe)
return values, None, 4, False
@@ -292,6 +300,9 @@ class NpmRecipeHandler(RecipeHandler):
classes.append("npm")
handled.append("buildsystem")
+ # Check if package has peer dependencies and inform the user
+ self._handle_peer_dependency(shrinkwrap_file)
+
return True
def register_recipe_handlers(handlers):
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
index f8e2ee75fb..b5ad335cae 100644
--- a/scripts/lib/recipetool/setvar.py
+++ b/scripts/lib/recipetool/setvar.py
@@ -49,6 +49,7 @@ def setvar(args):
for patch in patches:
for line in patch:
sys.stdout.write(line)
+ tinfoil.modified_files()
return 0
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
index eb3927ec82..15148ca288 100644
--- a/scripts/lib/resulttool/log.py
+++ b/scripts/lib/resulttool/log.py
@@ -28,12 +28,10 @@ def show_reproducible(result, reproducible, logger):
def log(args, logger):
results = resultutils.load_resultsdata(args.source)
- ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
- if ptest_count > 1 and not args.prepend_run:
- print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
- return 1
-
for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.list_ptest:
+ print('\n'.join(sorted(r['ptestresult.sections'].keys())))
+
if args.dump_ptest:
for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
if sectname in r:
@@ -48,6 +46,9 @@ def log(args, logger):
os.makedirs(dest_dir, exist_ok=True)
dest = os.path.join(dest_dir, '%s.log' % name)
+ if os.path.exists(dest):
+ print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
+ continue
print(dest)
with open(dest, 'w') as f:
f.write(logdata)
@@ -86,6 +87,8 @@ def register_commands(subparsers):
parser.set_defaults(func=log)
parser.add_argument('source',
help='the results file/directory/URL to import')
+ parser.add_argument('--list-ptest', action='store_true',
+ help='list the ptest test names')
parser.add_argument('--ptest', action='append', default=[],
help='show logs for a ptest')
parser.add_argument('--dump-ptest', metavar='DIR',
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 9f952951b3..10e7d13841 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -7,15 +7,209 @@
#
import resulttool.resultutils as resultutils
-import json
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive
-def compare_result(logger, base_name, target_name, base_result, target_result):
+METADATA_MATCH_TABLE = {
+ "oeselftest": "OESELFTEST_METADATA"
+}
+
+OESELFTEST_METADATA_GUESS_TABLE={
+ "trigger-build-posttrigger": {
+ "run_all_tests": False,
+ "run_tests":["buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "skips": None,
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": None
+ },
+ "reproducible": {
+ "run_all_tests": False,
+ "run_tests":["reproducible"],
+ "skips": None,
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": None
+ },
+ "arch-qemu-quick": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-x86-or-x86_64": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine", "toolchain-system"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-others": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine", "toolchain-user"],
+ "exclude_tags": None
+ },
+ "selftest": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ },
+ "bringup": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ }
+}
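+# For illustration: a result set that contains only
+# "buildoptions.SourceMirroring.test_yocto_source_mirror" is assumed by
+# guess_oeselftest_metadata() below to come from the "trigger-build-posttrigger" target.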
+
+STATUS_STRINGS = {
+ "None": "No matching test result"
+}
+
+REGRESSIONS_DISPLAY_LIMIT=50
+
+MISSING_TESTS_BANNER = "-------------------------- Missing tests --------------------------"
+ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
+
+def test_has_at_least_one_matching_tag(test, tag_list):
+ return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
+
+def all_tests_have_at_least_one_matching_tag(results, tag_list):
+ return all(test_has_at_least_one_matching_tag(test_result, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
+
+def any_test_have_any_matching_tag(results, tag_list):
+ return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
+
+def have_skipped_test(result, test_prefix):
+ return all( result[test]['status'] == "SKIPPED" for test in result if test.startswith(test_prefix))
+
+def have_all_tests_skipped(result, test_prefixes_list):
+ return all(have_skipped_test(result, test_prefix) for test_prefix in test_prefixes_list)
+
+def guess_oeselftest_metadata(results):
+ """
+ When an oeselftest test result is lacking OESELFTEST_METADATA, we can try to guess it based on results content.
+ Check results for specific values (absence/presence of oetags, number and name of executed tests...),
+ and if it matches one of known configuration from autobuilder configuration, apply guessed OSELFTEST_METADATA
+ to it to allow proper test filtering.
+ This guessing process is tightly coupled to config.json in autobuilder. It should trigger less and less,
+ as new tests will have OESELFTEST_METADATA properly appended at test reporting time
+ """
+
+ if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
+ return OESELFTEST_METADATA_GUESS_TABLE['trigger-build-posttrigger']
+ elif all(result.startswith("reproducible") for result in results):
+ return OESELFTEST_METADATA_GUESS_TABLE['reproducible']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-quick']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-x86-or-x86_64']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-others']
+ elif not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
+ if have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['selftest']
+ elif have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['bringup']
+
+ return None
+
+
+def metadata_matches(base_configuration, target_configuration):
+ """
+ For passed base and target, check test type. If test type matches one of
+ properties described in METADATA_MATCH_TABLE, compare metadata if it is
+ present in base. Return true if metadata matches, or if base lacks some
+ data (either TEST_TYPE or the corresponding metadata)
+ """
+ test_type = base_configuration.get('TEST_TYPE')
+ if test_type not in METADATA_MATCH_TABLE:
+ return True
+
+ metadata_key = METADATA_MATCH_TABLE.get(test_type)
+ if target_configuration.get(metadata_key) != base_configuration.get(metadata_key):
+ return False
+
+ return True
+
+
+def machine_matches(base_configuration, target_configuration):
+ return base_configuration.get('MACHINE') == target_configuration.get('MACHINE')
+
+
+def can_be_compared(logger, base, target):
+ """
+ Some tests are not relevant to be compared, for example some oeselftest
+ run with different tests sets or parameters. Return true if tests can be
+ compared
+ """
+ ret = True
+ base_configuration = base['configuration']
+ target_configuration = target['configuration']
+
+ # Older test results lack proper OESELFTEST_METADATA: if not present, try to guess it based on tests results.
+ if base_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in base_configuration:
+ guess = guess_oeselftest_metadata(base['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {base_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {base_configuration['STARTTIME']} with {guess}")
+ base_configuration['OESELFTEST_METADATA'] = guess
+ if target_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in target_configuration:
+ guess = guess_oeselftest_metadata(target['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {target_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {target_configuration['STARTTIME']} with {guess}")
+ target_configuration['OESELFTEST_METADATA'] = guess
+
+ # Test runs with LTP results in them should only be compared with other runs with LTP tests in them
+ if base_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in base['result']):
+ ret = target_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in target['result'])
+
+ return ret and metadata_matches(base_configuration, target_configuration) \
+ and machine_matches(base_configuration, target_configuration)
+
+def get_status_str(raw_status):
+ raw_status_lower = raw_status.lower() if raw_status else "None"
+ return STATUS_STRINGS.get(raw_status_lower, raw_status)
+
+def get_additional_info_line(new_pass_count, new_tests):
+ result=[]
+ if new_tests:
+ result.append(f'+{new_tests} test(s) present')
+ if new_pass_count:
+ result.append(f'+{new_pass_count} test(s) now passing')
+
+ if not result:
+ return ""
+
+ return ' -> ' + ', '.join(result) + '\n'
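+# Illustrative example: get_additional_info_line(2, 1) returns
+# " -> +1 test(s) present, +2 test(s) now passing\n".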
+
+def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
base_result = base_result.get('result')
target_result = target_result.get('result')
result = {}
+ new_tests = 0
+ regressions = {}
+ resultstring = ""
+ new_pass_count = 0
+
+ display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
+
if base_result and target_result:
for k in base_result:
base_testcase = base_result[k]
@@ -27,12 +221,47 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
result[k] = {'base': base_status, 'target': target_status}
else:
logger.error('Failed to retrieved base test case status: %s' % k)
+
+ # Also count new tests that were not present in base results: it
+ # could be newly added tests, but it could also highlight test
+ # renames or fixed faulty ptests
+ for k in target_result:
+ if k not in base_result:
+ new_tests += 1
if result:
- resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
- for k in sorted(result):
- resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
+ # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
+ if new_pass_count < len(result):
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
+ # Differentiate each ptest kind when listing regressions
+ key_parts = k.split('.')
+ key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0]
+ # Append new regression to corresponding test family
+ regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
+ resultstring += f" Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n"
+ for k in regressions:
+ resultstring += f" {len(regressions[k])} regression(s) for {k}\n"
+ count_to_print=min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k])
+ resultstring += ''.join(regressions[k][:count_to_print])
+ if count_to_print < len(regressions[k]):
+ resultstring+=' [...]\n'
+ if new_pass_count > 0:
+ resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
+ if new_tests > 0:
+ resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
+ else:
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+ result = None
else:
- resultstring = "Match: %s\n %s" % (base_name, target_name)
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+
+ if not result:
+ additional_info = get_additional_info_line(new_pass_count, new_tests)
+ if additional_info:
+ resultstring += additional_info
+
return result, resultstring
def get_results(logger, source):
@@ -44,12 +273,38 @@ def regression(args, logger):
regression_common(args, logger, base_results, target_results)
+# Some test case naming is poor and contains random strings, particularly lttng/babeltrace.
+# Truncating the test names works since they contain file and line number identifiers
+# which allows us to match them without the random components.
+def fixup_ptest_names(results, logger):
+ for r in results:
+ for i in results[r]:
+ tests = list(results[r][i]['result'].keys())
+ for test in tests:
+ new = None
+ if test.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in test:
+ new = test.split("_-_")[0]
+ elif test.startswith(("ptestresult.curl.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith(("ptestresult.dbus.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith("ptestresult.binutils") and "build-st-" in test:
+ new = test.split(" ")[0]
+ elif test.startswith("ptestresult.gcc") and "/tmp/runtest." in test:
+ new = ".".join(test.split(".")[:2])
+ if new:
+ results[r][i]['result'][new] = results[r][i]['result'][test]
+ del results[r][i]['result'][test]
+
def regression_common(args, logger, base_results, target_results):
if args.base_result_id:
base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
if args.target_result_id:
target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+ fixup_ptest_names(base_results, logger)
+ fixup_ptest_names(target_results, logger)
+
matches = []
regressions = []
notfound = []
@@ -62,7 +317,9 @@ def regression_common(args, logger, base_results, target_results):
# removing any pairs which match
for c in base.copy():
for b in target.copy():
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
if not res:
matches.append(resstr)
base.remove(c)
@@ -71,15 +328,18 @@ def regression_common(args, logger, base_results, target_results):
# Should only now see regressions, we may not be able to match multiple pairs directly
for c in base:
for b in target:
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
if res:
regressions.append(resstr)
else:
notfound.append("%s not found in target" % a)
- print("\n".join(sorted(matches)))
print("\n".join(sorted(regressions)))
+ print("\n" + MISSING_TESTS_BANNER + "\n")
print("\n".join(sorted(notfound)))
-
+ print("\n" + ADDITIONAL_DATA_BANNER + "\n")
+ print("\n".join(sorted(matches)))
return 0
def regression_git(args, logger):
@@ -183,4 +443,5 @@ def register_commands(subparsers):
parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
parser_build.add_argument('--commit2', help="Revision to compare with")
parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+ parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
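A hedged usage sketch of the new display-limit knob; the git-based subcommand name and repository argument are assumptions inferred from the options above, only -l/--limit itself comes from this patch:

    # print every regression per test family instead of truncating at REGRESSIONS_DISPLAY_LIMIT
    resulttool regression-git /path/to/testresults --commit <base-rev> --commit2 <target-rev> -l 0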
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index f0ca50ebe2..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -176,7 +176,10 @@ class ResultsTextReport(object):
vals['sort'] = line['testseries'] + "_" + line['result_id']
vals['failed_testcases'] = line['failed_testcases']
for k in cols:
- vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
for k in maxlen:
if k in vals and len(vals[k]) > maxlen[k]:
maxlen[k] = len(vals[k])
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 8917022d36..c5521d81bd 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv
testseries = posixpath.basename(posixpath.dirname(url.path))
else:
with open(f, "r") as filedata:
- data = json.load(filedata)
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
testseries = os.path.basename(os.path.dirname(f))
else:
data = f
@@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section):
return decode_log(ptest['log'])
def ptestresult_get_log(results, section):
- return generic_get_log('ptestresuls.sections', results, section)
+ return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
if sectname not in results:
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index f7c1bd9e4e..f23e53cba9 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -177,7 +177,7 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
f.write('BB_STRICT_CHECKSUM = "ignore"\n')
f.write('SRC_URI = "%s"\n' % srcuri)
f.write('SRCREV = "%s"\n' % srcrev)
- f.write('PV = "0.0+${SRCPV}"\n')
+ f.write('PV = "0.0+"\n')
f.write('WORKDIR = "%s"\n' % tmpworkdir)
# Set S out of the way so it doesn't get created under the workdir
f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
@@ -277,6 +277,6 @@ def filter_src_subdirs(pth):
Used by devtool and recipetool.
"""
dirlist = os.listdir(pth)
- filterout = ['git.indirectionsymlink', 'source-date-epoch']
+ filterout = ['git.indirectionsymlink', 'source-date-epoch', 'sstate-install-recipe_qa']
dirlist = [x for x in dirlist if x not in filterout]
return dirlist
diff --git a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
index 7300e65e32..2fd286ff98 100644
--- a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
+++ b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
@@ -1,3 +1,3 @@
bootloader --ptable gpt
-part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.0
+part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.1
part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/
diff --git a/scripts/lib/wic/canned-wks/qemuloongarch.wks b/scripts/lib/wic/canned-wks/qemuloongarch.wks
new file mode 100644
index 0000000000..8465c7a8c0
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuloongarch.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for LoongArch QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index 22b45217f1..808997611a 100644
--- a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 console=tty console=ttyS0 "
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index 4d9da28172..85b39d5d74 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -46,6 +46,13 @@ def get_block_size(file_obj):
bsize = stat.st_blksize
else:
raise IOError("Unable to determine block size")
+
+ # The logic in this script only supports a maximum of a 4KB
+ # block size
+ max_block_size = 4 * 1024
+ if bsize > max_block_size:
+ bsize = max_block_size
+
return bsize
class ErrorNotSupp(Exception):
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index 73e3380cde..163535e431 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -1118,7 +1118,7 @@ COMMAND:
TOPIC:
overview - Presents an overall overview of Wic
plugins - Presents an overview and API for Wic plugins
- kickstart - Presents a Wic kicstart file reference
+ kickstart - Presents a Wic kickstart file reference
Examples:
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index d1e546b12d..7ef3dc83dd 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -171,6 +171,7 @@ class KickStart():
part.add_argument('--rootfs-dir')
part.add_argument('--type', default='primary',
choices = ('primary', 'logical'))
+ part.add_argument('--hidden', action='store_true')
# --size and --fixed-size cannot be specified together; options
# ----extra-space and --overhead-factor should also raise a parser
@@ -187,11 +188,12 @@ class KickStart():
part.add_argument('--uuid')
part.add_argument('--fsuuid')
part.add_argument('--no-fstab-update', action='store_true')
+ part.add_argument('--mbr', action='store_true')
bootloader = subparsers.add_parser('bootloader')
bootloader.add_argument('--append')
bootloader.add_argument('--configfile')
- bootloader.add_argument('--ptable', choices=('msdos', 'gpt'),
+ bootloader.add_argument('--ptable', choices=('msdos', 'gpt', 'gpt-hybrid'),
default='msdos')
bootloader.add_argument('--timeout', type=int)
bootloader.add_argument('--source')
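An illustrative kickstart fragment exercising the new parser options; the partition layout is hypothetical, only --ptable gpt-hybrid, --mbr and --hidden come from this patch:

    bootloader --ptable gpt-hybrid --timeout=0
    part /boot --source bootimg-pcbios --fstype=vfat --label boot --active --align 1024 --mbr
    part /     --source rootfs --fstype=ext4 --label root --align 1024 --use-uuid

    # on a plain GPT disk, --hidden marks a partition with the RequiredPartition attribute
    part /data --source rootfs --fstype=ext4 --label data --align 1024 --hidden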
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index 2b90821b30..1a7c140fa6 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -25,7 +25,7 @@ from wic import WicError
logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
-NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+NATIVE_RECIPES = {"bmaptool": "bmaptool",
"dumpe2fs": "e2fsprogs",
"grub-mkimage": "grub-efi",
"isohybrid": "syslinux",
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index e50871b8d7..795707ec5d 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -59,6 +59,8 @@ class Partition():
self.updated_fstab_path = None
self.has_fstab = False
self.update_fstab_in_rootfs = False
+ self.hidden = args.hidden
+ self.mbr = args.mbr
self.lineno = lineno
self.source_file = ""
@@ -133,6 +135,8 @@ class Partition():
self.update_fstab_in_rootfs = True
if not self.source:
+ if self.fstype == "none" or self.no_table:
+ return
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
"specify a non-zero --size/--fixed-size for that "
@@ -280,6 +284,20 @@ class Partition():
extraopts = self.mkfs_extraopts or "-F -i 8192"
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ if pseudo:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s;%s " % (sde_time, pseudo)
+ else:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s; " % sde_time
+
+ # Set hash_seed to generate deterministic directory indexes
+ namespace = uuid.UUID("e7429877-e7b3-4a68-a5c9-2f2fdf33d460")
+ if self.fsuuid:
+ namespace = uuid.UUID(self.fsuuid)
+ hash_seed = str(uuid.uuid5(namespace, str(sde_time)))
+ extraopts += " -E hash_seed=%s" % hash_seed
+
label_str = ""
if self.label:
label_str = "-L %s" % self.label
@@ -300,6 +318,30 @@ class Partition():
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH')))
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ files = []
+ for root, dirs, others in os.walk(rootfs_dir):
+ base = root.replace(rootfs_dir, "").rstrip(os.sep)
+ files += [ "/" if base == "" else base ]
+ files += [ base + "/" + n for n in dirs + others ]
+ with open(debugfs_script_path, "w") as f:
+ f.write("set_current_time %s\n" % (sde_time))
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time))
+ f.write("set_inode_field /etc/fstab mtime_extra 0\n")
+ for file in set(files):
+ for time in ["atime", "ctime", "crtime"]:
+ f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time))
+ f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time))
+ for time in ["wtime", "mkfs_time", "lastcheck"]:
+ f.write("set_super_value %s %s\n" % (time, sde_time))
+ for time in ["mtime", "first_error_time", "last_error_time"]:
+ f.write("set_super_value %s 0\n" % (time))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
self.check_for_Y2038_problem(rootfs, native_sysroot)
def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
@@ -353,7 +395,7 @@ class Partition():
exec_native_cmd(mcopy_cmd, native_sysroot)
if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
- mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
exec_native_cmd(mcopy_cmd, native_sysroot)
chmod_cmd = "chmod 644 %s" % rootfs
@@ -381,6 +423,9 @@ class Partition():
(extraopts, self.fsuuid, rootfs, rootfs_dir)
exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
+ def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot):
+ pass
+
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index da483daed5..a1d152659b 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -117,7 +117,7 @@ class DirectPlugin(ImagerPlugin):
updated = False
for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/" or not part.mountpoint.startswith('/'):
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
@@ -149,6 +149,9 @@ class DirectPlugin(ImagerPlugin):
self.updated_fstab_path = os.path.join(self.workdir, "fstab")
with open(self.updated_fstab_path, "w") as f:
f.writelines(fstab_lines)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ fstab_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ os.utime(self.updated_fstab_path, (fstab_time, fstab_time))
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
@@ -310,7 +313,10 @@ class PartitionedImage():
# all partitions (in bytes)
self.ptable_format = ptable_format # Partition table format
# Disk system identifier
- self.identifier = random.SystemRandom().randint(1, 0xffffffff)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.identifier = random.Random(int(os.getenv('SOURCE_DATE_EPOCH'))).randint(1, 0xffffffff)
+ else:
+ self.identifier = random.SystemRandom().randint(1, 0xffffffff)
self.partitions = partitions
self.partimages = []
@@ -336,7 +342,7 @@ class PartitionedImage():
# generate parition and filesystem UUIDs
for part in self.partitions:
if not part.uuid and part.use_uuid:
- if self.ptable_format == 'gpt':
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
part.uuid = str(uuid.uuid4())
else: # msdos partition table
part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
@@ -392,6 +398,10 @@ class PartitionedImage():
raise WicError("setting custom partition type is not " \
"implemented for msdos partitions")
+ if part.mbr and self.ptable_format != 'gpt-hybrid':
+ raise WicError("Partition may only be included in MBR with " \
+ "a gpt-hybrid partition table")
+
# Get the disk where the partition is located
self.numpart += 1
if not part.no_table:
@@ -400,7 +410,7 @@ class PartitionedImage():
if self.numpart == 1:
if self.ptable_format == "msdos":
overhead = MBR_OVERHEAD
- elif self.ptable_format == "gpt":
+ elif self.ptable_format in ("gpt", "gpt-hybrid"):
overhead = GPT_OVERHEAD
# Skip one sector required for the partitioning scheme overhead
@@ -484,7 +494,7 @@ class PartitionedImage():
# Once all the partitions have been layed out, we can calculate the
# minumim disk size
self.min_size = self.offset
- if self.ptable_format == "gpt":
+ if self.ptable_format in ("gpt", "gpt-hybrid"):
self.min_size += GPT_OVERHEAD
self.min_size *= self.sector_size
@@ -505,22 +515,49 @@ class PartitionedImage():
return exec_native_cmd(cmd, self.native_sysroot)
+ def _write_identifier(self, device, identifier):
+ logger.debug("Set disk identifier %x", identifier)
+ with open(device, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(identifier.to_bytes(4, 'little'))
+
+ def _make_disk(self, device, ptable_format, min_size):
+ logger.debug("Creating sparse file %s", device)
+ with open(device, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), min_size)
+
+ logger.debug("Initializing partition table for %s", device)
+ exec_native_cmd("parted -s %s mklabel %s" % (device, ptable_format),
+ self.native_sysroot)
+
+ def _write_disk_guid(self):
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.disk_guid = uuid.UUID(int=int(os.getenv('SOURCE_DATE_EPOCH')))
+ else:
+ self.disk_guid = uuid.uuid4()
+
+ logger.debug("Set disk guid %s", self.disk_guid)
+ sfdisk_cmd = "sfdisk --disk-id %s %s" % (self.path, self.disk_guid)
+ exec_native_cmd(sfdisk_cmd, self.native_sysroot)
+
def create(self):
- logger.debug("Creating sparse file %s", self.path)
- with open(self.path, 'w') as sparse:
- os.ftruncate(sparse.fileno(), self.min_size)
+ self._make_disk(self.path,
+ "gpt" if self.ptable_format == "gpt-hybrid" else self.ptable_format,
+ self.min_size)
- logger.debug("Initializing partition table for %s", self.path)
- exec_native_cmd("parted -s %s mklabel %s" %
- (self.path, self.ptable_format), self.native_sysroot)
+ self._write_identifier(self.path, self.identifier)
+ self._write_disk_guid()
- logger.debug("Set disk identifier %x", self.identifier)
- with open(self.path, 'r+b') as img:
- img.seek(0x1B8)
- img.write(self.identifier.to_bytes(4, 'little'))
+ if self.ptable_format == "gpt-hybrid":
+ mbr_path = self.path + ".mbr"
+ self._make_disk(mbr_path, "msdos", self.min_size)
+ self._write_identifier(mbr_path, self.identifier)
logger.debug("Creating partitions")
+ hybrid_mbr_part_num = 0
+
for part in self.partitions:
if part.num == 0:
continue
@@ -565,11 +602,19 @@ class PartitionedImage():
self._create_partition(self.path, part.type,
parted_fs_type, part.start, part.size_sec)
- if part.part_name:
+ if self.ptable_format == "gpt-hybrid" and part.mbr:
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+ self._create_partition(mbr_path, "primary",
+ parted_fs_type, part.start, part.size_sec)
+
+ if self.ptable_format in ("gpt", "gpt-hybrid") and (part.part_name or part.label):
+ partition_label = part.part_name if part.part_name else part.label
logger.debug("partition %d: set name to %s",
- part.num, part.part_name)
+ part.num, partition_label)
exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
- (part.num, part.part_name,
+ (part.num, partition_label,
self.path), self.native_sysroot)
if part.part_type:
@@ -579,32 +624,55 @@ class PartitionedImage():
(part.num, part.part_type,
self.path), self.native_sysroot)
- if part.uuid and self.ptable_format == "gpt":
+ if part.uuid and self.ptable_format in ("gpt", "gpt-hybrid"):
logger.debug("partition %d: set UUID to %s",
part.num, part.uuid)
exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
(part.num, part.uuid, self.path),
self.native_sysroot)
- if part.label and self.ptable_format == "gpt":
- logger.debug("partition %d: set name to %s",
- part.num, part.label)
- exec_native_cmd("parted -s %s name %d %s" % \
- (self.path, part.num, part.label),
- self.native_sysroot)
-
if part.active:
- flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+ flag_name = "legacy_boot" if self.ptable_format in ('gpt', 'gpt-hybrid') else "boot"
logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
flag_name, part.num, self.path)
exec_native_cmd("parted -s %s set %d %s on" % \
(self.path, part.num, flag_name),
self.native_sysroot)
+ if self.ptable_format == 'gpt-hybrid' and part.mbr:
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (mbr_path, hybrid_mbr_part_num, "boot"),
+ self.native_sysroot)
if part.system_id:
exec_native_cmd("sfdisk --part-type %s %s %s" % \
(self.path, part.num, part.system_id),
self.native_sysroot)
+ if part.hidden and self.ptable_format == "gpt":
+ logger.debug("Set hidden attribute for partition '%s' on disk '%s'",
+ part.num, self.path)
+ exec_native_cmd("sfdisk --part-attrs %s %s RequiredPartition" % \
+ (self.path, part.num),
+ self.native_sysroot)
+
+ if self.ptable_format == "gpt-hybrid":
+ # Write a protective GPT partition
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+
+ # parted cannot directly create a protective GPT partition, so
+ # create with an arbitrary type, then change it to the correct type
+ # with sfdisk
+ self._create_partition(mbr_path, "primary", "fat32", 1, GPT_OVERHEAD)
+ exec_native_cmd("sfdisk --part-type %s %d 0xee" % (mbr_path, hybrid_mbr_part_num),
+ self.native_sysroot)
+
+ # Copy hybrid MBR
+ with open(mbr_path, "rb") as mbr_file:
+ with open(self.path, "r+b") as image_file:
+ mbr = mbr_file.read(512)
+ image_file.write(mbr)
+
def cleanup(self):
pass
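With SOURCE_DATE_EPOCH exported, the disk identifier, the GPT disk GUID and the fstab timestamps handled above all become reproducible; a hedged invocation sketch (the wks file and image name are hypothetical):

    export SOURCE_DATE_EPOCH=1700000000
    wic create my-image.wks -e core-image-minimal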
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 634a808d78..13a9cddf4e 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -220,6 +220,8 @@ class BootimgEFIPlugin(SourcePlugin):
cls.do_configure_grubefi(hdddir, creator, cr_workdir, source_params)
elif source_params['loader'] == 'systemd-boot':
cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
+ elif source_params['loader'] == 'uefi-kernel':
+ pass
else:
raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
except KeyError:
@@ -330,40 +332,78 @@ class BootimgEFIPlugin(SourcePlugin):
shutil.copyfileobj(in_file, initrd)
initrd.close()
+ # Searched by systemd-boot:
+ # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
+ install_cmd = "install -d %s/EFI/Linux" % hdddir
+ exec_cmd(install_cmd)
+
+ staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
+ target_sys = get_bitbake_var("TARGET_SYS")
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -p %s" % efi_stub
+ objdump_cmd += " | awk '{ if ($1 == \"SectionAlignment\"){print $2} }'"
+
+ ret, align_str = exec_native_cmd(objdump_cmd, native_sysroot)
+ align = int(align_str, 16)
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -h %s | tail -2" % efi_stub
+ ret, output = exec_native_cmd(objdump_cmd, native_sysroot)
+
+ offset = int(output.split()[2], 16) + int(output.split()[3], 16)
+
+ osrel_off = offset + align - offset % align
+ osrel_path = "%s/usr/lib/os-release" % staging_dir_host
+ osrel_sz = os.stat(osrel_path).st_size
+
+ cmdline_off = osrel_off + osrel_sz
+ cmdline_off = cmdline_off + align - cmdline_off % align
+ cmdline_sz = os.stat(cmdline.name).st_size
+
+ dtb_off = cmdline_off + cmdline_sz
+ dtb_off = dtb_off + align - dtb_off % align
+
dtb = source_params.get('dtb')
if dtb:
if ';' in dtb:
raise WicError("Only one DTB supported, exiting")
- dtb_params = '--add-section .dtb=%s/%s --change-section-vma .dtb=0x40000' % \
- (deploy_dir, dtb)
+ dtb_path = "%s/%s" % (deploy_dir, dtb)
+ dtb_params = '--add-section .dtb=%s --change-section-vma .dtb=0x%x' % \
+ (dtb_path, dtb_off)
+ linux_off = dtb_off + os.stat(dtb_path).st_size
+ linux_off = linux_off + align - linux_off % align
else:
dtb_params = ''
+ linux_off = dtb_off
- # Searched by systemd-boot:
- # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
- install_cmd = "install -d %s/EFI/Linux" % hdddir
- exec_cmd(install_cmd)
+ linux_path = "%s/%s" % (staging_kernel_dir, kernel)
+ linux_sz = os.stat(linux_path).st_size
- staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
- target_sys = get_bitbake_var("TARGET_SYS")
+ initrd_off = linux_off + linux_sz
+ initrd_off = initrd_off + align - initrd_off % align
# https://www.freedesktop.org/software/systemd/man/systemd-stub.html
objcopy_cmd = "%s-objcopy" % target_sys
- objcopy_cmd += " --add-section .osrel=%s/usr/lib/os-release" % staging_dir_host
- objcopy_cmd += " --change-section-vma .osrel=0x20000"
+ objcopy_cmd += " --enable-deterministic-archives"
+ objcopy_cmd += " --preserve-dates"
+ objcopy_cmd += " --add-section .osrel=%s" % osrel_path
+ objcopy_cmd += " --change-section-vma .osrel=0x%x" % osrel_off
objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name
- objcopy_cmd += " --change-section-vma .cmdline=0x30000"
+ objcopy_cmd += " --change-section-vma .cmdline=0x%x" % cmdline_off
objcopy_cmd += dtb_params
- objcopy_cmd += " --add-section .linux=%s/%s" % (staging_kernel_dir, kernel)
- objcopy_cmd += " --change-section-vma .linux=0x2000000"
+ objcopy_cmd += " --add-section .linux=%s" % linux_path
+ objcopy_cmd += " --change-section-vma .linux=0x%x" % linux_off
objcopy_cmd += " --add-section .initrd=%s" % initrd.name
- objcopy_cmd += " --change-section-vma .initrd=0x3000000"
+ objcopy_cmd += " --change-section-vma .initrd=0x%x" % initrd_off
objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir)
+
exec_native_cmd(objcopy_cmd, native_sysroot)
else:
- install_cmd = "install -m 0644 %s/%s %s/%s" % \
- (staging_kernel_dir, kernel, hdddir, kernel)
- exec_cmd(install_cmd)
+ if source_params.get('install-kernel-into-boot-dir') != 'false':
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
+ exec_cmd(install_cmd)
if get_bitbake_var("IMAGE_EFI_BOOT_FILES"):
for src_path, dst_path in cls.install_task:
@@ -385,6 +425,28 @@ class BootimgEFIPlugin(SourcePlugin):
for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
exec_cmd(cp_cmd, True)
+ elif source_params['loader'] == 'uefi-kernel':
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ raise WicError("Empty KERNEL_IMAGETYPE %s\n" % target)
+ target = get_bitbake_var("TARGET_SYS")
+ if not target:
+ raise WicError("Unknown arch (TARGET_SYS) %s\n" % target)
+
+ if re.match("x86_64", target):
+ kernel_efi_image = "bootx64.efi"
+ elif re.match('i.86', target):
+ kernel_efi_image = "bootia32.efi"
+ elif re.match('aarch64', target):
+ kernel_efi_image = "bootaa64.efi"
+ elif re.match('arm', target):
+ kernel_efi_image = "bootarm.efi"
+ else:
+ raise WicError("UEFI stub kernel is incompatible with target %s" % target)
+
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith(kernel)]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, kernel_efi_image)
+ exec_cmd(cp_cmd, True)
else:
raise WicError("unrecognized bootimg-efi loader: %s" %
source_params['loader'])
@@ -396,6 +458,11 @@ class BootimgEFIPlugin(SourcePlugin):
cp_cmd = "cp %s %s/" % (startup, hdddir)
exec_cmd(cp_cmd, True)
+ for paths in part.include_path or []:
+ for path in paths:
+ cp_cmd = "cp -r %s %s/" % (path, hdddir)
+ exec_cmd(cp_cmd, True)
+
du_cmd = "du -bks %s" % hdddir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
@@ -410,6 +477,13 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index 8956162205..1071d1af3f 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -32,6 +32,7 @@ class BootimgPartitionPlugin(SourcePlugin):
"""
name = 'bootimg-partition'
+ image_boot_files_var_name = 'IMAGE_BOOT_FILES'
@classmethod
def do_configure_partition(cls, part, source_params, cr, cr_workdir,
@@ -56,12 +57,12 @@ class BootimgPartitionPlugin(SourcePlugin):
else:
var = ""
- boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var)
+ boot_files = get_bitbake_var(cls.image_boot_files_var_name + var)
if boot_files is not None:
break
if boot_files is None:
- raise WicError('No boot files defined, IMAGE_BOOT_FILES unset for entry #%d' % part.lineno)
+ raise WicError('No boot files defined, %s unset for entry #%d' % (cls.image_boot_files_var_name, part.lineno))
logger.debug('Boot files: %s', boot_files)
@@ -112,7 +113,7 @@ class BootimgPartitionPlugin(SourcePlugin):
# Use a custom configuration for extlinux.conf
extlinux_conf = custom_cfg
logger.debug("Using custom configuration file "
- "%s for extlinux.cfg", configfile)
+ "%s for extlinux.conf", configfile)
else:
raise WicError("configfile is specified but failed to "
"get it from %s." % configfile)
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index 32e47f1831..a207a83530 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -122,7 +122,7 @@ class BootimgPcbiosPlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/vmlinuz"
+ kernel = "/" + get_bitbake_var("KERNEL_IMAGETYPE")
syslinux_conf += "KERNEL " + kernel + "\n"
syslinux_conf += "APPEND label=boot root=%s %s\n" % \
@@ -155,8 +155,8 @@ class BootimgPcbiosPlugin(SourcePlugin):
kernel = "%s-%s.bin" % \
(get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
- (staging_kernel_dir, kernel, hdddir),
+ cmds = ("install -m 0644 %s/%s %s/%s" %
+ (staging_kernel_dir, kernel, hdddir, get_bitbake_var("KERNEL_IMAGETYPE")),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
diff --git a/scripts/lib/wic/plugins/source/empty.py b/scripts/lib/wic/plugins/source/empty.py
index 9c492ca206..4178912377 100644
--- a/scripts/lib/wic/plugins/source/empty.py
+++ b/scripts/lib/wic/plugins/source/empty.py
@@ -9,9 +9,19 @@
# To use it you must pass "empty" as argument for the "--source" parameter in
# the wks file. For example:
# part foo --source empty --ondisk sda --size="1024" --align 1024
+#
+# The plugin supports writing zeros to the start of the
+# partition. This is useful to overwrite old content like
+# filesystem signatures which may be re-recognized otherwise.
+# This feature can be enabled with
+# '--sourceparams="[fill|size=<N>[S|s|K|k|M|G]][,][bs=<N>[S|s|K|k|M|G]]"'
+# Conflicting or missing options throw errors.
import logging
+import os
+from wic import WicError
+from wic.ksparser import sizetype
from wic.pluginbase import SourcePlugin
logger = logging.getLogger('wic')
@@ -19,6 +29,16 @@ logger = logging.getLogger('wic')
class EmptyPartitionPlugin(SourcePlugin):
"""
Populate unformatted empty partition.
+
+ The following sourceparams are supported:
+ - fill
+ Fill the entire partition with zeros. Requires '--fixed-size' option
+ to be set.
+ - size=<N>[S|s|K|k|M|G]
+ Set the first N bytes of the partition to zero. Default unit is 'K'.
+ - bs=<N>[S|s|K|k|M|G]
+ Write at most N bytes at a time during source file creation.
+ Defaults to '1M'. Default unit is 'K'.
"""
name = 'empty'
@@ -31,4 +51,39 @@ class EmptyPartitionPlugin(SourcePlugin):
Called to do the actual content population for a partition i.e. it
'prepares' the partition to be incorporated into the image.
"""
- return
+ get_byte_count = sizetype('K', True)
+ size = 0
+
+ if 'fill' in source_params and 'size' in source_params:
+ raise WicError("Conflicting source parameters 'fill' and 'size' specified, exiting.")
+
+ # Set the size of the zeros to be written to the partition
+ if 'fill' in source_params:
+ if part.fixed_size == 0:
+ raise WicError("Source parameter 'fill' only works with the '--fixed-size' option, exiting.")
+ size = get_byte_count(part.fixed_size)
+ elif 'size' in source_params:
+ size = get_byte_count(source_params['size'])
+
+ if size == 0:
+ # Nothing to do, create empty partition
+ return
+
+ if 'bs' in source_params:
+ bs = get_byte_count(source_params['bs'])
+ else:
+ bs = get_byte_count('1M')
+
+ # Create a binary file of the requested size filled with zeros
+ source_file = os.path.join(cr_workdir, 'empty-plugin-zeros%s.bin' % part.lineno)
+ if not os.path.exists(os.path.dirname(source_file)):
+ os.makedirs(os.path.dirname(source_file))
+
+ quotient, remainder = divmod(size, bs)
+ with open(source_file, 'wb') as file:
+ for _ in range(quotient):
+ file.write(bytearray(bs))
+ file.write(bytearray(remainder))
+
+ part.size = (size + 1024 - 1) // 1024 # size in KB rounded up
+ part.source_file = source_file
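Illustrative uses of the extended empty plugin; sizes are hypothetical, the sourceparams follow the syntax documented in the header above:

    # zero the whole partition; requires --fixed-size
    part --source empty --ondisk sda --fixed-size 2048 --align 1024 --sourceparams="fill"

    # zero only the first 512 KiB, written in 1M chunks
    part --source empty --ondisk sda --size 1024 --align 1024 --sourceparams="size=512K,bs=1M"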
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
index ccf332554e..21903c2f23 100644
--- a/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -58,7 +58,8 @@ class RawCopyPlugin(SourcePlugin):
decompressor = {
".bz2": "bzip2",
".gz": "gzip",
- ".xz": "xz"
+ ".xz": "xz",
+ ".zst": "zstd -f",
}.get(extension)
if not decompressor:
raise WicError("Not supported compressor filename extension: %s" % extension)
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index fc06312ee4..e29f3a4c2f 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -224,7 +224,7 @@ class RootfsPlugin(SourcePlugin):
if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
fstab_path = os.path.join(new_rootfs, "etc/fstab")
# Assume that fstab should always be owned by root with fixed permissions
- install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path)
+ install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path)
if new_pseudo:
pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
else:
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
index 30cc417d3a..f8ae84b8b3 100755
--- a/scripts/nativesdk-intercept/chgrp
+++ b/scripts/nativesdk-intercept/chgrp
@@ -14,7 +14,10 @@ real_chgrp = shutil.which('chgrp', path=path)
args = list()
found = False
-for i in sys.argv:
+
+args.append(real_chgrp)
+
+for i in sys.argv[1:]:
if i.startswith("-"):
args.append(i)
continue
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
index 3914b3e384..0805ceb70a 100755
--- a/scripts/nativesdk-intercept/chown
+++ b/scripts/nativesdk-intercept/chown
@@ -14,7 +14,10 @@ real_chown = shutil.which('chown', path=path)
args = list()
found = False
-for i in sys.argv:
+
+args.append(real_chown)
+
+for i in sys.argv[1:]:
if i.startswith("-"):
args.append(i)
continue
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
index 485d4c52e1..2fdb19565a 100755
--- a/scripts/oe-buildenv-internal
+++ b/scripts/oe-buildenv-internal
@@ -32,12 +32,12 @@ fi
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
-py_v35_check=$(python3 -c 'import sys; print(sys.version_info >= (3,5,0))')
-if [ "$py_v35_check" != "True" ]; then
- echo >&2 "BitBake requires Python 3.5.0 or later as 'python3 (scripts/install-buildtools can be used if needed)'"
+py_v38_check=$(python3 -c 'import sys; print(sys.version_info >= (3,8,0))')
+if [ "$py_v38_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.8.0 or later as 'python3' (scripts/install-buildtools can be used if needed)"
return 1
fi
-unset py_v35_check
+unset py_v38_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -92,19 +92,20 @@ fi
PYTHONPATH=$BITBAKEDIR/lib:$PYTHONPATH
export PYTHONPATH
+# Remove any paths added by sourcing this script before
+[ -n "$OE_ADDED_PATHS" ] && PATH=$(echo $PATH | sed -e "s#$OE_ADDED_PATHS##") ||
+ PATH=$(echo $PATH | sed -e "s#$OEROOT/scripts:$BITBAKEDIR/bin:##")
+
# Make sure our paths are at the beginning of $PATH
-for newpath in "$BITBAKEDIR/bin" "$OEROOT/scripts"; do
- # Remove any existences of $newpath from $PATH
- PATH=$(echo $PATH | sed -re "s#(^|:)$newpath(:|$)#\2#g;s#^:##")
+OE_ADDED_PATHS="$OEROOT/scripts:$BITBAKEDIR/bin:"
+PATH="$OE_ADDED_PATHS$PATH"
+export OE_ADDED_PATHS
- # Add $newpath to $PATH
- PATH="$newpath:$PATH"
-done
-unset BITBAKEDIR newpath
+# This is not needed anymore
+unset BITBAKEDIR
# Used by the runqemu script
export BUILDDIR
-export PATH
BB_ENV_PASSTHROUGH_ADDITIONS_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
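The net effect of the OE_ADDED_PATHS bookkeeping is that re-sourcing the environment no longer accumulates duplicate PATH entries; a minimal sketch, assuming the usual oe-init-build-env entry point:

    . ./oe-init-build-env build
    . ./oe-init-build-env build   # second run: $OE_ADDED_PATHS is stripped from $PATH before being re-added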
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index f4cc5869de..0d171c4463 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -18,7 +18,6 @@ import re
scripts_path = os.path.dirname(os.path.realpath(__file__))
lib_path = scripts_path + '/lib'
sys.path = sys.path + [lib_path]
-import scriptutils
import scriptpath
scriptpath.add_bitbake_lib_path()
import argparse_oe
@@ -51,13 +50,10 @@ def check(args):
env['TMPDIR:forcevariable'] = tmpdir
try:
- output = subprocess.check_output(
- 'bitbake -n %s' % ' '.join(args.target),
- stderr=subprocess.STDOUT,
- env=env,
- shell=True)
+ cmd = ['bitbake', '--dry-run', '--runall=build'] + args.target
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
- task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
tasks = []
for line in output.decode('utf-8').splitlines():
res = task_re.match(line)
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 1c2d51c6ec..d02ee455f6 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -14,7 +14,7 @@ import re
class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
- description="Analyse recipe-depends.dot generated by bitbake -g",
+ description="Analyse task-depends.dot generated by bitbake -g",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
@@ -159,9 +159,14 @@ Reduce the .dot file packages only, no tasks:
reverse_deps = []
if self.args.why:
- for k, v in depends.items():
- if self.args.key in v and not k in reverse_deps:
- reverse_deps.append(k)
+ key_list = [self.args.key]
+ current_key = self.args.key
+ while (len(key_list) != 0):
+ current_key = key_list.pop()
+ for k, v in depends.items():
+ if current_key in v and not k in reverse_deps:
+ reverse_deps.append(k)
+ key_list.append(k)
print('Because: %s' % ' '.join(reverse_deps))
Dot.print_dep_chains(self.args.key, reverse_deps, depends)
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
index 5146bbf999..6228efcbee 100755
--- a/scripts/oe-find-native-sysroot
+++ b/scripts/oe-find-native-sysroot
@@ -36,20 +36,9 @@ if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
fi
# Global vars
-BITBAKE_E=""
set_oe_native_sysroot(){
- echo "Running bitbake -e $1"
- BITBAKE_E="`bitbake -e $1`"
- OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE= | cut -d '"' -f2`
-
- if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
- # This indicates that there was an error running bitbake -e that
- # the user needs to be informed of
- echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
- echo "Here is the output from bitbake -e $1"
- echo $BITBAKE_E
- exit 1
- fi
+ echo "Getting sysroot..."
+ OECORE_NATIVE_SYSROOT=$(bitbake-getvar -r $1 --value STAGING_DIR_NATIVE)
}
if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index 7412cc1f47..44ae40549a 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -296,7 +296,7 @@ def package_info(args):
extra = ''
for line in f:
for var in vars:
- m = re.match(var + '(?::\S+)?:\s*(.+?)\s*$', line)
+ m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', line)
if m:
vals[var] = m.group(1)
pkg_version = vals['PKGV'] or ''
diff --git a/scripts/oe-setup-build b/scripts/oe-setup-build
new file mode 100755
index 0000000000..5364f2b481
--- /dev/null
+++ b/scripts/oe-setup-build
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import json
+import os
+import subprocess
+
+def defaultlayers():
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), '.oe-layers.json'))
+
+def makebuildpath(topdir, template):
+ return os.path.join(topdir, "build-{}".format(template))
+
+def discover_templates(layers_file):
+ if not os.path.exists(layers_file):
+ print("List of layers {} does not exist; were the layers set up using the setup-layers script?".format(layers_file))
+ return None
+
+ templates = []
+ layers_list = json.load(open(layers_file))["layers"]
+ for layer in layers_list:
+ template_dir = os.path.join(os.path.dirname(layers_file), layer, 'conf','templates')
+ if os.path.exists(template_dir):
+ for d in sorted(os.listdir(template_dir)):
+ templatepath = os.path.join(template_dir,d)
+ if not os.path.isfile(os.path.join(templatepath,'local.conf.sample')):
+ continue
+ layer_base = os.path.basename(layer)
+ templatename = "{}-{}".format(layer_base[5:] if layer_base.startswith("meta-") else layer_base, d)
+ buildpath = makebuildpath(os.getcwd(), templatename)
+ notespath = os.path.join(template_dir, d, 'conf-notes.txt')
+ try: notes = open(notespath).read()
+ except: notes = None
+ try: summary = open(os.path.join(template_dir, d, 'conf-summary.txt')).read()
+ except: summary = None
+ templates.append({"templatename":templatename,"templatepath":templatepath,"buildpath":buildpath,"notespath":notespath,"notes":notes,"summary":summary})
+
+ return templates
+
+def print_templates(templates, verbose):
+ print("Available build configurations:\n")
+
+ for i in range(len(templates)):
+ t = templates[i]
+ print("{}. {}".format(i+1, t["templatename"]))
+ print("{}".format(t["summary"].strip() if t["summary"] else "This configuration does not have a summary."))
+ if verbose:
+ print("Configuration template path:", t["templatepath"])
+ print("Build path:", t["buildpath"])
+ print("Usage notes:", t["notespath"] if t["notes"] else "This configuration does not have usage notes.")
+ print("")
+ if not verbose:
+ print("Re-run with 'list -v' to see additional information.")
+
+def list_templates(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ verbose = args.v
+ print_templates(templates, verbose)
+
+def find_template(template_name, templates):
+ print_templates(templates, False)
+ if not template_name:
+ n_s = input("Please choose a configuration by its number: ")
+ try: return templates[int(n_s) - 1]
+ except:
+ print("Invalid selection, please try again.")
+ return None
+ else:
+ for t in templates:
+ if t["templatename"] == template_name:
+ return t
+ print("Configuration {} is not one of {}, please try again.".format(tempalte_name, [t["templatename"] for t in templates]))
+ return None
+
+def setup_build_env(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ template = find_template(args.c, templates)
+ if not template:
+ return
+ builddir = args.b if args.b else template["buildpath"]
+ no_shell = args.no_shell
+ coredir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
+ cmd = "TEMPLATECONF={} . {} {}".format(template["templatepath"], os.path.join(coredir, 'oe-init-build-env'), builddir)
+ if not no_shell:
+ cmd = cmd + " && {}".format(os.environ['SHELL'])
+ print("Running:", cmd)
+ subprocess.run(cmd, shell=True, executable=os.environ['SHELL'])
+
+parser = argparse.ArgumentParser(description="A script that discovers available build configurations and sets up a build environment based on one of them. Run without arguments to choose one interactively.")
+parser.add_argument("--layerlist", default=defaultlayers(), help='Where to look for available layers (as written out by setup-layers script) (default is {}).'.format(defaultlayers()))
+
+subparsers = parser.add_subparsers()
+parser_list_templates = subparsers.add_parser('list', help='List available configurations')
+parser_list_templates.add_argument('-v', action='store_true',
+ help='Print detailed information and usage notes for each available build configuration.')
+parser_list_templates.set_defaults(func=list_templates)
+
+parser_setup_env = subparsers.add_parser('setup', help='Set up a build environment and open a shell session with it, ready to run builds.')
+parser_setup_env.add_argument('-c', metavar='configuration_name', help="Use a build configuration configuration_name to set up a build environment (run this script with 'list' to see what is available)")
+parser_setup_env.add_argument('-b', metavar='build_path', help="Set up a build directory in build_path (run this script with 'list -v' to see where it would be by default)")
+parser_setup_env.add_argument('--no-shell', action='store_true',
+ help='Create a build directory but do not start a shell session with the build environment from it.')
+parser_setup_env.set_defaults(func=setup_build_env)
+
+args = parser.parse_args()
+
+if 'func' in args:
+ args.func(args)
+else:
+ from argparse import Namespace
+ setup_build_env(Namespace(layerlist=args.layerlist, c=None, b=None, no_shell=False))
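Typical invocations of the new script, based on the argparse definitions above:

    scripts/oe-setup-build list -v                        # list configurations with template/build paths and notes
    scripts/oe-setup-build setup -c <configuration_name> -b <build_path>
    scripts/oe-setup-build setup --no-shell -c <configuration_name>
    scripts/oe-setup-build                                # no arguments: pick a configuration interactively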
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 89ae30f609..dcb384c33a 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -57,6 +57,7 @@ if [ -n "$TEMPLATECONF" ]; then
fi
OECORELAYERCONF="$TEMPLATECONF/bblayers.conf.sample"
OECORELOCALCONF="$TEMPLATECONF/local.conf.sample"
+ OECORESUMMARYCONF="$TEMPLATECONF/conf-summary.txt"
OECORENOTESCONF="$TEMPLATECONF/conf-notes.txt"
fi
@@ -98,9 +99,25 @@ EOM
SHOWYPDOC=yes
fi
+if [ -z "$OECORESUMMARYCONF" ]; then
+ OECORESUMMARYCONF="$OEROOT/meta/conf/templates/default/conf-summary.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-summary.txt" ]; then
+ [ ! -r "$OECORESUMMARYCONF" ] || cp "$OECORESUMMARYCONF" "$BUILDDIR/conf/conf-summary.txt"
+fi
+
+if [ -z "$OECORENOTESCONF" ]; then
+ OECORENOTESCONF="$OEROOT/meta/conf/templates/default/conf-notes.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-notes.txt" ]; then
+ [ ! -r "$OECORENOTESCONF" ] || cp "$OECORENOTESCONF" "$BUILDDIR/conf/conf-notes.txt"
+fi
+
# Prevent disturbing a new GIT clone in same console
unset OECORELOCALCONF
unset OECORELAYERCONF
+unset OECORESUMMARYCONF
+unset OECORENOTESCONF
# Ending the first-time run message. Show the YP Documentation banner.
if [ -n "$SHOWYPDOC" ]; then
@@ -116,11 +133,8 @@ EOM
# unset SHOWYPDOC
fi
-if [ -z "$OECORENOTESCONF" ]; then
- OECORENOTESCONF="$OEROOT/meta/conf/templates/default/conf-notes.txt"
-fi
-[ ! -r "$OECORENOTESCONF" ] || cat "$OECORENOTESCONF"
-unset OECORENOTESCONF
+[ ! -r "$BUILDDIR/conf/conf-summary.txt" ] || cat "$BUILDDIR/conf/conf-summary.txt"
+[ ! -r "$BUILDDIR/conf/conf-notes.txt" ] || cat "$BUILDDIR/conf/conf-notes.txt"
if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
echo "$ORG_TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
diff --git a/scripts/oe-setup-layers b/scripts/oe-setup-layers
index 6ecaffed75..6fbfefd656 100755
--- a/scripts/oe-setup-layers
+++ b/scripts/oe-setup-layers
@@ -10,48 +10,116 @@
# bitbake-layers create-layers-setup destdir
#
# It is recommended that you do not modify this file directly, but rather re-run the above command to get the freshest upstream copy.
+#
+# This script is idempotent. Subsequent runs only change what is necessary to
+# ensure your layers match your configuration.
import argparse
import json
import os
import subprocess
-def _do_checkout(args, json):
- layers = json['sources']
- for l_name in layers:
- l_data = layers[l_name]
- layerdir = os.path.abspath(os.path.join(args['destdir'], l_data['path']))
+def _is_repo_git_repo(repodir):
+ try:
+ curr_toplevel = subprocess.check_output("git -C %s rev-parse --show-toplevel" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_toplevel.strip().decode("utf-8") == repodir:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_rev(repodir, rev):
+ try:
+ curr_rev = subprocess.check_output("git -C %s rev-parse HEAD" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_rev.strip().decode("utf-8") == rev:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_remote_uri(repodir, remote, uri):
+ try:
+ curr_uri = subprocess.check_output("git -C %s remote get-url %s" % (repodir, remote), shell=True, stderr=subprocess.DEVNULL)
+ if curr_uri.strip().decode("utf-8") == uri:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _contains_submodules(repodir):
+ return os.path.exists(os.path.join(repodir,".gitmodules"))
+
+def _write_layer_list(dest, repodirs):
+ layers = []
+ for r in repodirs:
+ for root, dirs, files in os.walk(r):
+ if os.path.basename(root) == 'conf' and 'layer.conf' in files:
+ layers.append(os.path.relpath(os.path.dirname(root), dest))
+ layers_f = os.path.join(dest, ".oe-layers.json")
+ print("Writing list of layers into {}".format(layers_f))
+ with open(layers_f, 'w') as f:
+ json.dump({"version":"1.0","layers":layers}, f, sort_keys=True, indent=4)
- if 'contains_this_file' in l_data.keys():
+def _do_checkout(args, json):
+ repos = json['sources']
+ repodirs = []
+ oesetupbuild = None
+ for r_name in repos:
+ r_data = repos[r_name]
+ repodir = os.path.abspath(os.path.join(args['destdir'], r_data['path']))
+ repodirs.append(repodir)
+
+ if 'contains_this_file' in r_data.keys():
force_arg = 'force_bootstraplayer_checkout'
if not args[force_arg]:
- print('Note: not checking out source {layer}, use {layerflag} to override.'.format(layer=l_name, layerflag='--force-bootstraplayer-checkout'))
+ print('Note: not checking out source {repo}, use {repoflag} to override.'.format(repo=r_name, repoflag='--force-bootstraplayer-checkout'))
continue
- l_remote = l_data['git-remote']
- rev = l_remote['rev']
- desc = l_remote['describe']
+ r_remote = r_data['git-remote']
+ rev = r_remote['rev']
+ desc = r_remote['describe']
if not desc:
desc = rev[:10]
- branch = l_remote['branch']
- remotes = l_remote['remotes']
+ branch = r_remote['branch']
+ remotes = r_remote['remotes']
- print('\nSetting up source {}, revision {}, branch {}'.format(l_name, desc, branch))
- cmd = 'git init -q {}'.format(layerdir)
- print("Running '{}'".format(cmd))
- subprocess.check_output(cmd, shell=True)
+ print('\nSetting up source {}, revision {}, branch {}'.format(r_name, desc, branch))
+ if not _is_repo_git_repo(repodir):
+ cmd = 'git init -q {}'.format(repodir)
+ print("Running '{}'".format(cmd))
+ subprocess.check_output(cmd, shell=True)
for remote in remotes:
- cmd = "git remote remove {} > /dev/null 2>&1; git remote add {} {}".format(remote, remote, remotes[remote]['uri'])
- print("Running '{}' in {}".format(cmd, layerdir))
- subprocess.check_output(cmd, shell=True, cwd=layerdir)
-
- cmd = "git fetch -q {} || true".format(remote)
- print("Running '{}' in {}".format(cmd, layerdir))
- subprocess.check_output(cmd, shell=True, cwd=layerdir)
-
- cmd = 'git checkout -q {}'.format(rev)
- print("Running '{}' in {}".format(cmd, layerdir))
- subprocess.check_output(cmd, shell=True, cwd=layerdir)
+ if not _is_repo_at_remote_uri(repodir, remote, remotes[remote]['uri']):
+ cmd = "git remote remove {} > /dev/null 2>&1; git remote add {} {}".format(remote, remote, remotes[remote]['uri'])
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = "git fetch -q {} || true".format(remote)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if not _is_repo_at_rev(repodir, rev):
+ cmd = "git fetch -q --all || true"
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = 'git checkout -q {}'.format(rev)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if _contains_submodules(repodir):
+ print("Repo {} contains submodules, use 'git submodule update' to ensure they are up to date".format(repodir))
+ if os.path.exists(os.path.join(repodir, 'scripts/oe-setup-build')):
+ oesetupbuild = os.path.join(repodir, 'scripts/oe-setup-build')
+
+ _write_layer_list(args['destdir'], repodirs)
+
+ if oesetupbuild:
+ oesetupbuild_symlink = os.path.join(args['destdir'], 'setup-build')
+ if os.path.exists(oesetupbuild_symlink):
+ os.remove(oesetupbuild_symlink)
+ os.symlink(os.path.relpath(oesetupbuild,args['destdir']),oesetupbuild_symlink)
+ print("\nRun '{}' to list available build configuration templates and set up a build from one of them.".format(oesetupbuild_symlink))
parser = argparse.ArgumentParser(description="A self contained python script that fetches all the needed layers and sets them to correct revisions using data in a json format from a separate file. The json data can be created from an active build directory with 'bitbake-layers create-layers-setup destdir' and there's a sample file and a schema in meta/files/")
@@ -69,10 +137,10 @@ parser.add_argument('--jsondata', default=__file__+".json", help='File containin
args = parser.parse_args()
with open(args.jsondata) as f:
- json = json.load(f)
+ json_f = json.load(f)
supported_versions = ["1.0"]
-if json["version"] not in supported_versions:
- raise Exception("File {} has version {}, which is not in supported versions: {}".format(args.jsondata, json["version"], supported_versions))
+if json_f["version"] not in supported_versions:
+ raise Exception("File {} has version {}, which is not in supported versions: {}".format(args.jsondata, json_f["version"], supported_versions))
-_do_checkout(vars(args), json)
+_do_checkout(vars(args), json_f)
diff --git a/scripts/oe-setup-vscode b/scripts/oe-setup-vscode
new file mode 100755
index 0000000000..b8642780d5
--- /dev/null
+++ b/scripts/oe-setup-vscode
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+usage() {
+ echo "$0 <OEINIT> <BUILDDIR>"
+ echo " OEINIT: path to directory where the .vscode folder is"
+ echo " BUILDDIR: directory passed to the oe-init-setup-env script"
+}
+
+if [ "$#" -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+OEINIT=$(readlink -f "$1")
+BUILDDIR=$(readlink -f "$2")
+VSCODEDIR=$OEINIT/.vscode
+
+if [ ! -d "$OEINIT" ] || [ ! -d "$BUILDDIR" ]; then
+ echo "$OEINIT and/or $BUILDDIR directories are not present."
+ exit 1
+fi
+
+VSCODE_SETTINGS=$VSCODEDIR/settings.json
+ws_builddir="$(echo "$BUILDDIR" | sed -e "s|$OEINIT|\${workspaceFolder}|g")"
+
+# If BUILDDIR is in scope of VSCode ensure VSCode does not try to index the build folder.
+# This would lead to a busy CPU and finally to an OOM exception.
+mkdir -p "$VSCODEDIR"
+cat <<EOMsettings > "$VSCODE_SETTINGS"
+{
+ "bitbake.pathToBitbakeFolder": "\${workspaceFolder}/bitbake",
+ "bitbake.pathToEnvScript": "\${workspaceFolder}/oe-init-build-env",
+ "bitbake.pathToBuildFolder": "$ws_builddir",
+ "bitbake.commandWrapper": "",
+ "bitbake.workingDirectory": "\${workspaceFolder}",
+ "files.exclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "files.watcherExclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "python.analysis.exclude": [
+ "**/_build/**",
+ "**/.git/**",
+ "**/buildhistory/**",
+ "**/cache/**",
+ "**/downloads/**",
+ "**/node_modules/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/sstate-cache/**",
+ "**/tmp*/**",
+ "**/workspace/attic/**",
+ "**/workspace/sources/**"
+ ]
+}
+EOMsettings
+
+
+# Ask the user if the yocto-bitbake extension should be installed
+VSCODE_EXTENSIONS=$VSCODEDIR/extensions.json
+cat <<EOMextensions > "$VSCODE_EXTENSIONS"
+{
+ "recommendations": [
+ "yocto-project.yocto-bitbake"
+ ]
+}
+EOMextensions
+
+echo "You had no $VSCODEDIR configuration."
+echo "These configuration files have therefore been created for you."
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index bc3ab43823..084d9ef684 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -29,7 +29,7 @@ for arg in sys.argv[1:]:
args.append(arg)
# Regex for removing version specs after dependency items
-verregex = re.compile(' \([=<>]* [^ )]*\)')
+verregex = re.compile(r' \([=<>]* [^ )]*\)')
pkg = ""
ver = ""
diff --git a/scripts/patchtest b/scripts/patchtest
new file mode 100755
index 0000000000..0be7062dc2
--- /dev/null
+++ b/scripts/patchtest
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: execute all unittest test cases discovered for a single patch
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import unittest
+import logging
+import traceback
+import json
+
+# Include current path so test cases can see it
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+
+# Include patchtest library
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../meta/lib/patchtest'))
+
+from data import PatchTestInput
+from repo import PatchTestRepo
+
+import utils
+logger = utils.logger_create('patchtest')
+info = logger.info
+error = logger.error
+
+import repo
+
+def getResult(patch, mergepatch, logfile=None):
+
+ class PatchTestResult(unittest.TextTestResult):
+ """ Patchtest TextTestResult """
+ shouldStop = True
+ longMessage = False
+
+ success = 'PASS'
+ fail = 'FAIL'
+ skip = 'SKIP'
+
+ def startTestRun(self):
+ # let's create the repo already, it can be used later on
+ repoargs = {
+ 'repodir': PatchTestInput.repodir,
+ 'commit' : PatchTestInput.basecommit,
+ 'branch' : PatchTestInput.basebranch,
+ 'patch' : patch,
+ }
+
+ self.repo_error = False
+ self.test_error = False
+ self.test_failure = False
+
+ try:
+ self.repo = PatchTestInput.repo = PatchTestRepo(**repoargs)
+ except:
+ logger.error(traceback.print_exc())
+ self.repo_error = True
+ self.stop()
+ return
+
+ if mergepatch:
+ self.repo.merge()
+
+ def addError(self, test, err):
+ self.test_error = True
+ (ty, va, trace) = err
+ logger.error(traceback.print_exc())
+
+ def addFailure(self, test, err):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ self.test_failure = True
+ fail_str = '{}: {}: {} ({})'.format(self.fail,
+ test_description, json.loads(str(err[1]))["issue"],
+ test.id())
+ print(fail_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(fail_str + "\n")
+
+ def addSuccess(self, test):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ success_str = '{}: {} ({})'.format(self.success,
+ test_description, test.id())
+ print(success_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(success_str + "\n")
+
+ def addSkip(self, test, reason):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ skip_str = '{}: {}: {} ({})'.format(self.skip,
+ test_description, json.loads(str(reason))["issue"],
+ test.id())
+ print(skip_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(skip_str + "\n")
+
+ def stopTestRun(self):
+
+ # in case there was an error on repo object creation, just return
+ if self.repo_error:
+ return
+
+ self.repo.clean()
+
+ return PatchTestResult
+
+def _runner(resultklass, prefix=None):
+ # load test with the corresponding prefix
+ loader = unittest.TestLoader()
+ if prefix:
+ loader.testMethodPrefix = prefix
+
+ # create the suite with discovered tests and the corresponding runner
+ suite = loader.discover(start_dir=PatchTestInput.testdir, pattern=PatchTestInput.pattern, top_level_dir=PatchTestInput.topdir)
+ ntc = suite.countTestCases()
+
+ # if there are no test cases, just quit
+ if not ntc:
+ return 2
+ runner = unittest.TextTestRunner(resultclass=resultklass, verbosity=0)
+
+ try:
+ result = runner.run(suite)
+ except:
+ logger.error(traceback.print_exc())
+ logger.error('patchtest: something went wrong')
+ return 1
+ if result.test_failure or result.test_error:
+ return 1
+
+ return 0
+
+def run(patch, logfile=None):
+ """ Load, setup and run pre and post-merge tests """
+ # Get the result class and install the control-c handler
+ unittest.installHandler()
+
+ # run pre-merge tests, meaning those methods with 'pretest' as prefix
+ premerge_resultklass = getResult(patch, False, logfile)
+ premerge_result = _runner(premerge_resultklass, 'pretest')
+
+ # run post-merge tests, meaning those methods with 'test' as prefix
+ postmerge_resultklass = getResult(patch, True, logfile)
+ postmerge_result = _runner(postmerge_resultklass, 'test')
+
+ print('----------------------------------------------------------------------\n')
+ if premerge_result == 2 and postmerge_result == 2:
+ logger.error('patchtest: No test cases found - did you specify the correct suite directory?')
+ if premerge_result == 1 or postmerge_result == 1:
+ logger.error('WARNING: patchtest: At least one patchtest caused a failure or an error - please check https://wiki.yoctoproject.org/wiki/Patchtest for further guidance')
+ else:
+ logger.info('OK: patchtest: All patchtests passed')
+ print('----------------------------------------------------------------------\n')
+ return premerge_result or postmerge_result
+
+def main():
+ tmp_patch = False
+ patch_path = PatchTestInput.patch_path
+ log_results = PatchTestInput.log_results
+ log_path = None
+ patch_list = None
+
+ git_status = os.popen("(cd %s && git status)" % PatchTestInput.repodir).read()
+ status_matches = ["Changes not staged for commit", "Changes to be committed"]
+ if any([match in git_status for match in status_matches]):
+ logger.error("patchtest: there are uncommitted changes in the target repo that would be overwritten. Please commit or restore them before running patchtest")
+ return 1
+
+ if os.path.isdir(patch_path):
+ patch_list = [os.path.join(patch_path, filename) for filename in sorted(os.listdir(patch_path))]
+ else:
+ patch_list = [patch_path]
+
+ for patch in patch_list:
+ if os.path.getsize(patch) == 0:
+ logger.error('patchtest: patch is empty')
+ return 1
+
+ logger.info('Testing patch %s' % patch)
+
+ if log_results:
+ log_path = patch + ".testresult"
+ with open(log_path, "a") as f:
+ f.write("Patchtest results for patch '%s':\n\n" % patch)
+
+ try:
+ if log_path:
+ run(patch, log_path)
+ else:
+ run(patch)
+ finally:
+ if tmp_patch:
+ os.remove(patch)
+
+if __name__ == '__main__':
+ ret = 1
+
+ # Parse the command line arguments and store it on the PatchTestInput namespace
+ PatchTestInput.set_namespace()
+
+ # set debugging level
+ if PatchTestInput.debug:
+ logger.setLevel(logging.DEBUG)
+
+ # if topdir is not defined, default it to testdir
+ if not PatchTestInput.topdir:
+ PatchTestInput.topdir = PatchTestInput.testdir
+
+ try:
+ ret = main()
+ except Exception:
+ import traceback
+ traceback.print_exc(5)
+
+ sys.exit(ret)
diff --git a/scripts/patchtest-get-branch b/scripts/patchtest-get-branch
new file mode 100755
index 0000000000..c6e242f8b6
--- /dev/null
+++ b/scripts/patchtest-get-branch
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# Get target branch from the corresponding mbox
+#
+# NOTE: this script was based on patches sent to the openembedded-core
+# mailing list, where the target branch is defined inside brackets as a
+# subject prefix, e.g. [master], [rocko], etc.
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import mailbox
+import argparse
+import re
+import git
+
+re_prefix = re.compile(r"(\[.*\])", re.DOTALL)
+
+def get_branch(filepath_repo, filepath_mbox, default_branch):
+ branch = None
+
+ # get all remotes branches
+ gitbranches = git.Git(filepath_repo).branch('-a').splitlines()
+
+ # from gitbranches, just get the names
+ branches = [b.split('/')[-1] for b in gitbranches]
+
+ subject = ' '.join(mailbox.mbox(filepath_mbox)[0]['subject'].splitlines())
+
+ # we expect that patches will have somewhere between one and three
+ # consecutive sets of square brackets with tokens inside, e.g.:
+ # 1. [PATCH]
+ # 2. [OE-core][PATCH]
+ # 3. [OE-core][kirkstone][PATCH]
+ # Some of them may also be part of a series, in which case the PATCH
+ # token will be formatted like:
+ # [PATCH 1/4]
+ # or they will be revisions to previous patches, where it will be:
+ # [PATCH v2]
+ # Or they may contain both:
+ # [PATCH v2 3/4]
+ # In any case, we want mprefix to contain all of these tokens so
+ # that we can search for branch names within them.
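+ # For example (illustrative), a subject such as
+ # "[OE-core][kirkstone][PATCH v2 3/4] foo: fix bar" would yield
+ # mprefix = ['[OE-core]', '[kirkstone]', '[PATCH v2 3/4]'].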
+ mprefix = re.findall(r'\[.*?\]', subject)
+ found_branch = None
+ if mprefix:
+ # Iterate over the tokens and compare against the branch list to
+ # figure out which one the patch is targeting
+ for token in mprefix:
+ stripped = token.lower().strip('[]')
+ if default_branch in stripped:
+ found_branch = default_branch
+ break
+ else:
+ for branch in branches:
+ # ignore branches named "core"
+ if branch != "core" and stripped.rfind(branch) != -1:
+ found_branch = token.split(' ')[0].strip('[]')
+ break
+
+ # if there's no mprefix content or no known branches were found in
+ # the tokens, assume the target is master
+ if found_branch is None:
+ found_branch = "master"
+
+ return (subject, found_branch)
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('repo', metavar='REPO', help='Main repository')
+ parser.add_argument('mbox', metavar='MBOX', help='mbox filename')
+ parser.add_argument('--default-branch', metavar='DEFAULT_BRANCH', default='master', help='Use this branch if none is found')
+ parser.add_argument('--separator', '-s', metavar='SEPARATOR', default=' ', help='Char separator for output data')
+ args = parser.parse_args()
+
+ subject, branch = get_branch(args.repo, args.mbox, args.default_branch)
+ print("branch: %s" % branch)
+
diff --git a/scripts/patchtest-get-series b/scripts/patchtest-get-series
new file mode 100755
index 0000000000..908442089f
--- /dev/null
+++ b/scripts/patchtest-get-series
@@ -0,0 +1,115 @@
+#!/bin/bash -e
+#
+# patchtest-get-series: Download latest patch series from Patchwork
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# the interval into the past which we want to check for new series, in minutes
+INTERVAL_MINUTES=30
+
+# Maximum number of series to retrieve. The Patchwork API can support up to 250
+# at once
+SERIES_LIMIT=250
+
+# Location to save patches
+DOWNLOAD_PATH="."
+
+# Name of the file to use/check as a log of previously-tested series IDs
+SERIES_TEST_LOG=".series_test.log"
+
+# Patchwork project to pull series patches from
+PROJECT="oe-core"
+
+# The Patchwork server to pull from
+SERVER="https://patchwork.yoctoproject.org/api/1.2/"
+
+help()
+{
+ echo "Usage: patchtest-get-series [ -i | --interval MINUTES ]
+ [ -d | --directory DIRECTORY ]
+ [ -l | --limit COUNT ]
+ [ -h | --help ]
+ [ -t | --tested-series LOGFILE]
+ [ -p | --project PROJECT ]
+ [ -s | --server SERVER ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -i|--interval)
+ INTERVAL_MINUTES=$2
+ shift 2
+ ;;
+ -l|--limit)
+ SERIES_LIMIT=$2
+ shift 2
+ ;;
+ -d|--directory)
+ DOWNLOAD_PATH=$2
+ shift 2
+ ;;
+ -p|--project)
+ PROJECT=$2
+ shift 2
+ ;;
+ -s|--server)
+ SERVER=$2
+ shift 2
+ ;;
+ -t|--tested-series)
+ SERIES_TEST_LOG=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# The time this script is running at
+START_TIME=$(date --date "now" +"%Y-%m-%dT%H:%M:%S")
+
+# the corresponding timestamp we want to check against for new patch series
+SERIES_CHECK_LIMIT=$(date --date "now - ${INTERVAL_MINUTES} minutes" +"%Y-%m-%dT%H:%M:%S")
+
+echo "Start time is $START_TIME"
+echo "Series check limit is $SERIES_CHECK_LIMIT"
+
+# Create DOWNLOAD_PATH if it doesn't exist
+if [ ! -d "$DOWNLOAD_PATH" ]; then
+ mkdir "${DOWNLOAD_PATH}"
+fi
+
+# Create SERIES_TEST_LOG if it doesn't exist
+if [ ! -f "$SERIES_TEST_LOG" ]; then
+ touch "${SERIES_TEST_LOG}"
+fi
+
+# Retrieve a list of series IDs from the 'git-pw series list' output. The API
+# supports a maximum of 250 results, so make sure we allow that when required
+SERIES_LIST=$(git-pw --project "${PROJECT}" --server "${SERVER}" series list --since "${SERIES_CHECK_LIMIT}" --limit "${SERIES_LIMIT}" | awk '{print $2}' | xargs | sed -e 's/[^0-9 ]//g')
+
+if [ -z "$SERIES_LIST" ]; then
+ echo "No new series for project ${PROJECT} since ${SERIES_CHECK_LIMIT}"
+ exit 0
+fi
+
+# Check each series ID
+for SERIES in $SERIES_LIST; do
+ # Download the series only if it's not found in the SERIES_TEST_LOG
+ if ! grep -w --quiet "${SERIES}" "${SERIES_TEST_LOG}"; then
+ echo "Downloading $SERIES..."
+ git-pw series download --separate "${SERIES}" "${DOWNLOAD_PATH}"
+ echo "${SERIES}" >> "${SERIES_TEST_LOG}"
+ else
+ echo "Already tested ${SERIES}. Skipping..."
+ fi
+done
diff --git a/scripts/patchtest-send-results b/scripts/patchtest-send-results
new file mode 100755
index 0000000000..8a3dadbd11
--- /dev/null
+++ b/scripts/patchtest-send-results
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest-send-results: send patchtest results to the submitter of a patch
+# Note that this script is currently under development and has been
+# hard-coded with default values for testing purposes. This script
+# should not be used without changing the default recipient, at minimum.
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import boto3
+import configparser
+import mailbox
+import os
+import re
+import sys
+
+greeting = """Thank you for your submission. Patchtest identified one
+or more issues with the patch. Please see the log below for
+more information:\n\n---\n"""
+
+suggestions = """\n---\n\nPlease address the issues identified and
+submit a new revision of the patch, or alternatively, reply to this
+email with an explanation of why the patch should be accepted. If you
+believe these results are due to an error in patchtest, please submit a
+bug at https://bugzilla.yoctoproject.org/ (use the 'Patchtest' category
+under 'Yocto Project Subprojects'). For more information on specific
+failures, see: https://wiki.yoctoproject.org/wiki/Patchtest. Thank
+you!"""
+
+def has_a_failed_test(raw_results):
+ return any(raw_result.split(':')[0] == "FAIL" for raw_result in raw_results.splitlines())
+
+parser = argparse.ArgumentParser(description="Send patchtest results to a submitter for a given patch")
+parser.add_argument("-p", "--patch", dest="patch", required=True, help="The patch file to summarize")
+parser.add_argument("-d", "--debug", dest="debug", required=False, action='store_true', help="Print raw email headers and content, but don't actually send it")
+args = parser.parse_args()
+
+if not os.path.exists(args.patch):
+ print(f"Patch '{args.patch}' not found - did you provide the right path?")
+ sys.exit(1)
+elif not os.path.exists(args.patch + ".testresult"):
+ print(f"Found patch '{args.patch}' but '{args.patch}.testresult' was not present. Have you run patchtest on the patch?")
+ sys.exit(1)
+
+result_file = args.patch + ".testresult"
+testresult = None
+
+with open(result_file, "r") as f:
+ testresult = f.read()
+
+# we know these patch files will only contain a single patch, so only
+# worry about the first element for getting the subject
+mbox = mailbox.mbox(args.patch)
+mbox_subject = mbox[0]['subject']
+subject_line = f"Patchtest results for {mbox_subject}"
+
+# extract the submitter email address and use it as the reply address
+# for the results
+reply_address = mbox[0]['from']
+
+# extract the message ID and use that as the in-reply-to address
+# TODO: This will need to change again when patchtest can handle a whole
+# series at once
+in_reply_to = mbox[0]['Message-ID']
+
+# the address the results email is sent from
+from_address = "patchtest@automation.yoctoproject.org"
+
+# mailing list to CC
+cc_address = "openembedded-core@lists.openembedded.org"
+
+if has_a_failed_test(testresult):
+ reply_contents = None
+ if len(max(open(result_file, 'r'), key=len)) > 220:
+ warning = "Tests failed for the patch, but the results log could not be processed due to excessive result line length."
+ reply_contents = greeting + warning + suggestions
+ else:
+ reply_contents = greeting + testresult + suggestions
+
+ ses_client = boto3.client('ses', region_name='us-west-2')
+
+ # Construct the headers for the email. We only want to reply
+ # directly to the tested patch, so make In-Reply-To and References
+ # the same value.
+ raw_data = 'From: ' + from_address + '\nTo: ' + reply_address + \
+ '\nCC: ' + cc_address + '\nSubject:' + subject_line + \
+ '\nIn-Reply-To:' + in_reply_to + \
+ '\nReferences:' + in_reply_to + \
+ '\nMIME-Version: 1.0' + \
+ '\nContent-type: Multipart/Mixed;boundary="NextPart"\n\n--NextPart\nContent-Type: text/plain\n\n' + \
+ reply_contents + '\n\n--NextPart'
+
+ if args.debug:
+ print(f"RawMessage: \n\n{raw_data}")
+ else:
+ response = ses_client.send_raw_email(
+ Source="patchtest@automation.yoctoproject.org",
+ RawMessage={
+ "Data": raw_data,
+ },
+ )
+
+else:
+ print(f"No failures identified for {args.patch}.")
diff --git a/scripts/patchtest-setup-sharedir b/scripts/patchtest-setup-sharedir
new file mode 100755
index 0000000000..277677e527
--- /dev/null
+++ b/scripts/patchtest-setup-sharedir
@@ -0,0 +1,83 @@
+#!/bin/bash -e
+#
+# patchtest-setup-sharedir: Setup a directory for storing mboxes and
+# repositories to be shared with the guest machine, including updates to
+# the repos if the directory already exists
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# poky repository
+POKY_REPO="https://git.yoctoproject.org/poky"
+
+# patchtest repository
+PATCHTEST_REPO="https://git.yoctoproject.org/patchtest"
+
+# the name of the directory
+SHAREDIR="patchtest_share"
+
+help()
+{
+ echo "Usage: patchtest-setup-sharedir [ -d | --directory SHAREDIR ]
+ [ -p | --patchtest PATCHTEST_REPO ]
+ [ -y | --poky POKY_REPO ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -d|--directory)
+ SHAREDIR=$2
+ shift 2
+ ;;
+ -p|--patchtest)
+ PATCHTEST_REPO=$2
+ shift 2
+ ;;
+ -y|--poky)
+ POKY_REPO=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# define MBOX_DIR where the patch series will be stored by
+# get-latest-series
+MBOX_DIR="${SHAREDIR}/mboxes"
+
+# Create SHAREDIR if it doesn't exist
+if [ ! -d "$SHAREDIR" ]; then
+ mkdir -p "${SHAREDIR}"
+ echo "Created ${SHAREDIR}"
+fi
+
+# Create the mboxes directory if it doesn't exist
+if [ ! -d "$MBOX_DIR" ]; then
+ mkdir -p "${MBOX_DIR}"
+ echo "Created ${MBOX_DIR}"
+fi
+
+# clone poky if it's not already present; otherwise, update it
+BASENAME=$(basename "${POKY_REPO}")
+if [ ! -d "${SHAREDIR}/${BASENAME}" ]; then
+ git clone "${POKY_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/${BASENAME}" && git pull)
+fi
+
+# clone patchtest if it's not already present; otherwise, update it
+BASENAME=$(basename "${PATCHTEST_REPO}")
+if [ ! -d "${SHAREDIR}/${BASENAME}" ]; then
+ git clone "${PATCHTEST_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/${BASENAME}" && git pull)
+fi
diff --git a/scripts/patchtest.README b/scripts/patchtest.README
new file mode 100644
index 0000000000..76b5fcdb6d
--- /dev/null
+++ b/scripts/patchtest.README
@@ -0,0 +1,153 @@
+# Patchtest
+
+## Introduction
+
+Patchtest is a test framework for community patches based on the standard
+unittest Python module. As input, it needs three elements to work properly:
+a patch in mbox format (either created with `git format-patch` or fetched
+from Patchwork), a test suite and a target repository.
+
+The first test suite intended to be used with patchtest is found in the
+openembedded-core repository [1] targeted for patches that get into the
+openembedded-core mailing list [2]. This suite is also intended as a
+baseline for development of similar suites for other layers as needed.
+
+Patchtest can run either on a host or on a guest machine, depending on the
+environment in which the tests need to be executed. If you plan to test your own patches
+(a good practice before they are sent to the mailing list), the easiest way is
+to install and run patchtest on your local host; on the other hand, if automated
+testing is intended, the guest method is strongly recommended. The guest
+method requires the use of the patchtest layer, in addition to the tools
+available in oe-core: https://git.yoctoproject.org/patchtest/
+
+## Installation
+
+As a tool for use with the Yocto Project, the [quick start guide](https://docs.yoctoproject.org/brief-yoctoprojectqs/index.html)
+contains the necessary prerequisites for a basic project. In addition,
+patchtest relies on the following Python modules:
+
+- boto3 (for sending automated results emails only)
+- git-pw>=2.5.0
+- jinja2
+- pylint
+- pyparsing>=3.0.9
+- unidiff
+
+These can be installed by running `pip install -r
+meta/lib/patchtest/requirements.txt`. Note that git-pw is not
+automatically added to the user's PATH; by default, it is installed at
+~/.local/bin/git-pw.
+
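+If needed, git-pw can be made available in the current shell session with, for
+example:
+
+    export PATH="$HOME/.local/bin:$PATH"
+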
+For git-pw (and therefore scripts such as patchtest-get-series) to work, you need
+to provide a Patchwork instance in your user's .gitconfig, like so (the project
+can be specified using the --project argument):
+
+ git config --global pw.server "https://patchwork.yoctoproject.org/api/1.2/"
+
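+The target project can likewise be stored in your .gitconfig instead of being
+passed on the command line each time; assuming the oe-core project name on
+that Patchwork instance, this would be:
+
+    git config --global pw.project oe-core
+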
+To work with patchtest, you should have the following repositories cloned:
+
+1. https://git.openembedded.org/openembedded-core/ (or https://git.yoctoproject.org/poky/)
+2. https://git.openembedded.org/bitbake/ (if not using poky)
+3. https://git.yoctoproject.org/patchtest (if using guest mode)
+
+## Usage
+
+### Obtaining Patches
+
+Patch files can be obtained directly from cloned repositories using `git
+format-patch -N` (where N is the number of patches starting from HEAD to
+generate). git-pw can also be used with filters for users, patch/series IDs,
+and timeboxes if specific patches are desired. For more information, see the
+git-pw [documentation](https://patchwork.readthedocs.io/projects/git-pw/en/latest/).
+
+Alternatively, `scripts/patchtest-get-series` can be used to pull mbox files from
+the Patchwork instance configured previously in .gitconfig. It uses a log file
+called ".series_test.log" to store and compare series IDs so that the same
+versions of a patch are not tested multiple times unintentionally. By default,
+it will pull up to 250 patch series from the last 30 minutes using oe-core as
+the target project, but these parameters can be configured using the `--limit`,
+`--interval`, and `--project` arguments respectively. For more information, run
+`patchtest-get-series -h`.
+
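+For example, a typical invocation (the option values here are illustrative)
+might look like:
+
+    patchtest-get-series --directory ./mboxes --limit 5 --interval 30 --project oe-core
+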
+### Host Mode
+
+To run patchtest on the host, do the following:
+
+1. In openembedded-core/poky, do `source oe-init-build-env`
+2. Generate patch files from the target repository by doing `git format-patch -N`,
+ where N is the number of patches starting at HEAD, or by using git-pw
+ or patchtest-get-series
+3. Run patchtest on a patch file by doing the following:
+
+ patchtest --patch /path/to/patch/file
+
+ or, if you have stored the patch files in a directory, do:
+
+ patchtest --directory /path/to/patch/directory
+
+ For example, to test `master-gcc-Fix--fstack-protector-issue-on-aarch64.patch` against the oe-core test suite:
+
+ patchtest --patch master-gcc-Fix--fstack-protector-issue-on-aarch64.patch
+
+ If you want to use a different test suite or target repository, you can use the --testdir and --repodir flags:
+
+ patchtest --patch /path/to/patch/file --repodir /path/to/repo --testdir /path/to/test/dir
+
+### Guest Mode
+
+Patchtest's guest mode has been refactored to more closely mirror the
+typical Yocto Project image build workflow, but there are still some key
+differences to keep in mind. The primary objective is to provide a level
+of isolation from the host when testing patches pulled automatically
+from the mailing lists. When executed this way, the test process is
+essentially running arbitrary code from the internet, which could be
+catastrophic if malicious changes or even poorly-handled edge cases aren't
+protected against. In order to use this mode, the
+https://git.yoctoproject.org/patchtest/ repository must be cloned and
+the meta-patchtest layer added to bblayers.conf.
+
+The general flow of guest mode is:
+
+1. Run patchtest-setup-sharedir --directory <dirname> to create a
+ directory for mounting
+2. Collect patches via patchtest-get-series (or other manual step) into the
+ <dirname>/mboxes path
+3. Ensure that a user with ID 1200 has appropriate read/write
+ permissions to <dirname> and <dirname>/mboxes, so that the
+ "patchtest" user in the core-image-patchtest image can function
+4. Build the core-image-patchtest image
+5. Run the core-image-patchtest image with the mounted sharedir, like
+ so:
+ `runqemu kvm nographic qemuparams="-snapshot -fsdev
+ local,id=test_mount,path=/workspace/yocto/poky/build/patchtestdir,security_model=mapped
+ -device virtio-9p-pci,fsdev=test_mount,mount_tag=test_mount -smp 4 -m
+ 2048"`
+
+Patchtest runs as an initscript for the core-image-patchtest image and
+shuts down after completion, so there is no input required from a user
+during operation. Unlike in host mode, the guest is designed to
+automatically generate test result files, in the same directory as the
+targeted patch files but with .testresult as an extension. These contain
+the entire output of the patchtest run for each respective pass,
+including the PASS, FAIL, and SKIP indicators for each test run.
+
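+Assuming the default .testresult naming, a quick way to list only the problems
+reported for a given patch is, for example:
+
+    grep -E "^(FAIL|SKIP)" /path/to/patch/file.testresult
+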
+## Contributing
+
+The openembedded-core mailing list (openembedded-core@lists.openembedded.org) is used for questions,
+comments and patch review. It is subscriber-only, so please register before
+posting.
+
+When sending single patches, please use something like:
+
+ git send-email -M -1 --to=openembedded-core@lists.openembedded.org --subject-prefix=OE-core][PATCH
+
+## Maintenance
+
+Maintainers:
+ Trevor Gamblin <tgamblin@baylibre.com>
+
+## Links
+[1] https://git.openembedded.org/openembedded-core/
+[2] https://www.yoctoproject.org/community/mailing-lists/
diff --git a/scripts/postinst-intercepts/update_gtk_icon_cache b/scripts/postinst-intercepts/update_gtk_icon_cache
index 99367a2855..a92bd840c6 100644
--- a/scripts/postinst-intercepts/update_gtk_icon_cache
+++ b/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -11,7 +11,11 @@ $STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --u
for icondir in $D/usr/share/icons/*/ ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
+ for gtkuic_cmd in gtk-update-icon-cache gtk4-update-icon-cache ; do
+ if [ -n "$(which $gtkuic_cmd)" ]; then
+ $gtkuic_cmd -fqt $icondir
+ fi
+ done
fi
done
diff --git a/scripts/postinst-intercepts/update_mandb b/scripts/postinst-intercepts/update_mandb
new file mode 100644
index 0000000000..f91bafdb11
--- /dev/null
+++ b/scripts/postinst-intercepts/update_mandb
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -eu
+
+# Create a temporary man_db.conf with paths to the rootfs, as mandb needs absolute paths
+CONFIG=$(mktemp --tmpdir update-mandb.XXXXX)
+sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf > $CONFIG
+
+mkdir -p $D${localstatedir}/cache/man/
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${bindir}/mandb --config-file $CONFIG --create
+
+rm -f $CONFIG
+
+chown -R man:man $D${localstatedir}/cache/man/
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 4326361426..c6e67833ab 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -356,6 +356,12 @@ def extents(options, xscale, trace):
h += 30 + bar_h
if trace.disk_stats:
h += 30 + bar_h
+ if trace.cpu_pressure:
+ h += 30 + bar_h
+ if trace.io_pressure:
+ h += 30 + bar_h
+ if trace.mem_pressure:
+ h += 30 + bar_h
if trace.monitor_disk:
h += 30 + bar_h
if trace.mem_stats:
@@ -614,8 +620,8 @@ def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
return curr_y
-def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - curr_y - 1 * off_y - header_h ]
+def render_processes_chart(ctx, options, trace, curr_y, width, h, sec_w):
+ chart_rect = [off_x, curr_y+header_h, width, h - curr_y - 1 * off_y - header_h ]
draw_legend_box (ctx, "Configure", \
TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
@@ -640,8 +646,9 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
offset = trace.min or min(trace.start.keys())
for start in sorted(trace.start.keys()):
for process in sorted(trace.start[start]):
+ elapsed_time = trace.processes[process][1] - start
if not options.app_options.show_all and \
- trace.processes[process][1] - start < options.app_options.mintime:
+ elapsed_time < options.app_options.mintime:
continue
task = process.split(":")[1]
@@ -650,14 +657,23 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
#print(s)
x = chart_rect[0] + (start - offset) * sec_w
- w = ((trace.processes[process][1] - start) * sec_w)
+ w = elapsed_time * sec_w
+
+ def set_alfa(color, alfa):
+ clist = list(color)
+ clist[-1] = alfa
+ return tuple(clist)
#print("proc at %s %s %s %s" % (x, y, w, proc_h))
col = None
if task == "do_compile":
col = TASK_COLOR_COMPILE
+ elif "do_compile" in task:
+ col = set_alfa(TASK_COLOR_COMPILE, 0.25)
elif task == "do_configure":
col = TASK_COLOR_CONFIGURE
+ elif "do_configure" in task:
+ col = set_alfa(TASK_COLOR_CONFIGURE, 0.25)
elif task == "do_install":
col = TASK_COLOR_INSTALL
elif task == "do_populate_sysroot":
@@ -675,7 +691,10 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
draw_fill_rect(ctx, col, (x, y, w, proc_h))
draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+ # Show elapsed time for each task
+ process = "%ds %s" % (elapsed_time, process)
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, width)
+
y = y + proc_h
return curr_y
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index 362d5153e8..63a53b6b88 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -131,7 +131,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index 7cd771bbe7..8199b43784 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -7,7 +7,7 @@ fatal() {
}
pkg="$1"
-[ -n "$pkg" -a -e "$pkg" ] ||
+[ -n "$pkg" ] && [ -e "$pkg" ] ||
fatal "No package supplied"
_dd() {
@@ -16,14 +16,23 @@ _dd() {
}
calcsize() {
+
+ case "$(_dd $1 bs=4 count=1 | tr -d '\0')" in
+ "$(printf '\216\255\350')"*) ;; # '\x8e\xad\xe8'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+ esac
+
offset=$(($1 + 8))
local i b b0 b1 b2 b3 b4 b5 b6 b7
i=0
while [ $i -lt 8 ]; do
- b=$(_dd $(($offset + $i)) bs=1 count=1; echo X)
- b=${b%X}
+ # add . to not lose \n
+ # strip \0 as it gets dropped with warning otherwise
+ b="$(_dd $(($offset + $i)) bs=1 count=1 | tr -d '\0' ; echo .)"
+ b=${b%.} # strip . again
+
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
@@ -35,7 +44,7 @@ calcsize() {
offset=$(($offset + $rsize))
}
-case "$(_dd 0 bs=8 count=1)" in
+case "$(_dd 0 bs=4 count=1 | tr -d '\0')" in
"$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
*) fatal "File doesn't look like rpm: $pkg" ;;
esac
@@ -46,10 +55,11 @@ sigsize=$rsize
calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
hdrsize=$rsize
-case "$(_dd $offset bs=3 count=1)" in
- "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
- "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
- "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
- "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
- *) fatal "Unrecognized rpm file: $pkg" ;;
+case "$(_dd $offset bs=2 count=1 | tr -d '\0')" in
+ "$(printf '\102\132')") _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')") _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')") _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135')") _dd $offset | unlzma ;; # '\x5d\x00'
+ "$(printf '\050\265')") _dd $offset | unzstd ;; # '\x28\xb5'
+ *) fatal "Unrecognized payload compression format in rpm file: $pkg" ;;
esac
diff --git a/scripts/runqemu b/scripts/runqemu
index 983f7514c7..69cd44864e 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -66,6 +66,7 @@ of the following environment variables (in any order):
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+ nonetwork - disable network connectivity
novga - Disable VGA emulation completely
sdl - choose the SDL UI frontend
gtk - choose the Gtk UI frontend
@@ -82,6 +83,8 @@ of the following environment variables (in any order):
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
+ guestagent - enable guest agent communication
+ qmp=<path> - create a QMP socket (defaults to unix:qmp.sock if unspecified)
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
qemuparams=<xyz> - specify custom parameters to QEMU
@@ -116,10 +119,10 @@ def check_tun():
if not os.access(dev_tun, os.W_OK):
raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-def get_first_file(cmds):
- """Return first file found in wildcard cmds"""
- for cmd in cmds:
- all_files = glob.glob(cmd)
+def get_first_file(globs):
+ """Return first file found in wildcard globs"""
+ for g in globs:
+ all_files = glob.glob(g)
if all_files:
for f in all_files:
if not os.path.isdir(f):
@@ -177,6 +180,7 @@ class BaseConfig(object):
self.serialconsole = False
self.serialstdio = False
self.nographic = False
+ self.nonetwork = False
self.sdl = False
self.gtk = False
self.gl = False
@@ -195,12 +199,14 @@ class BaseConfig(object):
self.snapshot = False
self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi', "wic.vhd", "wic.vhdx")
self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
- 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
+ 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz',
+ 'squashfs', 'squashfs-xz', 'squashfs-lzo',
+ 'squashfs-lz4', 'squashfs-zst')
self.vmtypes = ('hddimg', 'iso')
self.fsinfo = {}
self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
self.cmdline_ip_slirp = "ip=dhcp"
- self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
+ self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8 net.ifnames=0"
# Use different mac section for tap and slirp to avoid
# conflicts, e.g., when one is running with tap, the other is
# running with slirp.
@@ -210,11 +216,15 @@ class BaseConfig(object):
self.mac_tap = "52:54:00:12:34:"
self.mac_slirp = "52:54:00:12:35:"
# pid of the actual qemu process
- self.qemupid = None
+ self.qemu_environ = os.environ.copy()
+ self.qemuprocess = None
# avoid cleanup twice
self.cleaned = False
# Files to cleanup after run
self.cleanup_files = []
+ self.qmp = None
+ self.guest_agent = False
+ self.guest_agent_sockpath = '/tmp/qga.sock'
def acquire_taplock(self, error=True):
logger.debug("Acquiring lockfile %s..." % self.taplock)
@@ -361,11 +371,11 @@ class BaseConfig(object):
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
self.qbconfload = True
- elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ elif re.search('\\.bin$', p) or re.search('bzImage', p) or \
re.search('zImage', p) or re.search('vmlinux', p) or \
re.search('fitImage', p) or re.search('uImage', p):
self.kernel = p
- elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
+ elif os.path.isfile(p) and ('-image-' in os.path.basename(p) or '.rootfs.' in os.path.basename(p)):
self.rootfs = p
# Check filename against self.fstypes can handle <file>.cpio.gz,
# otherwise, its type would be "gz", which is incorrect.
@@ -375,18 +385,24 @@ class BaseConfig(object):
fst = t
break
if not fst:
- m = re.search('.*\.(.*)$', self.rootfs)
+ m = re.search('.*\\.(.*)$', self.rootfs)
if m:
fst = m.group(1)
if fst:
self.check_arg_fstype(fst)
- qb = re.sub('\.' + fst + "$", '', self.rootfs)
- qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
+ qb = re.sub('\\.' + fst + "$", '.qemuboot.conf', self.rootfs)
if os.path.exists(qb):
self.qemuboot = qb
self.qbconfload = True
else:
- logger.warning("%s doesn't exist" % qb)
+ logger.warning("%s doesn't exist, will try to remove '.rootfs' from filename" % qb)
+ # Try to remove .rootfs (IMAGE_NAME_SUFFIX) as well
+ qb = re.sub('\\.rootfs.qemuboot.conf$', '.qemuboot.conf', qb)
+ if os.path.exists(qb):
+ self.qemuboot = qb
+ self.qbconfload = True
+ else:
+ logger.warning("%s doesn't exist" % qb)
else:
raise RunQemuError("Can't find FSTYPE from: %s" % p)
@@ -420,6 +436,7 @@ class BaseConfig(object):
# are there other scenarios in which we need to support being
# invoked by bitbake?
deploy = self.get('DEPLOY_DIR_IMAGE')
+ image_link_name = self.get('IMAGE_LINK_NAME')
bbchild = deploy and self.get('OE_TMPDIR')
if bbchild:
self.set_machine_deploy_dir(arg, deploy)
@@ -444,31 +461,24 @@ class BaseConfig(object):
else:
logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
self.set("MACHINE", arg)
+ if not image_link_name:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
def set_dri_path(self):
- # As runqemu can be run within bitbake (when using testimage, for example),
- # we need to ensure that we run host pkg-config, and that it does not
- # get mis-directed to native build paths set by bitbake.
- try:
- del os.environ['PKG_CONFIG_PATH']
- del os.environ['PKG_CONFIG_DIR']
- del os.environ['PKG_CONFIG_LIBDIR']
- del os.environ['PKG_CONFIG_SYSROOT_DIR']
- except KeyError:
- pass
- try:
- dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
- except subprocess.CalledProcessError as e:
- raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
- os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
-
- # This preloads uninative libc pieces and therefore ensures that RPATH/RUNPATH
- # in host mesa drivers doesn't trick uninative into loading host libc.
- preload_items = ['libdl.so.2', 'librt.so.1', 'libpthread.so.0']
- uninative_path = os.path.dirname(self.get("UNINATIVE_LOADER"))
- if os.path.exists(uninative_path):
- preload_paths = [os.path.join(uninative_path, i) for i in preload_items]
- os.environ['LD_PRELOAD'] = " ".join(preload_paths)
+ drivers_path = os.path.join(self.bindir_native, '../lib/dri')
+ if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
+ raise RunQemuError("""
+qemu has been built without opengl support and accelerated graphics support is not available.
+To enable it, add:
+DISTRO_FEATURES_NATIVE:append = " opengl"
+DISTRO_FEATURES_NATIVESDK:append = " opengl"
+to your build configuration.
+""")
+ self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path
def check_args(self):
for debug in ("-d", "--debug"):
@@ -482,8 +492,8 @@ class BaseConfig(object):
sys.argv.remove(quiet)
if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
- os.environ['SDL_RENDER_DRIVER'] = 'software'
- os.environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
+ self.qemu_environ['SDL_RENDER_DRIVER'] = 'software'
+ self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
unknown_arg = ""
for arg in sys.argv[1:]:
@@ -491,13 +501,15 @@ class BaseConfig(object):
self.check_arg_fstype(arg)
elif arg == 'nographic':
self.nographic = True
+ elif arg == "nonetwork":
+ self.nonetwork = True
elif arg == 'sdl':
self.sdl = True
elif arg == 'gtk':
self.gtk = True
elif arg == 'gl':
self.gl = True
- elif 'gl-es' in sys.argv[1:]:
+ elif arg == 'gl-es':
self.gl_es = True
elif arg == 'egl-headless':
self.egl_headless = True
@@ -524,6 +536,14 @@ class BaseConfig(object):
elif arg == 'publicvnc':
self.publicvnc = True
self.qemu_opt_script += ' -vnc :0'
+ elif arg == 'guestagent':
+ self.guest_agent = True
+ elif arg == "qmp":
+ self.qmp = "unix:qmp.sock"
+ elif arg.startswith("qmp="):
+ self.qmp = arg[len('qmp='):]
+ elif arg.startswith('guestagent-sockpath='):
+ self.guest_agent_sockpath = '%s' % arg[len('guestagent-sockpath='):]
elif arg.startswith('tcpserial='):
self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
@@ -555,11 +575,18 @@ class BaseConfig(object):
self.check_arg_machine(unknown_arg)
if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
- self.load_bitbake_env()
+ self.load_bitbake_env(target=self.rootfs)
s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
if s:
self.set("DEPLOY_DIR_IMAGE", s.group(1))
+ if not self.get('IMAGE_LINK_NAME') and self.rootfs:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
+
def check_kvm(self):
"""Check kvm and kvm-host"""
if not (self.kvm_enabled or self.vhost_enabled):
@@ -589,11 +616,6 @@ class BaseConfig(object):
if os.access(dev_kvm, os.W_OK|os.R_OK):
self.qemu_opt_script += ' -enable-kvm'
- if self.get('MACHINE') == "qemux86":
- # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
- # See YOCTO #12301
- # On 64 bit we use x2apic
- self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
else:
logger.error("You have no read or write permission on /dev/kvm.")
logger.error("Please change the ownership of this file as described at:")
@@ -634,10 +656,10 @@ class BaseConfig(object):
elif fsflag == 'kernel-in-fs':
wic_fs = False
else:
- logger.warn('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ logger.warning('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
continue
else:
- logger.warn('QB_FSINFO is not supported for image type "%s"', fstype)
+ logger.warning('QB_FSINFO is not supported for image type "%s"', fstype)
continue
if fstype in self.fsinfo:
@@ -670,16 +692,16 @@ class BaseConfig(object):
if self.rootfs and not os.path.exists(self.rootfs):
# Lazy rootfs
- self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
- self.rootfs, self.get('MACHINE'),
+ self.rootfs = "%s/%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
+ self.get('IMAGE_LINK_NAME'),
self.fstype)
elif not self.rootfs:
- cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
- cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
- cmds = (cmd_name, cmd_link)
- self.rootfs = get_first_file(cmds)
+ glob_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
+ glob_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
+ globs = (glob_name, glob_link)
+ self.rootfs = get_first_file(globs)
if not self.rootfs:
- raise RunQemuError("Failed to find rootfs: %s or %s" % cmds)
+ raise RunQemuError("Failed to find rootfs: %s or %s" % globs)
if not os.path.exists(self.rootfs):
raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
@@ -739,10 +761,10 @@ class BaseConfig(object):
kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
- cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
- self.kernel = get_first_file(cmds)
+ globs = (kernel_match_name, kernel_match_link, kernel_startswith)
+ self.kernel = get_first_file(globs)
if not self.kernel:
- raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds)
+ raise RunQemuError('KERNEL not found: %s, %s or %s' % globs)
if not os.path.exists(self.kernel):
raise RunQemuError("KERNEL %s not found" % self.kernel)
@@ -759,13 +781,13 @@ class BaseConfig(object):
dtb = self.get('QB_DTB')
if dtb:
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
- cmd_match = "%s/%s" % (deploy_dir_image, dtb)
- cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
- cmd_wild = "%s/*.dtb" % deploy_dir_image
- cmds = (cmd_match, cmd_startswith, cmd_wild)
- self.dtb = get_first_file(cmds)
+ glob_match = "%s/%s" % (deploy_dir_image, dtb)
+ glob_startswith = "%s/%s*" % (deploy_dir_image, dtb)
+ glob_wild = "%s/*.dtb" % deploy_dir_image
+ globs = (glob_match, glob_startswith, glob_wild)
+ self.dtb = get_first_file(globs)
if not os.path.exists(self.dtb):
- raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
+ raise RunQemuError('DTB not found: %s, %s or %s' % globs)
def check_bios(self):
"""Check and set bios"""
@@ -816,7 +838,7 @@ class BaseConfig(object):
self.set('QB_MEM', qb_mem)
mach = self.get('MACHINE')
- if not mach.startswith(('qemumips', 'qemux86')):
+ if not mach.startswith(('qemumips', 'qemux86', 'qemuloongarch64')):
self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
self.qemu_opt_script += ' %s' % self.get('QB_MEM')
@@ -828,11 +850,11 @@ class BaseConfig(object):
if self.get('QB_TCPSERIAL_OPT'):
self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
if len(ports) > 1:
for port in ports[1:]:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
def check_and_set(self):
"""Check configs sanity and set when needed"""
@@ -875,8 +897,10 @@ class BaseConfig(object):
machine = self.get('MACHINE')
if not machine:
machine = os.path.basename(deploy_dir_image)
- self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image,
- self.rootfs, machine)
+ if not self.get('IMAGE_LINK_NAME'):
+ raise RunQemuError("IMAGE_LINK_NAME wasn't set to find corresponding .qemuboot.conf file")
+ self.qemuboot = "%s/%s.qemuboot.conf" % (deploy_dir_image,
+ self.get('IMAGE_LINK_NAME'))
else:
cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
logger.debug('Running %s...' % cmd)
@@ -997,19 +1021,16 @@ class BaseConfig(object):
if self.slirp_enabled:
self.nfs_server = '10.0.2.2'
else:
- self.nfs_server = '192.168.7.1'
-
- # Figure out a new nfs_instance to allow multiple qemus running.
- ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
+ self.nfs_server = '192.168.7.@GATEWAY@'
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
@@ -1020,7 +1041,11 @@ class BaseConfig(object):
# Use '%s' since they are integers
os.putenv(k, '%s' % v)
- self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s" % (nfsd_port, mountd_port)
+ qb_nfsrootfs_extra_opt = self.get("QB_NFSROOTFS_EXTRA_OPT")
+ if qb_nfsrootfs_extra_opt and not qb_nfsrootfs_extra_opt.startswith(","):
+ qb_nfsrootfs_extra_opt = "," + qb_nfsrootfs_extra_opt
+
+ self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s%s" % (nfsd_port, mountd_port, qb_nfsrootfs_extra_opt)
# Extract .tar.bz2 or .tar.bz if no nfs dir
if not (self.rootfs and os.path.isdir(self.rootfs)):
@@ -1043,7 +1068,7 @@ class BaseConfig(object):
cmd = ('runqemu-extract-sdk', src, dest)
logger.info('Running %s...' % str(cmd))
if subprocess.call(cmd) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.rootfs = dest
self.cleanup_files.append(self.rootfs)
self.cleanup_files.append('%s.pseudo_state' % self.rootfs)
@@ -1052,14 +1077,32 @@ class BaseConfig(object):
cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
logger.info('Running %s...' % str(cmd))
if subprocess.call(cmd) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.nfs_running = True
+ def setup_cmd(self):
+ cmd = self.get('QB_SETUP_CMD')
+ if cmd != '':
+ logger.info('Running setup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
+
def setup_net_bridge(self):
self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+ # There might be a race issue when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
@@ -1069,7 +1112,7 @@ class BaseConfig(object):
logger.info("Network configuration:%s", netconf)
self.kernel_cmdline_script += netconf
# Port mapping
- hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+ hostfwd = ",hostfwd=tcp:127.0.0.1:2222-:22,hostfwd=tcp:127.0.0.1:2323-:23"
qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
# Figure out the port
@@ -1078,14 +1121,7 @@ class BaseConfig(object):
mac = 2
lockdir = "/tmp/qemu-port-locks"
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
# Find a free port to avoid conflicts
for p in ports[:]:
@@ -1125,20 +1161,17 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
cmd = (ip, 'link')
logger.debug('Running %s...' % str(cmd))
ip_link = subprocess.check_output(cmd).decode('utf-8')
# Matches line like: 6: tap0: <foo>
- possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ oe_tap_name = 'tap'
+ if 'OE_TAP_NAME' in os.environ:
+ oe_tap_name = os.environ['OE_TAP_NAME']
+ tap_re = '^[0-9]+: +(' + oe_tap_name + '[0-9]+): <.*'
+ possibles = re.findall(tap_re, ip_link, re.M)
tap = ""
for p in possibles:
lockfile = os.path.join(lockdir, p)
@@ -1161,7 +1194,7 @@ class BaseConfig(object):
gid = os.getgid()
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
- cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
+ cmd = ('sudo', self.qemuifup, str(gid))
try:
tap = subprocess.check_output(cmd).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
@@ -1177,7 +1210,7 @@ class BaseConfig(object):
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
sys.exit(1)
self.tap = tap
- tapnum = int(tap[3:])
+ tapnum = int(tap[len(oe_tap_name):])
gateway = tapnum * 2 + 1
client = gateway + 1
if self.fstype == 'nfs':
@@ -1185,6 +1218,7 @@ class BaseConfig(object):
netconf = " " + self.cmdline_ip_tap
netconf = netconf.replace('@CLIENT@', str(client))
netconf = netconf.replace('@GATEWAY@', str(gateway))
+ self.nfs_server = self.nfs_server.replace('@GATEWAY@', str(gateway))
logger.info("Network configuration:%s", netconf)
self.kernel_cmdline_script += netconf
mac = "%s%02x" % (self.mac_tap, client)
@@ -1200,7 +1234,8 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
def setup_network(self):
- if self.get('QB_NET') == 'none':
+ if self.nonetwork or self.get('QB_NET') == 'none':
+ self.set('NETWORK_CMD', '-nic none')
return
if sys.stdin.isatty():
self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
@@ -1299,7 +1334,7 @@ class BaseConfig(object):
"""attempt to determine the appropriate qemu-system binary"""
mach = self.get('MACHINE')
if not mach:
- search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
+ search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemuloongarch64|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
if self.rootfs:
match = re.match(search, self.rootfs)
if match:
@@ -1322,6 +1357,8 @@ class BaseConfig(object):
qbsys = 'x86_64'
elif mach == 'qemuppc':
qbsys = 'ppc'
+ elif mach == 'qemuloongarch64':
+ qbsys = 'loongarch64'
elif mach == 'qemumips':
qbsys = 'mips'
elif mach == 'qemumips64':
@@ -1350,6 +1387,35 @@ class BaseConfig(object):
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
self.qemu_system = qemu_system
+ def check_render_nodes(self):
+ render_hint = """If /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create one suitable for mesa llvmpipe software renderer."""
+ try:
+ content = os.listdir("/dev/dri")
+ nodes = [i for i in content if i.startswith('renderD')]
+ if len(nodes) == 0:
+ raise RunQemuError("No render nodes found in /dev/dri/: %s. %s" %(content, render_hint))
+ for n in nodes:
+ try:
+ with open(os.path.join("/dev/dri", n), "w") as f:
+ f.close()
+ break
+ except IOError:
+ pass
+ else:
+ raise RunQemuError("None of the render nodes in /dev/dri/ are accessible: %s; you may need to add yourself to 'render' group or otherwise ensure you have read-write permissions on one of them." %(nodes))
+ except FileNotFoundError:
+ raise RunQemuError("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
+
+ def setup_guest_agent(self):
+ if self.guest_agent == True:
+ self.qemu_opt += ' -chardev socket,path=' + self.guest_agent_sockpath + ',server,nowait,id=qga0 '
+ self.qemu_opt += ' -device virtio-serial '
+ self.qemu_opt += ' -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 '
+
+ def setup_qmp(self):
+ if self.qmp:
+ self.qemu_opt += " -qmp %s,server,nowait" % self.qmp
+
def setup_vga(self):
if self.nographic == True:
if self.sdl == True:
@@ -1369,7 +1435,7 @@ class BaseConfig(object):
# need our font setup and show-cusor below so we need to see what qemu --help says
# is supported so we can pass our correct config in.
if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless == True:
- output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True)
+ output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ)
if "-display gtk" in output:
self.gtk = True
elif "-display sdl" in output:
@@ -1387,13 +1453,14 @@ class BaseConfig(object):
self.qemu_opt += ' -display '
if self.egl_headless == True:
+ self.check_render_nodes()
self.set_dri_path()
self.qemu_opt += 'egl-headless,'
else:
if self.sdl == True:
self.qemu_opt += 'sdl,'
elif self.gtk == True:
- os.environ['FONTCONFIG_PATH'] = '/etc/fonts'
+ self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts'
self.qemu_opt += 'gtk,'
if self.gl == True:
@@ -1412,6 +1479,19 @@ class BaseConfig(object):
for entry in self.get('SERIAL_CONSOLES').split(' '):
self.kernel_cmdline_script += ' console=%s' %entry.split(';')[1]
+ # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
+ # If no serial or serialtcp options were specified, only ttyS0 is created
+ # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+
+ # If the user passed only one serial option, pad to two devices; if they
+ # passed two or more, assume they know what they want.
+ if serial_num == 1:
+ self.qemu_opt += " -serial null"
+ elif serial_num >= 2:
+ return
+
if self.serialstdio == True or self.nographic == True:
self.qemu_opt += " -serial mon:stdio"
else:
@@ -1423,10 +1503,6 @@ class BaseConfig(object):
self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
- # We always wants ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
- # If no serial or serialtcp options were specified, only ttyS0 is created
- # and sysvinit shows an error trying to enable ttyS1:
- # INIT: Id "S1" respawning too fast: disabled for 5 minutes
serial_num = len(re.findall("-serial", self.qemu_opt))
if serial_num < 2:
self.qemu_opt += " -serial null"
@@ -1480,6 +1556,8 @@ class BaseConfig(object):
if self.snapshot:
self.qemu_opt += " -snapshot"
+ self.setup_guest_agent()
+ self.setup_qmp()
self.setup_serial()
self.setup_vga()
@@ -1514,8 +1592,8 @@ class BaseConfig(object):
if len(self.portlocks):
for descriptor in self.portlocks.values():
pass_fds.append(descriptor.fileno())
- process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
- self.qemupid = process.pid
+ process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ)
+ self.qemuprocess = process
retcode = process.wait()
if retcode:
if retcode == -signal.SIGTERM:
@@ -1523,6 +1601,13 @@ class BaseConfig(object):
else:
logger.error("Failed to run qemu: %s", process.stderr.read().decode())
+ def cleanup_cmd(self):
+ cmd = self.get('QB_CLEANUP_CMD')
+ if cmd != '':
+ logger.info('Running cleanup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
+
def cleanup(self):
if self.cleaned:
return
@@ -1531,21 +1616,30 @@ class BaseConfig(object):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.info("Cleaning up")
+
+ if self.qemuprocess:
+ try:
+ # give it some time to shut down, ignore return values and output
+ self.qemuprocess.send_signal(signal.SIGTERM)
+ self.qemuprocess.communicate(timeout=5)
+ except subprocess.TimeoutExpired:
+ self.qemuprocess.kill()
+
with open('/proc/uptime', 'r') as f:
uptime_seconds = f.readline().split()[0]
logger.info('Host uptime: %s\n' % uptime_seconds)
if self.cleantap:
- cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
+ cmd = ('sudo', self.qemuifdown, self.tap)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
self.release_taplock()
- self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
@@ -1558,9 +1652,12 @@ class BaseConfig(object):
else:
shutil.rmtree(ent)
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
+
self.cleaned = True
- def run_bitbake_env(self, mach=None):
+ def run_bitbake_env(self, mach=None, target=''):
bitbake = shutil.which('bitbake')
if not bitbake:
return
@@ -1573,22 +1670,33 @@ class BaseConfig(object):
multiconfig = "mc:%s" % multiconfig
if mach:
- cmd = 'MACHINE=%s bitbake -e %s' % (mach, multiconfig)
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
else:
- cmd = 'bitbake -e %s' % multiconfig
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
logger.info('Running %s...' % cmd)
- return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ try:
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ logger.warning("Couldn't run '%s' to gather environment information, maybe the target wasn't an image name, will retry with virtual/kernel as a target:\n%s" % (cmd, err.output.decode('utf-8')))
+ # need something with IMAGE_NAME_SUFFIX/IMAGE_LINK_NAME defined (kernel also inherits image-artifact-names.bbclass)
+ target = 'virtual/kernel'
+ if mach:
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
+ else:
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
+ try:
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ logger.warning("Couldn't run '%s' to gather environment information, giving up with 'bitbake -e':\n%s" % (cmd, err.output.decode('utf-8')))
+ return ''
+
- def load_bitbake_env(self, mach=None):
+ def load_bitbake_env(self, mach=None, target=None):
if self.bitbake_e:
return
- try:
- self.bitbake_e = self.run_bitbake_env(mach=mach)
- except subprocess.CalledProcessError as err:
- self.bitbake_e = ''
- logger.warning("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+ self.bitbake_e = self.run_bitbake_env(mach=mach, target=target)
def validate_combos(self):
if (self.fstype in self.vmtypes) and self.kernel:
@@ -1618,7 +1726,7 @@ class BaseConfig(object):
return result
raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
else:
- raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+ raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % str(cmd))
def main():
@@ -1634,12 +1742,8 @@ def main():
subprocess.check_call([renice, str(os.getpid())])
def sigterm_handler(signum, frame):
- logger.info("SIGTERM received")
- if config.qemupid:
- os.kill(config.qemupid, signal.SIGTERM)
+ logger.info("Received signal: %s" % (signum))
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
signal.signal(signal.SIGTERM, sigterm_handler)
config.check_args()
@@ -1651,6 +1755,7 @@ def main():
config.setup_network()
config.setup_rootfs()
config.setup_final()
+ config.setup_cmd()
config.start_qemu()
except RunQemuError as err:
logger.error(err)
@@ -1660,9 +1765,8 @@ def main():
traceback.print_exc()
return 1
finally:
+ config.cleanup_cmd()
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
if __name__ == "__main__":
sys.exit(main())
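With the slirp port forwards above bound to the host loopback, the guest stays reachable from the build host itself; an illustrative session (ports taken from the default hostfwd string, 'slirp' being the usual runqemu option for user networking) might be:

    runqemu qemux86-64 nographic slirp
    ssh -p 2222 root@127.0.0.1

Similarly, the new OE_TAP_NAME handling lets a non-default tap prefix be chosen per invocation, e.g. OE_TAP_NAME=oetap runqemu qemux86-64 nographic (the prefix used here is only an example).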
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
index 384c091713..6a8acd0d5a 100755
--- a/scripts/runqemu-export-rootfs
+++ b/scripts/runqemu-export-rootfs
@@ -34,16 +34,12 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
- if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
- echo "Have you run 'bitbake meta-ide-support'?"
- else
- echo "This shouldn't happen - something is missing from your toolchain installation"
- fi
+ echo "This shouldn't happen - something is missing from your toolchain installation"
exit 1
fi
@@ -74,26 +70,11 @@ MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]}
## For debugging you would additionally add
## --debug all
-UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
+UNFSD_OPTS="-p -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
# See how we were called.
case "$1" in
start)
- PORTMAP_RUNNING=`ps -ef | grep portmap | grep -v grep`
- RPCBIND_RUNNING=`ps -ef | grep rpcbind | grep -v grep`
- if [[ "x$PORTMAP_RUNNING" = "x" && "x$RPCBIND_RUNNING" = "x" ]]; then
- echo "======================================================="
- echo "Error: neither rpcbind nor portmap appear to be running"
- echo "Please install and start one of these services first"
- echo "======================================================="
- echo "Tip: for recent Ubuntu hosts, run:"
- echo " sudo apt-get install rpcbind"
- echo "Then add OPTIONS=\"-i -w\" to /etc/default/rpcbind and run"
- echo " sudo service portmap restart"
-
- exit 1
- fi
-
echo "Creating exports file..."
echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
index 9bc0c07fb8..db05da25f2 100755
--- a/scripts/runqemu-extract-sdk
+++ b/scripts/runqemu-extract-sdk
@@ -25,7 +25,7 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build system environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
ROOTFS_TARBALL=$1
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
index a6ee4517da..a00c79c442 100755
--- a/scripts/runqemu-gen-tapdevs
+++ b/scripts/runqemu-gen-tapdevs
@@ -1,53 +1,58 @@
#!/bin/bash
#
# Create a "bank" of tap network devices that can be used by the
-# runqemu script. This script needs to be run as root, and will
-# use the tunctl binary from the build system sysroot. Note: many Linux
-# distros these days still use an older version of tunctl which does not
-# support the group permissions option, hence the need to use the build
-# system provided version.
+# runqemu script. This script needs to be run as root.
#
# Copyright (C) 2010 Intel Corp.
#
# SPDX-License-Identifier: GPL-2.0-only
#
-uid=`id -u`
gid=`id -g`
-if [ -n "$SUDO_UID" ]; then
- uid=$SUDO_UID
-fi
if [ -n "$SUDO_GID" ]; then
gid=$SUDO_GID
fi
usage() {
- echo "Usage: sudo $0 <uid> <gid> <num> <staging_bindir_native>"
- echo "Where <uid> is the numeric user id the tap devices will be owned by"
+ echo "Usage: sudo $0 <gid> <num>"
echo "Where <gid> is the numeric group id the tap devices will be owned by"
echo "<num> is the number of tap devices to create (0 to remove all)"
- echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
echo "For example:"
echo "$ bitbake qemu-helper-native"
- echo "$ sudo $0 $uid $gid 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
+ echo "$ sudo $0 $gid 4"
echo ""
exit 1
}
-if [ $# -ne 4 ]; then
+# Allow passing 4 arguments for backward compatibility, with a warning
+if [ $# -gt 4 ]; then
+ echo "Error: Incorrect number of arguments"
+ usage
+fi
+if [ $# -gt 3 ]; then
+ echo "Warning: Ignoring the <native-sysroot-basedir> parameter. It is no longer needed."
+fi
+if [ $# -gt 2 ]; then
+ echo "Warning: Ignoring the <uid> parameter. It is no longer needed."
+ GID=$2
+ COUNT=$3
+elif [ $# -eq 2 ]; then
+ GID=$1
+ COUNT=$2
+else
echo "Error: Incorrect number of arguments"
usage
fi
-TUID=$1
-GID=$2
-COUNT=$3
-STAGING_BINDIR_NATIVE=$4
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
- echo "Error: $TUNCTL is not an executable"
- usage
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+# check if COUNT is a number and >= 0
+if ! [ $COUNT -ge 0 ]; then
+ echo "Error: Incorrect count: $COUNT"
+ exit 1
fi
if [ $EUID -ne 0 ]; then
@@ -62,48 +67,41 @@ if [ ! -x "$RUNQEMU_IFUP" ]; then
exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ -z "$IFCONFIG" ]; then
- # Is it ever anywhere else?
- IFCONFIG=/sbin/ip
-fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
- exit 1
-fi
-
-if [ $COUNT -ge 0 ]; then
- # Ensure we start with a clean slate
- for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
- echo "Note: Destroying pre-existing tap interface $tap..."
- $TUNCTL -d $tap
- done
- rm -f /etc/runqemu-nosudo
+if interfaces=`ip tuntap list` 2>/dev/null; then
+ interfaces=`echo "$interfaces" |cut -f1 -d: |grep -E "^$OE_TAP_NAME.*"`
else
- echo "Error: Incorrect count: $COUNT"
+ echo "Failed to call 'ip tuntap list'" >&2
exit 1
fi
-if [ $COUNT -gt 0 ]; then
- echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
- for ((index=0; index < $COUNT; index++)); do
- echo "Creating tap$index"
- ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
- if [ $? -ne 0 ]; then
- echo "Error running tunctl: $ifup"
- exit 1
- fi
- done
+# Ensure we start with a clean slate
+for tap in $interfaces; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ ip tuntap del $tap mode tap
+done
+rm -f /etc/runqemu-nosudo
- echo "Note: For systems running NetworkManager, it's recommended"
- echo "Note: that the tap devices be set as unmanaged in the"
- echo "Note: NetworkManager.conf file. Add the following lines to"
- echo "Note: /etc/NetworkManager/NetworkManager.conf"
- echo "[keyfile]"
- echo "unmanaged-devices=interface-name:tap*"
-
- # The runqemu script will check for this file, and if it exists,
- # will use the existing bank of tap devices without creating
- # additional ones via sudo.
- touch /etc/runqemu-nosudo
+if [ $COUNT -eq 0 ]; then
+ exit 0
fi
+
+echo "Creating $COUNT tap devices for GID: $GID..."
+for ((index=0; index < $COUNT; index++)); do
+ echo "Creating $OE_TAP_NAME$index"
+ if ! ifup=`$RUNQEMU_IFUP $GID 2>&1`; then
+ echo "Error bringing up interface: $ifup"
+ exit 1
+ fi
+done
+
+echo "Note: For systems running NetworkManager, it's recommended"
+echo "Note: that the tap devices be set as unmanaged in the"
+echo "Note: NetworkManager.conf file. Add the following lines to"
+echo "Note: /etc/NetworkManager/NetworkManager.conf"
+echo "[keyfile]"
+echo "unmanaged-devices=interface-name:$OE_TAP_NAME*"
+
+# The runqemu script will check for this file, and if it exists,
+# will use the existing bank of tap devices without creating
+# additional ones via sudo.
+touch /etc/runqemu-nosudo
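For illustration, the two-argument form documented in usage() above would be invoked as:

    sudo ./scripts/runqemu-gen-tapdevs $(id -g) 4    # create four tap devices owned by the caller's group
    sudo ./scripts/runqemu-gen-tapdevs $(id -g) 0    # remove all previously created tap devices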
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
index e0eb5344c6..822a2a39b9 100755
--- a/scripts/runqemu-ifdown
+++ b/scripts/runqemu-ifdown
@@ -1,8 +1,7 @@
#!/bin/bash
#
# QEMU network configuration script to bring down tap devices. This
-# utility needs to be run as root, and will use the tunctl binary
-# from the native sysroot.
+# utility needs to be run as root, and will use the ip utility.
#
# If you find yourself calling this script a lot, you can add the
# the following to your /etc/sudoers file to be able to run this
@@ -17,7 +16,7 @@
#
usage() {
- echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <tap-dev>"
}
if [ $EUID -ne 0 ]; then
@@ -25,30 +24,31 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 2 ]; then
+if [ $# -gt 2 ] || [ $# -lt 1 ]; then
usage
exit 1
fi
+# backward compatibility
+if [ $# -eq 2 ] ; then
+ echo "Warning: native-sysroot-basedir parameter is ignored. It is no longer needed." >&2
+fi
+
TAP=$1
-STAGING_BINDIR_NATIVE=$2
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -e "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
+if ! ip tuntap del $TAP mode tap 2>/dev/null; then
+ echo "Error: Unable to run up tuntap del"
exit 1
fi
-$TUNCTL -d $TAP
-
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ -x "$IFCONFIG" ]; then
- if `$IFCONFIG link show $TAP > /dev/null 2>&1`; then
- $IFCONFIG link del $TAP
+if [ -x "$IPTOOL" ]; then
+ if `$IPTOOL link show $TAP > /dev/null 2>&1`; then
+ $IPTOOL link del $TAP
fi
fi
# cleanup the remaining iptables rules
@@ -60,8 +60,13 @@ if [ ! -x "$IPTABLES" ]; then
echo "$IPTABLES cannot be executed"
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
true
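As a worked example of the arithmetic above, assuming the default OE_TAP_NAME of 'tap':

    # tap2 -> n    = 2 * 2 + 1 = 5  -> 192.168.7.5 (host/gateway side)
    #         dest = 2 * 2 + 2 = 6  -> 192.168.7.6 (guest side)

which matches the gateway/client numbering that runqemu itself derives from the tap number.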
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
index bb661740c5..05c9325b6b 100755
--- a/scripts/runqemu-ifup
+++ b/scripts/runqemu-ifup
@@ -1,10 +1,7 @@
#!/bin/bash
#
# QEMU network interface configuration script. This utility needs to
-# be run as root, and will use the tunctl binary from a native sysroot.
-# Note: many Linux distros these days still use an older version of
-# tunctl which does not support the group permissions option, hence
-# the need to use build system's version.
+# be run as root, and will use the ip utility.
#
# If you find yourself calling this script a lot, you can add the
# the following to your /etc/sudoers file to be able to run this
@@ -24,7 +21,7 @@
#
usage() {
- echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <gid>"
}
if [ $EUID -ne 0 ]; then
@@ -32,41 +29,43 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 3 ]; then
+if [ $# -eq 2 ]; then
+ echo "Warning: uid parameter is ignored. It is no longer needed." >&2
+ GROUP="$2"
+elif [ $# -eq 1 ]; then
+ GROUP="$1"
+else
usage
exit 1
fi
-USERID="-u $1"
-GROUP="-g $2"
-STAGING_BINDIR_NATIVE=$3
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -x "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
- exit 1
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
fi
-TAP=`$TUNCTL -b $GROUP 2>&1`
-STATUS=$?
-if [ $STATUS -ne 0 ]; then
-# If tunctl -g fails, try using tunctl -u, for older host kernels
-# which do not support the TUNSETGROUP ioctl
- TAP=`$TUNCTL -b $USERID 2>&1`
- STATUS=$?
- if [ $STATUS -ne 0 ]; then
- echo "tunctl failed:"
- exit 1
+if taps=$(ip tuntap list 2>/dev/null); then
+ tap_no_last=$(echo "$taps" |cut -f 1 -d ":" |grep -E "^$OE_TAP_NAME.*" |sed "s/$OE_TAP_NAME//g" | sort -rn | head -n 1)
+ if [ -z "$tap_no_last" ]; then
+ tap_no=0
+ else
+ tap_no=$(("$tap_no_last" + 1))
fi
+ ip tuntap add "$OE_TAP_NAME$tap_no" mode tap group "$GROUP" && TAP=$OE_TAP_NAME$tap_no
+fi
+
+if [ -z "$TAP" ]; then
+ echo "Error: Unable to find a tap device to use"
+ exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
+if [ ! -x "$IPTOOL" ]; then
+ echo "$IPTOOL cannot be executed"
exit 1
fi
@@ -79,22 +78,22 @@ if [ ! -x "$IPTABLES" ]; then
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-$IFCONFIG addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+$IPTOOL addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to set up IP addressing on $TAP"
exit 1
fi
-$IFCONFIG link set dev $TAP up
+$IPTOOL link set dev $TAP up
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to bring up $TAP"
exit 1
fi
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
-$IFCONFIG route add to 192.168.7.$dest dev $TAP
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
+$IPTOOL route add to 192.168.7.$dest dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to add route to 192.168.7.$dest using $TAP"
diff --git a/scripts/sstate-cache-management.py b/scripts/sstate-cache-management.py
new file mode 100755
index 0000000000..d3f600bd28
--- /dev/null
+++ b/scripts/sstate-cache-management.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import os
+import re
+import sys
+
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+from pathlib import Path
+
+if sys.version_info < (3, 8, 0):
+ raise RuntimeError("Sorry, python 3.8.0 or later is required for this script.")
+
+SSTATE_PREFIX = "sstate:"
+SSTATE_EXTENSION = ".tar.zst"
+# SSTATE_EXTENSION = ".tgz"
+# .siginfo.done files are mentioned in the original script?
+SSTATE_SUFFIXES = (
+ SSTATE_EXTENSION,
+ f"{SSTATE_EXTENSION}.siginfo",
+ f"{SSTATE_EXTENSION}.done",
+)
+
+RE_SSTATE_PKGSPEC = re.compile(
+ rf"""sstate:(?P<pn>[^:]*):
+ (?P<package_target>[^:]*):
+ (?P<pv>[^:]*):
+ (?P<pr>[^:]*):
+ (?P<sstate_pkgarch>[^:]*):
+ (?P<sstate_version>[^_]*):
+ (?P<bb_unihash>[^_]*)_
+ (?P<bb_task>[^:]*)
+ (?P<ext>({"|".join([re.escape(s) for s in SSTATE_SUFFIXES])}))$""",
+ re.X,
+)
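+# Spelled out, the named groups above correspond to cache file names of the
+# illustrative form:
+#   sstate:<pn>:<package_target>:<pv>:<pr>:<sstate_pkgarch>:<sstate_version>:<bb_unihash>_<bb_task><ext>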
+
+
+# Really we'd like something like a Path subclass which implements a stat
+# cache here, unfortunately there's no good way to do that transparently
+# (yet); see:
+#
+# https://github.com/python/cpython/issues/70219
+# https://discuss.python.org/t/make-pathlib-extensible/3428/77
+@dataclass
+class SstateEntry:
+ """Class for keeping track of an entry in sstate-cache."""
+
+ path: Path
+ match: re.Match
+ stat_result: os.stat_result = None
+
+ def __hash__(self):
+ return self.path.__hash__()
+
+ def __getattr__(self, name):
+ return self.match.group(name)
+
+
+# this is what's in the original script; as far as I can tell, it's an
+# implementation artefact which we don't need?
+def find_archs():
+ # all_archs
+ builder_arch = os.uname().machine
+
+ # FIXME
+ layer_paths = [Path("../..")]
+
+ tune_archs = set()
+ re_tune = re.compile(r'AVAILTUNES .*=.*"(.*)"')
+ for path in layer_paths:
+ for tunefile in [
+ p for p in path.glob("meta*/conf/machine/include/**/*") if p.is_file()
+ ]:
+ with open(tunefile) as f:
+ for line in f:
+ m = re_tune.match(line)
+ if m:
+ tune_archs.update(m.group(1).split())
+
+ # all_machines
+ machine_archs = set()
+ for path in layer_paths:
+ for machine_file in path.glob("meta*/conf/machine/*.conf"):
+ machine_archs.add(machine_file.parts[-1][:-5])
+
+ extra_archs = set()
+ all_archs = (
+ set(
+ arch.replace("-", "_")
+ for arch in machine_archs | tune_archs | set(["allarch", builder_arch])
+ )
+ | extra_archs
+ )
+
+ print(all_archs)
+
+
+# again, not needed?
+def find_tasks():
+ print(set([p.bb_task for p in paths]))
+
+
+def collect_sstate_paths(args):
+ def scandir(path, paths):
+ # Assume everything is a directory; by not checking we avoid needing an
+ # additional stat which is potentially a synchronous roundtrip over NFS
+ try:
+ for p in path.iterdir():
+ filename = p.parts[-1]
+ if filename.startswith(SSTATE_PREFIX):
+ if filename.endswith(SSTATE_SUFFIXES):
+ m = RE_SSTATE_PKGSPEC.match(p.parts[-1])
+ assert m
+ paths.add(SstateEntry(p, m))
+ # ignore other things (includes things like lockfiles)
+ else:
+ scandir(p, paths)
+
+ except NotADirectoryError:
+ pass
+
+ paths = set()
+ # TODO: parallelise scandir
+ scandir(Path(args.cache_dir), paths)
+
+ def path_stat(p):
+ p.stat_result = p.path.lstat()
+
+ if args.remove_duplicated:
+ # This is probably slightly performance negative on a local filesystem
+ # when we interact with the GIL; over NFS it's a massive win.
+ with ThreadPoolExecutor(max_workers=args.jobs) as executor:
+ executor.map(path_stat, paths)
+
+ return paths
+
+
+def remove_by_stamps(args, paths):
+ all_sums = set()
+ for stamps_dir in args.stamps_dir:
+ stamps_path = Path(stamps_dir)
+ assert stamps_path.is_dir()
+ re_sigdata = re.compile(r"do_.*\.sigdata\.([^.]*)")
+ all_sums |= set(
+ [
+ re_sigdata.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*.sigdata.*")
+ ]
+ )
+ re_setscene = re.compile(r"do_.*_setscene\.([^.]*)")
+ all_sums |= set(
+ [
+ re_setscene.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*_setscene.*")
+ ]
+ )
+ return [p for p in paths if p.bb_unihash not in all_sums]
+
+
+def remove_duplicated(args, paths):
+ # Skip populate_lic as it produces duplicates in a normal build
+ #
+ # 9ae16469e707 sstate-cache-management: skip populate_lic archives when removing duplicates
+ valid_paths = [p for p in paths if p.bb_task != "populate_lic"]
+
+ keep = dict()
+ remove = list()
+ for p in valid_paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task, p.ext])
+ if sstate_sig not in keep:
+ keep[sstate_sig] = p
+ elif p.stat_result.st_mtime > keep[sstate_sig].stat_result.st_mtime:
+ remove.append(keep[sstate_sig])
+ keep[sstate_sig] = p
+ else:
+ remove.append(p)
+
+ return remove
+
+
+def remove_orphans(args, paths):
+ remove = list()
+ pathsigs = defaultdict(list)
+ for p in paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task])
+ pathsigs[sstate_sig].append(p)
+ for k, v in pathsigs.items():
+ if len([p for p in v if p.ext == SSTATE_EXTENSION]) == 0:
+ remove.extend(v)
+ return remove
+
+
+def parse_arguments():
+ parser = argparse.ArgumentParser(description="sstate cache management utility.")
+
+ parser.add_argument(
+ "--cache-dir",
+ default=os.environ.get("SSTATE_CACHE_DIR"),
+ help="""Specify sstate cache directory, will use the environment
+ variable SSTATE_CACHE_DIR if it is not specified.""",
+ )
+
+ # parser.add_argument(
+ # "--extra-archs",
+ # help="""Specify list of architectures which should be tested, this list
+ # will be extended with native arch, allarch and empty arch. The
+ # script won't be trying to generate list of available archs from
+ # AVAILTUNES in tune files.""",
+ # )
+
+ # parser.add_argument(
+ # "--extra-layer",
+ # help="""Specify the layer which will be used for searching the archs,
+ # it will search the meta and meta-* layers in the top dir by
+ # default, and will search meta, meta-*, <layer1>, <layer2>,
+ # ...<layern> when specified. Use "," as the separator.
+ #
+ # This is useless for --stamps-dir or when --extra-archs is used.""",
+ # )
+
+ parser.add_argument(
+ "-d",
+ "--remove-duplicated",
+ action="store_true",
+ help="""Remove the duplicated sstate cache files of one package, only
+ the newest one will be kept. The duplicated sstate cache files
+ of one package must have the same arch, which means sstate cache
+ files with multiple archs are not considered duplicate.
+
+ Conflicts with --stamps-dir.""",
+ )
+
+ parser.add_argument(
+ "--remove-orphans",
+ action="store_true",
+ help=f"""Remove orphan siginfo files from the sstate cache, i.e. those
+ where there is no {SSTATE_EXTENSION} file but there are associated
+ tracking files.""",
+ )
+
+ parser.add_argument(
+ "--stamps-dir",
+ action="append",
+ help="""Specify the build directory's stamps directories, the sstate
+ cache file which IS USED by these build directories will be KEPT,
+ other sstate cache files in cache-dir will be removed. Can be
+ specified multiple times for several directories.
+
+ Conflicts with --remove-duplicated.""",
+ )
+
+ parser.add_argument(
+ "-j", "--jobs", default=8, type=int, help="Run JOBS jobs in parallel."
+ )
+
+ # parser.add_argument(
+ # "-L",
+ # "--follow-symlink",
+ # action="store_true",
+ # help="Remove both the symbol link and the destination file, default: no.",
+ # )
+
+ parser.add_argument(
+ "-y",
+ "--yes",
+ action="store_true",
+ help="""Automatic yes to prompts; assume "yes" as answer to all prompts
+ and run non-interactively.""",
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", action="store_true", help="Explain what is being done."
+ )
+
+ parser.add_argument(
+ "-D",
+ "--debug",
+ action="count",
+ default=0,
+ help="Show debug info, repeat for more debug info.",
+ )
+
+ args = parser.parse_args()
+ if args.cache_dir is None or (
+ not args.remove_duplicated and not args.stamps_dir and not args.remove_orphans
+ ):
+ parser.print_usage()
+ sys.exit(1)
+
+ return args
+
+
+def main():
+ args = parse_arguments()
+
+ paths = collect_sstate_paths(args)
+ if args.remove_duplicated:
+ remove = remove_duplicated(args, paths)
+ elif args.stamps_dir:
+ remove = remove_by_stamps(args, paths)
+ else:
+ remove = list()
+
+ if args.remove_orphans:
+ remove = set(remove) | set(remove_orphans(args, paths))
+
+ if args.debug >= 1:
+ print("\n".join([str(p.path) for p in remove]))
+ print(f"{len(remove)} out of {len(paths)} files will be removed!")
+ if not args.yes:
+ print("Do you want to continue (y/n)?")
+ confirm = input() in ("y", "Y")
+ else:
+ confirm = True
+ if confirm:
+ # TODO: parallelise remove
+ for p in remove:
+ p.path.unlink()
+
+
+if __name__ == "__main__":
+ main()
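Illustrative invocations matching the argparse options defined above (the cache and stamps paths are placeholders):

    ./scripts/sstate-cache-management.py --cache-dir=sstate-cache --remove-duplicated --yes
    ./scripts/sstate-cache-management.py --cache-dir=sstate-cache --stamps-dir=build/tmp/stamps --remove-orphans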
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
deleted file mode 100755
index d39671f7c6..0000000000
--- a/scripts/sstate-cache-management.sh
+++ /dev/null
@@ -1,458 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2012 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-# Global vars
-cache_dir=
-confirm=
-fsym=
-total_deleted=0
-verbose=
-debug=0
-
-usage () {
- cat << EOF
-Welcome to sstate cache management utilities.
-sstate-cache-management.sh <OPTION>
-
-Options:
- -h, --help
- Display this help and exit.
-
- --cache-dir=<sstate cache dir>
- Specify sstate cache directory, will use the environment
- variable SSTATE_CACHE_DIR if it is not specified.
-
- --extra-archs=<arch1>,<arch2>...<archn>
- Specify list of architectures which should be tested, this list
- will be extended with native arch, allarch and empty arch. The
- script won't be trying to generate list of available archs from
- AVAILTUNES in tune files.
-
- --extra-layer=<layer1>,<layer2>...<layern>
- Specify the layer which will be used for searching the archs,
- it will search the meta and meta-* layers in the top dir by
- default, and will search meta, meta-*, <layer1>, <layer2>,
- ...<layern> when specified. Use "," as the separator.
-
- This is useless for --stamps-dir or when --extra-archs is used.
-
- -d, --remove-duplicated
- Remove the duplicated sstate cache files of one package, only
- the newest one will be kept. The duplicated sstate cache files
- of one package must have the same arch, which means sstate cache
- files with multiple archs are not considered duplicate.
-
- Conflicts with --stamps-dir.
-
- --stamps-dir=<dir1>,<dir2>...<dirn>
- Specify the build directory's stamps directories, the sstate
- cache file which IS USED by these build diretories will be KEPT,
- other sstate cache files in cache-dir will be removed. Use ","
- as the separator. For example:
- --stamps-dir=build1/tmp/stamps,build2/tmp/stamps
-
- Conflicts with --remove-duplicated.
-
- -L, --follow-symlink
- Remove both the symbol link and the destination file, default: no.
-
- -y, --yes
- Automatic yes to prompts; assume "yes" as answer to all prompts
- and run non-interactively.
-
- -v, --verbose
- Explain what is being done.
-
- -D, --debug
- Show debug info, repeat for more debug info.
-
-EOF
-}
-
-if [ $# -lt 1 ]; then
- usage
- exit 0
-fi
-
-# Echo no files to remove
-no_files () {
- echo No files to remove
-}
-
-# Echo nothing to do
-do_nothing () {
- echo Nothing to do
-}
-
-# Read the input "y"
-read_confirm () {
- echo "$total_deleted out of $total_files files will be removed! "
- if [ "$confirm" != "y" ]; then
- echo "Do you want to continue (y/n)? "
- while read confirm; do
- [ "$confirm" = "Y" -o "$confirm" = "y" -o "$confirm" = "n" \
- -o "$confirm" = "N" ] && break
- echo "Invalid input \"$confirm\", please input 'y' or 'n': "
- done
- else
- echo
- fi
-}
-
-# Print error information and exit.
-echo_error () {
- echo "ERROR: $1" >&2
- exit 1
-}
-
-# Generate the remove list:
-#
-# * Add .done/.siginfo to the remove list
-# * Add destination of symlink to the remove list
-#
-# $1: output file, others: sstate cache file (.tar.zst)
-gen_rmlist (){
- local rmlist_file="$1"
- shift
- local files="$@"
- for i in $files; do
- echo $i >> $rmlist_file
- # Add the ".siginfo"
- if [ -e $i.siginfo ]; then
- echo $i.siginfo >> $rmlist_file
- fi
- # Add the destination of symlink
- if [ -L "$i" ]; then
- if [ "$fsym" = "y" ]; then
- dest="`readlink -e $i`"
- if [ -n "$dest" ]; then
- echo $dest >> $rmlist_file
- # Remove the .siginfo when .tar.zst is removed
- if [ -f "$dest.siginfo" ]; then
- echo $dest.siginfo >> $rmlist_file
- fi
- fi
- fi
- # Add the ".tar.zst.done" and ".siginfo.done" (may exist in the future)
- base_fn="${i##/*/}"
- t_fn="$base_fn.done"
- s_fn="$base_fn.siginfo.done"
- for d in $t_fn $s_fn; do
- if [ -f $cache_dir/$d ]; then
- echo $cache_dir/$d >> $rmlist_file
- fi
- done
- fi
- done
-}
-
-# Remove the duplicated cache files for the pkg, keep the newest one
-remove_duplicated () {
-
- local topdir
- local oe_core_dir
- local tunedirs
- local all_archs
- local all_machines
- local ava_archs
- local arch
- local file_names
- local sstate_files_list
- local fn_tmp
- local list_suffix=`mktemp` || exit 1
-
- if [ -z "$extra_archs" ] ; then
- # Find out the archs in all the layers
- echo "Figuring out the archs in the layers ... "
- oe_core_dir=$(dirname $(dirname $(readlink -e $0)))
- topdir=$(dirname $oe_core_dir)
- tunedirs="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/include' 2>/dev/null`"
- [ -n "$tunedirs" ] || echo_error "Can't find the tune directory"
- all_machines="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/*' -name '*.conf' 2>/dev/null | sed -e 's/.*\///' -e 's/.conf$//'`"
- all_archs=`grep -r -h "^AVAILTUNES .*=" $tunedirs | sed -e 's/.*=//' -e 's/\"//g'`
- fi
-
- # Use the "_" to substitute "-", e.g., x86-64 to x86_64, but not for extra_archs which can be something like cortexa9t2-vfp-neon
- # Sort to remove the duplicated ones
- # Add allarch and builder arch (native)
- builder_arch=$(uname -m)
- all_archs="$(echo allarch $all_archs $all_machines $builder_arch \
- | sed -e 's/-/_/g' -e 's/ /\n/g' | sort -u) $extra_archs"
- echo "Done"
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -name 'sstate*' | wc -l`
- # Save all the sstate files in a file
- sstate_files_list=`mktemp` || exit 1
- find $cache_dir -iname 'sstate:*:*:*:*:*:*:*.tar.zst*' >$sstate_files_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tar\.zst.*%\1%g' $sstate_files_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- echo "Figuring out the archs in the sstate cache dir ... "
- # Using this SSTATE_PKGSPEC definition it's 6th colon separated field
- # SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
- for arch in $all_archs; do
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tar\.zst$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs $arch"
- # ${builder_arch}_$arch used by toolchain sstate
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tar\.zst$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs ${builder_arch}_$arch"
- done
- echo "Done"
- echo "The following archs have been found in the cache dir:"
- echo $ava_archs
- echo ""
-
- # Save the file list which needs to be removed
- local remove_listdir=`mktemp -d` || exit 1
- for suffix in $sstate_suffixes; do
- if [ "$suffix" = "populate_lic" ] ; then
- echo "Skipping populate_lic, because removing duplicates doesn't work correctly for them (use --stamps-dir instead)"
- continue
- fi
- # Total number of files including .siginfo and .done files
- total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst.*" $sstate_files_list | wc -l 2>/dev/null`
- total_archive_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst$" $sstate_files_list | wc -l 2>/dev/null`
- # Save the file list to a file, some suffix's file may not exist
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tar\.zst.*" $sstate_files_list >$list_suffix 2>/dev/null
- local deleted_archives=0
- local deleted_files=0
- for ext in tar.zst tar.zst.siginfo tar.zst.done; do
- echo "Figuring out the sstate:xxx_$suffix.$ext ... "
- # Uniq BPNs
- file_names=`for arch in $ava_archs ""; do
- sed -ne "s%.*/sstate:\([^:]*\):[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$%\1%p" $list_suffix
- done | sort -u`
-
- fn_tmp=`mktemp` || exit 1
- rm_list="$remove_listdir/sstate:xxx_$suffix"
- for fn in $file_names; do
- [ -z "$verbose" ] || echo "Analyzing sstate:$fn-xxx_$suffix.${ext}"
- for arch in $ava_archs ""; do
- grep -h ".*/sstate:$fn:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$" $list_suffix >$fn_tmp
- if [ -s $fn_tmp ] ; then
- [ $debug -gt 1 ] && echo "Available files for $fn-$arch- with suffix $suffix.${ext}:" && cat $fn_tmp
- # Use the modification time
- to_del=$(ls -t $(cat $fn_tmp) | sed -n '1!p')
- [ $debug -gt 2 ] && echo "Considering to delete: $to_del"
- # The sstate file which is downloaded from the SSTATE_MIRROR is
- # put in SSTATE_DIR, and there is a symlink in SSTATE_DIR/??/ to
- # it, so filter it out from the remove list if it should not be
- # removed.
- to_keep=$(ls -t $(cat $fn_tmp) | sed -n '1p')
- [ $debug -gt 2 ] && echo "Considering to keep: $to_keep"
- for k in $to_keep; do
- if [ -L "$k" ]; then
- # The symlink's destination
- k_dest="`readlink -e $k`"
- # Maybe it is the one in cache_dir
- k_maybe="$cache_dir/${k##/*/}"
- # Remove it from the remove list if they are the same.
- if [ "$k_dest" = "$k_maybe" ]; then
- to_del="`echo $to_del | sed 's#'\"$k_maybe\"'##g'`"
- fi
- fi
- done
- rm -f $fn_tmp
- [ $debug -gt 2 ] && echo "Decided to delete: $to_del"
- gen_rmlist $rm_list.$ext "$to_del"
- fi
- done
- done
- done
- deleted_archives=`cat $rm_list.* 2>/dev/null | grep "\.tar\.zst$" | wc -l`
- deleted_files=`cat $rm_list.* 2>/dev/null | wc -l`
- [ "$deleted_files" -gt 0 -a $debug -gt 0 ] && cat $rm_list.*
- echo "($deleted_archives out of $total_archives_suffix .tar.zst files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
- done
- deleted_archives=0
- rm_old_list=$remove_listdir/sstate-old-filenames
- find $cache_dir -name 'sstate-*.tar.zst' >$rm_old_list
- [ -s "$rm_old_list" ] && deleted_archives=`cat $rm_old_list | grep "\.tar\.zst$" | wc -l`
- [ -s "$rm_old_list" ] && deleted_files=`cat $rm_old_list | wc -l`
- [ -s "$rm_old_list" -a $debug -gt 0 ] && cat $rm_old_list
- echo "($deleted_archives or .tar.zst files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
-
- rm -f $list_suffix
- rm -f $sstate_files_list
- if [ $total_deleted -gt 0 ]; then
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- for list in `ls $remove_listdir/`; do
- echo "Removing $list.tar.zst archive (`cat $remove_listdir/$list | wc -w` files) ... "
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $remove_listdir/$list`; do
- rm -f $verbose $i
- done
- echo "Done"
- done
- echo "$total_deleted files have been removed!"
- else
- do_nothing
- fi
- else
- no_files
- fi
- [ -d $remove_listdir ] && rm -fr $remove_listdir
-}
-
-# Remove the sstate file by stamps dir, the file not used by the stamps dir
-# will be removed.
-rm_by_stamps (){
-
- local cache_list=`mktemp` || exit 1
- local keep_list=`mktemp` || exit 1
- local rm_list=`mktemp` || exit 1
- local sums
- local all_sums
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -type f -name 'sstate*' | wc -l`
- # Save all the state file list to a file
- find $cache_dir -type f -name 'sstate*' | sort -u -o $cache_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tar\.zst.*%\1%g' $cache_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- # Figure out all the md5sums in the stamps dir.
- echo "Figuring out all the md5sums in stamps dir ... "
- for i in $sstate_suffixes; do
- # There is no "\.sigdata" but "_setcene" when it is mirrored
- # from the SSTATE_MIRRORS, use them to figure out the sum.
- sums=`find $stamps -maxdepth 3 -name "*.do_$i.*" \
- -o -name "*.do_${i}_setscene.*" | \
- sed -ne 's#.*_setscene\.##p' -e 's#.*\.sigdata\.##p' | \
- sed -e 's#\..*##' | sort -u`
- all_sums="$all_sums $sums"
- done
- echo "Done"
-
- echo "Figuring out the files which will be removed ... "
- for i in $all_sums; do
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:${i}_.*" $cache_list >>$keep_list
- done
- echo "Done"
-
- if [ -s $keep_list ]; then
- sort -u $keep_list -o $keep_list
- to_del=`comm -1 -3 $keep_list $cache_list`
- gen_rmlist $rm_list "$to_del"
- let total_deleted=`cat $rm_list | sort -u | wc -w`
- if [ $total_deleted -gt 0 ]; then
- [ $debug -gt 0 ] && cat $rm_list | sort -u
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- echo "Removing sstate cache files ... ($total_deleted files)"
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $rm_list | sort -u`; do
- rm -f $verbose $i
- done
- echo "$total_deleted files have been removed"
- else
- do_nothing
- fi
- else
- no_files
- fi
- else
- echo_error "All files in cache dir will be removed! Abort!"
- fi
-
- rm -f $cache_list
- rm -f $keep_list
- rm -f $rm_list
-}
-
-# Parse arguments
-while [ -n "$1" ]; do
- case $1 in
- --cache-dir=*)
- cache_dir=`echo $1 | sed -e 's#^--cache-dir=##' | xargs readlink -e`
- [ -d "$cache_dir" ] || echo_error "Invalid argument to --cache-dir"
- shift
- ;;
- --remove-duplicated|-d)
- rm_duplicated="y"
- shift
- ;;
- --yes|-y)
- confirm="y"
- shift
- ;;
- --follow-symlink|-L)
- fsym="y"
- shift
- ;;
- --extra-archs=*)
- extra_archs=`echo $1 | sed -e 's#^--extra-archs=##' -e 's#,# #g'`
- [ -n "$extra_archs" ] || echo_error "Invalid extra arch parameter"
- shift
- ;;
- --extra-layer=*)
- extra_layers=`echo $1 | sed -e 's#^--extra-layer=##' -e 's#,# #g'`
- [ -n "$extra_layers" ] || echo_error "Invalid extra layer parameter"
- for i in $extra_layers; do
- l=`readlink -e $i`
- if [ -d "$l" ]; then
- layers="$layers $l"
- else
- echo_error "Can't find layer $i"
- fi
- done
- shift
- ;;
- --stamps-dir=*)
- stamps=`echo $1 | sed -e 's#^--stamps-dir=##' -e 's#,# #g'`
- [ -n "$stamps" ] || echo_error "Invalid stamps dir $i"
- for i in $stamps; do
- [ -d "$i" ] || echo_error "Invalid stamps dir $i"
- done
- shift
- ;;
- --verbose|-v)
- verbose="-v"
- shift
- ;;
- --debug|-D)
- debug=`expr $debug + 1`
- echo "Debug level $debug"
- shift
- ;;
- --help|-h)
- usage
- exit 0
- ;;
- *)
- echo "Invalid arguments $*"
- echo_error "Try 'sstate-cache-management.sh -h' for more information."
- ;;
- esac
-done
-
-# sstate cache directory, use environment variable SSTATE_CACHE_DIR
-# if it was not specified, otherwise, error.
-[ -n "$cache_dir" ] || cache_dir=$SSTATE_CACHE_DIR
-[ -n "$cache_dir" ] || echo_error "No cache dir found!"
-[ -d "$cache_dir" ] || echo_error "Invalid cache directory \"$cache_dir\""
-
-[ -n "$rm_duplicated" -a -n "$stamps" ] && \
- echo_error "Can not use both --remove-duplicated and --stamps-dir"
-
-[ "$rm_duplicated" = "y" ] && remove_duplicated
-[ -n "$stamps" ] && rm_by_stamps
-[ -z "$rm_duplicated" -a -z "$stamps" ] && \
- echo "What do you want to do?"
-exit 0
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index 0e5b75b1f7..67cc71950f 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -168,14 +168,13 @@ def main():
layers_tested = 0
for layer in layers:
- if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
- layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ if layer['type'] in (LayerType.ERROR_NO_LAYER_CONF, LayerType.ERROR_BSP_DISTRO):
continue
# Reset to a clean backup copy for each run
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
- if check_bblayers(bblayersconf, layer['path'], logger):
+ if layer['type'] not in (LayerType.CORE, ) and check_bblayers(bblayersconf, layer['path'], logger):
logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
"in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
results[layer['name']] = None
diff --git a/scripts/yocto_testresults_query.py b/scripts/yocto_testresults_query.py
new file mode 100755
index 0000000000..521ead8473
--- /dev/null
+++ b/scripts/yocto_testresults_query.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+
+# Yocto Project test results management tool
+# This script is a thin layer over resulttool to manage test results and regression reports.
+# Its main feature is to translate tags or branch names into SHA-1 revisions, and then to run resulttool
+# with those computed revisions
+#
+# Copyright (C) 2023 OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+import os
+import argparse
+import subprocess
+import tempfile
+import lib.scriptutils as scriptutils
+
+script_path = os.path.dirname(os.path.realpath(__file__))
+poky_path = os.path.abspath(os.path.join(script_path, ".."))
+resulttool = os.path.abspath(os.path.join(script_path, "resulttool"))
+logger = scriptutils.logger_create(sys.argv[0])
+testresults_default_url="git://git.yoctoproject.org/yocto-testresults"
+
+def create_workdir():
+ workdir = tempfile.mkdtemp(prefix='yocto-testresults-query.')
+ logger.info(f"Shallow-cloning testresults in {workdir}")
+ subprocess.check_call(["git", "clone", testresults_default_url, workdir, "--depth", "1"])
+ return workdir
+
+def get_sha1(pokydir, revision):
+ try:
+ rev = subprocess.check_output(["git", "rev-list", "-n", "1", revision], cwd=pokydir).decode('utf-8').strip()
+ logger.info(f"SHA-1 revision for {revision} in {pokydir} is {rev}")
+ return rev
+ except subprocess.CalledProcessError:
+ logger.error(f"Can not find SHA-1 for {revision} in {pokydir}")
+ return None
+
+def get_branch(tag):
+ # The tags in test results repository, as returned by git rev-list, have the following form:
+ # refs/tags/<branch>/<count>-g<sha1>/<num>
+ return '/'.join(tag.split("/")[2:-2])
+
+def fetch_testresults(workdir, sha1):
+ logger.info(f"Fetching test results for {sha1} in {workdir}")
+ rawtags = subprocess.check_output(["git", "ls-remote", "--refs", "--tags", "origin", f"*{sha1}*"], cwd=workdir).decode('utf-8').strip()
+ if not rawtags:
+ raise Exception(f"No reference found for commit {sha1} in {workdir}")
+ branch = ""
+ for rev in [rawtag.split()[1] for rawtag in rawtags.splitlines()]:
+ if not branch:
+ branch = get_branch(rev)
+ logger.info(f"Fetching matching revision: {rev}")
+ subprocess.check_call(["git", "fetch", "--depth", "1", "origin", f"{rev}:{rev}"], cwd=workdir)
+ return branch
+
+def compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args):
+ logger.info(f"Running resulttool regression between SHA1 {baserevision} and {targetrevision}")
+ command = [resulttool, "regression-git", "--branch", basebranch, "--commit", baserevision, "--branch2", targetbranch, "--commit2", targetrevision, workdir]
+ if args.limit:
+ command.extend(["-l", args.limit])
+ report = subprocess.check_output(command).decode("utf-8")
+ return report
+
+def print_report_with_header(report, baseversion, baserevision, targetversion, targetrevision):
+ print("========================== Regression report ==============================")
+ print(f'{"=> Target:": <16}{targetversion: <16}({targetrevision})')
+ print(f'{"=> Base:": <16}{baseversion: <16}({baserevision})')
+ print("===========================================================================\n")
+ print(report, end='')
+
+def regression(args):
+ logger.info(f"Compute regression report between {args.base} and {args.target}")
+ if args.testresultsdir:
+ workdir = args.testresultsdir
+ else:
+ workdir = create_workdir()
+
+ try:
+ baserevision = get_sha1(poky_path, args.base)
+ targetrevision = get_sha1(poky_path, args.target)
+ if not baserevision or not targetrevision:
+ logger.error("One or more revision(s) missing. You might be targeting nonexistant tags/branches, or are in wrong repository (you must use Poky and not oe-core)")
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+ sys.exit(1)
+ basebranch = fetch_testresults(workdir, baserevision)
+ targetbranch = fetch_testresults(workdir, targetrevision)
+ report = compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args)
+ print_report_with_header(report, args.base, baserevision, args.target, targetrevision)
+ finally:
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+
+def main():
+ parser = argparse.ArgumentParser(description="Yocto Project test results helper")
+ subparsers = parser.add_subparsers(
+ help="Supported commands for test results helper",
+ required=True)
+ parser_regression_report = subparsers.add_parser(
+ "regression-report",
+ help="Generate regression report between two fixed revisions. Revisions can be branch name or tag")
+ parser_regression_report.add_argument(
+ 'base',
+ help="Revision or tag against which to compare results (i.e: the older)")
+ parser_regression_report.add_argument(
+ 'target',
+ help="Revision or tag to compare against the base (i.e: the newer)")
+ parser_regression_report.add_argument(
+ '-t',
+ '--testresultsdir',
+ help=f"An existing test results directory. {sys.argv[0]} will automatically clone it and use default branch if not provided")
+ parser_regression_report.add_argument(
+ '-l',
+ '--limit',
+ help=f"Maximum number of changes to display per test. Can be set to 0 to print all changes")
+ parser_regression_report.set_defaults(func=regression)
+
+ args = parser.parse_args()
+ args.func(args)
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
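An illustrative run of the new helper, with both revision arguments standing in for real tags or branch names present in the poky checkout:

    ./scripts/yocto_testresults_query.py regression-report <older-tag-or-branch> <newer-tag-or-branch> --limit 20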