Diffstat (limited to 'meta/classes')
 meta/classes/allarch.bbclass | 2
 meta/classes/archiver.bbclass | 31
 meta/classes/autotools.bbclass | 34
 meta/classes/baremetal-image.bbclass | 38
 meta/classes/base.bbclass | 200
 meta/classes/bash-completion.bbclass | 6
 meta/classes/bin_package.bbclass | 2
 meta/classes/binconfig-disabled.bbclass | 4
 meta/classes/binconfig.bbclass | 2
 meta/classes/blacklist.bbclass | 20
 meta/classes/buildhistory.bbclass | 183
 meta/classes/buildstats.bbclass | 84
 meta/classes/cargo.bbclass | 90
 meta/classes/cargo_common.bbclass | 124
 meta/classes/ccache.bbclass | 6
 meta/classes/clutter.bbclass | 18
 meta/classes/cmake.bbclass | 82
 meta/classes/cml1.bbclass | 36
 meta/classes/compress_doc.bbclass | 6
 meta/classes/core-image.bbclass | 4
 meta/classes/cpan-base.bbclass | 15
 meta/classes/cpan.bbclass | 4
 meta/classes/create-spdx.bbclass | 1022
 meta/classes/cross-canadian.bbclass | 18
 meta/classes/cross.bbclass | 8
 meta/classes/cve-check.bbclass | 227
 meta/classes/debian.bbclass | 18
 meta/classes/deploy.bbclass | 2
 meta/classes/devicetree.bbclass | 10
 meta/classes/devshell.bbclass | 11
 meta/classes/devtool-source.bbclass | 4
 meta/classes/devupstream.bbclass | 25
 meta/classes/distrooverrides.bbclass | 6
 meta/classes/distutils3-base.bbclass | 5
 meta/classes/distutils3.bbclass | 65
 meta/classes/externalsrc.bbclass | 39
 meta/classes/extrausers.bbclass | 7
 meta/classes/features_check.bbclass | 13
 meta/classes/fontcache.bbclass | 10
 meta/classes/gconf.bbclass | 12
 meta/classes/gettext.bbclass | 6
 meta/classes/gi-docgen.bbclass | 24
 meta/classes/gio-module-cache.bbclass | 10
 meta/classes/glide.bbclass | 4
 meta/classes/gnomebase.bbclass | 9
 meta/classes/go-mod.bbclass | 2
 meta/classes/go-ptest.bbclass | 2
 meta/classes/go.bbclass | 58
 meta/classes/goarch.bbclass | 63
 meta/classes/gobject-introspection.bbclass | 28
 meta/classes/godep.bbclass | 8
 meta/classes/grub-efi-cfg.bbclass | 1
 meta/classes/gsettings.bbclass | 14
 meta/classes/gtk-doc.bbclass | 23
 meta/classes/gtk-icon-cache.bbclass | 48
 meta/classes/gtk-immodules-cache.bbclass | 11
 meta/classes/icecc.bbclass | 91
 meta/classes/image-artifact-names.bbclass | 22
 meta/classes/image-combined-dbg.bbclass | 2
 meta/classes/image-container.bbclass | 2
 meta/classes/image-live.bbclass | 14
 meta/classes/image-mklibs.bbclass | 56
 meta/classes/image-prelink.bbclass | 81
 meta/classes/image.bbclass | 92
 meta/classes/image_types.bbclass | 150
 meta/classes/image_types_wic.bbclass | 54
 meta/classes/insane.bbclass | 480
 meta/classes/kernel-artifact-names.bbclass | 13
 meta/classes/kernel-devicetree.bbclass | 56
 meta/classes/kernel-fitimage.bbclass | 549
 meta/classes/kernel-grub.bbclass | 4
 meta/classes/kernel-module-split.bbclass | 64
 meta/classes/kernel-uboot.bbclass | 14
 meta/classes/kernel-yocto.bbclass | 371
 meta/classes/kernel.bbclass | 242
 meta/classes/libc-package.bbclass | 34
 meta/classes/license.bbclass | 133
 meta/classes/license_image.bbclass | 95
 meta/classes/linux-dummy.bbclass | 26
 meta/classes/linuxloader.bbclass | 18
 meta/classes/manpages.bbclass | 11
 meta/classes/meson-routines.bbclass | 51
 meta/classes/meson.bbclass | 167
 meta/classes/meta.bbclass | 4
 meta/classes/metadata_scm.bbclass | 8
 meta/classes/mime-xdg.bbclass | 12
 meta/classes/mime.bbclass | 12
 meta/classes/mirrors.bbclass | 136
 meta/classes/module.bbclass | 6
 meta/classes/multilib.bbclass | 37
 meta/classes/multilib_global.bbclass | 86
 meta/classes/multilib_header.bbclass | 4
 meta/classes/multilib_script.bbclass | 4
 meta/classes/native.bbclass | 66
 meta/classes/nativesdk.bbclass | 9
 meta/classes/nopackages.bbclass | 1
 meta/classes/npm.bbclass | 68
 meta/classes/overlayfs-etc.bbclass | 76
 meta/classes/overlayfs.bbclass | 119
 meta/classes/own-mirrors.bbclass | 25
 meta/classes/package.bbclass | 462
 meta/classes/package_deb.bbclass | 9
 meta/classes/package_ipk.bbclass | 13
 meta/classes/package_pkgdata.bbclass | 2
 meta/classes/package_rpm.bbclass | 64
 meta/classes/package_tar.bbclass | 6
 meta/classes/packagedata.bbclass | 4
 meta/classes/packagefeed-stability.bbclass | 252
 meta/classes/packagegroup.bbclass | 2
 meta/classes/patch.bbclass | 10
 meta/classes/pixbufcache.bbclass | 16
 meta/classes/pkgconfig.bbclass | 2
 meta/classes/populate_sdk_base.bbclass | 43
 meta/classes/populate_sdk_ext.bbclass | 163
 meta/classes/ptest-gnome.bbclass | 6
 meta/classes/ptest-perl.bbclass | 6
 meta/classes/ptest.bbclass | 39
 meta/classes/pypi.bbclass | 8
 meta/classes/python3-dir.bbclass | 2
 meta/classes/python3native.bbclass | 6
 meta/classes/python3targetconfig.bbclass | 29
 meta/classes/python_flit_core.bbclass | 5
 meta/classes/python_pep517.bbclass | 56
 meta/classes/python_poetry_core.bbclass | 5
 meta/classes/python_pyo3.bbclass | 30
 meta/classes/python_setuptools3_rust.bbclass | 11
 meta/classes/qemu.bbclass | 2
 meta/classes/qemuboot.bbclass | 37
 meta/classes/report-error.bbclass | 2
 meta/classes/reproducible_build.bbclass | 202
 meta/classes/reproducible_build_simple.bbclass | 9
 meta/classes/rm_work.bbclass | 12
 meta/classes/rm_work_and_downloads.bbclass | 2
 meta/classes/rootfs-postcommands.bbclass | 75
 meta/classes/rootfs_deb.bbclass | 6
 meta/classes/rootfs_ipk.bbclass | 6
 meta/classes/rootfs_rpm.bbclass | 6
 meta/classes/rootfsdebugfiles.bbclass | 2
 meta/classes/rust-bin.bbclass | 149
 meta/classes/rust-common.bbclass | 185
 meta/classes/rust.bbclass | 45
 meta/classes/sanity.bbclass | 150
 meta/classes/scons.bbclass | 11
 meta/classes/setuptools3-base.bbclass (renamed from meta/classes/distutils-common-base.bbclass) | 14
 meta/classes/setuptools3.bbclass | 33
 meta/classes/setuptools3_legacy.bbclass | 78
 meta/classes/setuptools_build_meta.bbclass | 5
 meta/classes/sign_package_feed.bbclass | 2
 meta/classes/siteinfo.bbclass | 59
 meta/classes/sstate.bbclass | 351
 meta/classes/staging.bbclass | 73
 meta/classes/systemd-boot.bbclass | 2
 meta/classes/systemd.bbclass | 49
 meta/classes/terminal.bbclass | 5
 meta/classes/testexport.bbclass | 8
 meta/classes/testimage.bbclass | 70
 meta/classes/testsdk.bbclass | 2
 meta/classes/texinfo.bbclass | 12
 meta/classes/toaster.bbclass | 2
 meta/classes/toolchain-scripts.bbclass | 10
 meta/classes/uboot-config.bbclass | 72
 meta/classes/uboot-extlinux-config.bbclass | 3
 meta/classes/uboot-sign.bbclass | 422
 meta/classes/uninative.bbclass | 23
 meta/classes/update-alternatives.bbclass | 40
 meta/classes/update-rc.d.bbclass | 28
 meta/classes/useradd-staticids.bbclass | 30
 meta/classes/useradd.bbclass | 28
 meta/classes/useradd_base.bbclass | 18
 meta/classes/utility-tasks.bbclass | 3
 meta/classes/utils.bbclass | 37
 meta/classes/vala.bbclass | 8
 meta/classes/waf.bbclass | 26
 meta/classes/xmlcatalog.bbclass | 2
 meta/classes/yocto-check-layer.bbclass | 16
 175 files changed, 6767 insertions(+), 3285 deletions(-)
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
index 5bd5c44a27..a766a654a9 100644
--- a/meta/classes/allarch.bbclass
+++ b/meta/classes/allarch.bbclass
@@ -61,3 +61,5 @@ python () {
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+ return 'false'
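Note: the stub above overrides qemu_wrapper_cmdline() from qemu.bbclass for allarch recipes, which have no target architecture to emulate. Returning the shell builtin "false" means any caller that tries to run target binaries through the wrapper fails cleanly instead of invoking a bogus qemu. A hypothetical caller-side sketch of that contract (not code from this series):

    # Assumed usage pattern: for allarch the wrapper is the literal "false"
    qemu_cmd = qemu_wrapper_cmdline(d, d.getVar('IMAGE_ROOTFS'), [d.getVar('libdir'), d.getVar('base_libdir')])
    if qemu_cmd == 'false':
        bb.note("no usable qemu wrapper for allarch, deferring to first boot")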
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index aff1f9dbb0..c19c770d11 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -5,7 +5,7 @@
# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) configured source: ARCHIVER_MODE[src] = "configured"
-# 4) source mirror: ARCHIVE_MODE[src] = "mirror"
+# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
# 5) The patches between do_unpack and do_patch:
# ARCHIVER_MODE[diff] = "1"
# And you can set the one that you'd like to exclude from the diff:
@@ -51,9 +51,10 @@ ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
+ARCHIVER_MODE[compression] ?= "xz"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
-ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
+ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
@@ -62,7 +63,7 @@ ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
# where multiple recipes use the same SRC_URI.
ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}/mirror"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
@@ -118,7 +119,7 @@ python () {
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
# We can't use "addtask do_ar_configured after do_configure" since it
- # will cause the deptask of do_populate_sysroot to run not matter what
+ # will cause the deptask of do_populate_sysroot to run no matter what
# archives we need, so we add the depends here.
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
@@ -163,7 +164,7 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
}
-# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
+# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {
@@ -281,7 +282,10 @@ python do_ar_configured() {
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
# do_configure, we archive the already configured ${S} to
# instead of.
- elif pn != 'libtool-native':
+    # The kernel class functions require it to be on work-shared, so we
+    # don't unpack, patch or configure again; we just archive the
+    # already-configured ${S}.
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -406,15 +410,16 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
# that we archive the actual directory and not just the link.
srcdir = os.path.realpath(srcdir)
+ compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
+ filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
else:
- filename = '%s.tar.gz' % d.getVar('PF')
+ filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
- tar = tarfile.open(tarname, 'w:gz')
+ tar = tarfile.open(tarname, 'w:%s' % compression_method)
tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
tar.close()
@@ -463,7 +468,7 @@ python do_unpack_and_patch() {
ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
pn = d.getVar('PN')
- # The kernel class functions require it to be on work-shared, so we dont change WORKDIR
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
@@ -483,6 +488,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
@@ -505,7 +513,7 @@ python do_unpack_and_patch() {
# of the output file ensures that we create it each time the recipe
# gets rebuilt, at least as long as a PR server is used. We also rely
# on that mechanism to catch changes in the file content, because the
-# file content is not part of of the task signature either.
+# file content is not part of the task signature either.
do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
@@ -590,6 +598,7 @@ addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives
do_build[recrdeptask] += "do_deploy_archives"
+do_rootfs[recrdeptask] += "do_deploy_archives"
do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
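For reference, the new ARCHIVER_MODE[compression] value is passed straight to tarfile.open('w:%s' % ...) in create_tarball() above, so the usable values are the modes Python's tarfile accepts: "gz", "bz2" and "xz" (the new default). A minimal local.conf sketch (values illustrative):

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "patched"
    ARCHIVER_MODE[compression] = "bz2"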
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 6c2a33ac72..4ab2460990 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -1,11 +1,11 @@
-def autotools_dep_prepend(d):
+def get_autotools_dep(d):
if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
return ''
pn = d.getVar('PN')
deps = ''
- if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
+ if pn in ['autoconf-native', 'automake-native']:
return deps
deps += 'autoconf-native automake-native '
@@ -17,9 +17,10 @@ def autotools_dep_prepend(d):
and not d.getVar('INHIBIT_DEFAULT_DEPS'):
deps += 'libtool-cross '
- return deps + 'gnu-config-native '
+ return deps
-DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
+
+DEPENDS:prepend = "${@get_autotools_dep(d)} "
inherit siteinfo
@@ -30,7 +31,7 @@ inherit siteinfo
export CONFIG_SITE
acpaths ?= "default"
-EXTRA_AUTORECONF = "--exclude=autopoint"
+EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -90,7 +91,7 @@ oe_runconf () {
cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
- if ! ${CACHED_CONFIGUREVARS} CONFIG_SHELL=/bin/bash $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+ if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
bbnote "The following config.log files may provide further information."
bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
bbfatal_log "configure failed"
@@ -131,7 +132,7 @@ autotools_postconfigure(){
EXTRACONFFUNCS ??= ""
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
do_compile[prefuncs] += "autotools_aclocals"
@@ -140,13 +141,16 @@ do_configure[postfuncs] += "autotools_postconfigure"
ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
ACLOCALEXTRAPATH = ""
-ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
-ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
python autotools_aclocals () {
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
+ sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
+ d.setVar("CONFIG_SITE", " ".join(sitefiles))
}
+do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
+
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
autotools_do_configure() {
@@ -215,21 +219,13 @@ autotools_do_configure() {
PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
- if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
- if ! echo "${DEPENDS}" | grep -q intltool-native; then
- bbwarn "Missing DEPENDS on intltool-native"
- fi
- PRUNE_M4="$PRUNE_M4 intltool.m4"
- bbnote Executing intltoolize --copy --force --automake
- intltoolize --copy --force --automake
- fi
for i in $PRUNE_M4; do
find ${S} -ignore_readdir_race -name $i -delete
done
bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
- ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
+ ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
fi
if [ -e ${CONFIGURE_SCRIPT} ]; then
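The DEPENDS:prepend and EXTRA_OECONF:append changes above are part of the tree-wide move to the new BitBake override syntax that this series applies to every class: the underscore override separator becomes a colon. A quick before/after reference:

    DEPENDS_prepend = "foo "         ->  DEPENDS:prepend = "foo "
    EXTRA_OECONF_append = " --bar"   ->  EXTRA_OECONF:append = " --bar"
    FILES_${PN}-dev += "..."         ->  FILES:${PN}-dev += "..."
    ACLOCALEXTRAPATH_class-target    ->  ACLOCALEXTRAPATH:class-target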
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
index 90d58f2615..81f5e5e93d 100644
--- a/meta/classes/baremetal-image.bbclass
+++ b/meta/classes/baremetal-image.bbclass
@@ -12,8 +12,8 @@
# Toolchain should be baremetal or newlib based.
# TCLIBC="baremetal" or TCLIBC="newlib"
-COMPATIBLE_HOST_libc-musl_class-target = "null"
-COMPATIBLE_HOST_libc-glibc_class-target = "null"
+COMPATIBLE_HOST:libc-musl:class-target = "null"
+COMPATIBLE_HOST:libc-glibc:class-target = "null"
inherit rootfs-postcommands
@@ -50,15 +50,18 @@ python do_rootfs(){
if os.path.lexists(manifest_link):
os.remove(manifest_link)
os.symlink(os.path.basename(manifest_name), manifest_link)
+ # A lot of postprocess commands assume the existence of rootfs/etc
+ sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
+ bb.utils.mkdirhier(sysconfdir)
+
execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
}
# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
+SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
@@ -74,7 +77,22 @@ QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
QB_MEM ?= "-m 256"
QB_DEFAULT_FSTYPE ?= "bin"
QB_DTB ?= ""
-QB_OPT_APPEND = "-nographic"
+QB_OPT_APPEND:append = " -nographic"
+
+# RISC-V tunes set the BIOS, unset, and instruct QEMU to
+# ignore the BIOS and boot from -kernel
+QB_DEFAULT_BIOS:qemuriscv64 = ""
+QB_DEFAULT_BIOS:qemuriscv32 = ""
+QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
+QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
+
+
+# Use the medium-any code model for the RISC-V 64 bit implementation,
+# since medlow can only access addresses below 0x80000000 and RAM
+# starts at 0x80000000 on RISC-V 64
+# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
+CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+
# This next part is necessary to trick the build system into thinking
# its building an image recipe so it generates the qemuboot.conf
@@ -87,13 +105,17 @@ inherit qemuboot
python(){
# do_addto_recipe_sysroot doesnt exist for all recipes, but we need it to have
# /usr/bin on recipe-sysroot (qemu) populated
+    # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGEDEPENDS now,
+ # we just need to add the logic to add its dependency to do_image.
def extraimage_getdepends(task):
deps = ""
for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
# Make sure we only add it for qemu
if 'qemu' in dep:
- deps += " %s:%s" % (dep, task)
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
return deps
- d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot'))
- d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
}
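With the QB_* settings above, runqemu for a 64-bit RISC-V baremetal image should end up invoking QEMU roughly as follows (a sketch; the deploy path and image name depend on the build):

    qemu-system-riscv64 -nographic -bios none -m 256 \
        -kernel tmp/deploy/images/qemuriscv64/<IMAGE_LINK_NAME>.bin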
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 4c681cc870..cc81461473 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -12,7 +12,7 @@ inherit logging
OE_EXTRA_IMPORTS ?= ""
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
PACKAGECONFIG_CONFARGS ??= ""
@@ -66,18 +66,18 @@ oe_runmake() {
}
-def base_dep_prepend(d):
+def get_base_dep(d):
if d.getVar('INHIBIT_DEFAULT_DEPS', False):
return ""
return "${BASE_DEFAULT_DEPS}"
-BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
+BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
BASEDEPENDS = ""
-BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
-BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
+BASEDEPENDS:class-target = "${@get_base_dep(d)}"
+BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
-DEPENDS_prepend="${BASEDEPENDS} "
+DEPENDS:prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with imediate expansion as it has to run
@@ -91,7 +91,7 @@ def extra_path_elements(d):
path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
return path
-PATH_prepend = "${@extra_path_elements(d)}"
+PATH:prepend = "${@extra_path_elements(d)}"
def get_lic_checksum_file_list(d):
filelist = []
@@ -150,17 +150,18 @@ do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
+do_fetch[network] = "1"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
addtask unpack after do_fetch
@@ -170,15 +171,53 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
+}
+
+SSTATETASKS += "do_deploy_source_date_epoch"
+
+do_deploy_source_date_epoch () {
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ else
+ echo "${SDE_FILE} not found!"
+ fi
+}
+
+python do_deploy_source_date_epoch_setscene () {
+ sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ target = d.getVar('SDE_FILE')
+ bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
+ bb.utils.rename(sde_file, target)
+ else:
+ bb.debug(1, "%s not found!" % sde_file)
+}
+
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
+addtask do_deploy_source_date_epoch_setscene
+addtask do_deploy_source_date_epoch before do_configure after do_patch
+
+python create_source_date_epoch_stamp() {
+ source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
+ oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
+do_unpack[postfuncs] += "create_source_date_epoch_stamp"
+
+def get_source_date_epoch_value(d):
+ return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS") or "").split()
@@ -231,6 +270,7 @@ python base_eventhandler() {
if isinstance(e, bb.event.ConfigParsed):
if not d.getVar("NATIVELSBSTRING", False):
d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
+ d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
d.setVar('BB_VERSION', bb.__version__)
# There might be no bb.event.ConfigParsed event if bitbake server is
@@ -289,9 +329,9 @@ python base_eventhandler() {
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
provs = (d.getVar("PROVIDES") or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
for p in provs:
- if p.startswith("virtual/") and p not in multiwhitelist:
+ if p.startswith("virtual/") and p not in multiprovidersallowed:
profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
@@ -388,6 +428,24 @@ python () {
oe.utils.features_backfill("DISTRO_FEATURES", d)
oe.utils.features_backfill("MACHINE_FEATURES", d)
+ if d.getVar("S")[-1] == '/':
+ bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
+ if d.getVar("B")[-1] == '/':
+ bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
+
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
+
+    # To add a recipe to the skip list, set:
+ # SKIP_RECIPE[pn] = "message"
+ pn = d.getVar('PN')
+ skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
+ if skip_msg:
+ bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
+ raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))
+
# Handle PACKAGECONFIG
#
# These take the form:
@@ -470,8 +528,8 @@ python () {
% (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
appendVar('DEPENDS', extradeps)
- appendVar('RDEPENDS_${PN}', extrardeps)
- appendVar('RRECOMMENDS_${PN}', extrarrecs)
+ appendVar('RDEPENDS:${PN}', extrardeps)
+ appendVar('RRECOMMENDS:${PN}', extrarrecs)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
pn = d.getVar('PN')
@@ -484,9 +542,9 @@ python () {
unmatched_license_flags = check_license_flags(d)
if unmatched_license_flags:
if len(unmatched_license_flags) == 1:
- message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
+ message = "because it has a restricted license '{0}'. Which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
else:
- message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
+ message = "because it has restricted licenses {0}. Which are not listed in LICENSE_FLAGS_ACCEPTED".format(
", ".join("'{0}'".format(f) for f in unmatched_license_flags))
bb.debug(1, "Skipping %s %s" % (pn, message))
raise bb.parse.SkipRecipe(message)
@@ -495,15 +553,10 @@ python () {
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_unpack', 'umask', '022')
- d.setVarFlag('do_configure', 'umask', '022')
- d.setVarFlag('do_compile', 'umask', '022')
d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_install', 'fakeroot', '1')
- d.setVarFlag('do_install', 'umask', '022')
d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package', 'fakeroot', '1')
- d.setVarFlag('do_package', 'umask', '022')
d.setVarFlag('do_package_setscene', 'fakeroot', '1')
d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_devshell', 'fakeroot', '1')
@@ -542,87 +595,92 @@ python () {
if check_license and bad_licenses:
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- whitelist = []
- for lic in bad_licenses:
- spdx_license = return_spdx(d, lic)
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
-
- if pn in whitelist:
- '''
- We need to track what we are whitelisting and why. If pn is
- incompatible we need to be able to note that the image that
- is created may infact contain incompatible licenses despite
- INCOMPATIBLE_LICENSE being set.
- '''
- bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
- else:
- pkgs = d.getVar('PACKAGES').split()
- skipped_pkgs = {}
- unskipped_pkgs = []
- for pkg in pkgs:
- incompatible_lic = incompatible_license(d, bad_licenses, pkg)
- if incompatible_lic:
- skipped_pkgs[pkg] = incompatible_lic
- else:
- unskipped_pkgs.append(pkg)
- if unskipped_pkgs:
- for pkg in skipped_pkgs:
- bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
- d.setVar('LICENSE_EXCLUSION-' + pkg, ' '.join(skipped_pkgs[pkg]))
- for pkg in unskipped_pkgs:
- bb.debug(1, "Including the package %s" % pkg)
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
+
+ for lic_exception in exceptions:
+ if ":" in lic_exception:
+            lic_exception = lic_exception.split(":")[1]
+ if lic_exception in oe.license.obsolete_license_list():
+ bb.fatal("Invalid license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
+
+ pkgs = d.getVar('PACKAGES').split()
+ skipped_pkgs = {}
+ unskipped_pkgs = []
+ for pkg in pkgs:
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+
+ incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
+ if incompatible_lic:
+ skipped_pkgs[pkg] = incompatible_lic
else:
- incompatible_lic = incompatible_license(d, bad_licenses)
- for pkg in skipped_pkgs:
- incompatible_lic += skipped_pkgs[pkg]
- incompatible_lic = sorted(list(set(incompatible_lic)))
+ unskipped_pkgs.append(pkg)
+
+ if unskipped_pkgs:
+ for pkg in skipped_pkgs:
+ bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
+ d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
+ for pkg in unskipped_pkgs:
+ bb.debug(1, "Including the package %s" % pkg)
+ else:
+ incompatible_lic = incompatible_license(d, bad_licenses)
+ for pkg in skipped_pkgs:
+ incompatible_lic += skipped_pkgs[pkg]
+ incompatible_lic = sorted(list(set(incompatible_lic)))
- if incompatible_lic:
- bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
- raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
+ if incompatible_lic:
+ bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
+ raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
needsrcrev = False
srcuri = d.getVar('SRC_URI')
- for uri in srcuri.split():
- (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
+ for uri_string in srcuri.split():
+ uri = bb.fetch.URI(uri_string)
+ # Also check downloadfilename as the URL path might not be useful for sniffing
+ path = uri.params.get("downloadfilename", uri.path)
# HTTP/FTP use the wget fetcher
- if scheme in ("http", "https", "ftp"):
+ if uri.scheme in ("http", "https", "ftp"):
d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
# Svn packages should DEPEND on subversion-native
- if scheme == "svn":
+ if uri.scheme == "svn":
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
# Git packages should DEPEND on git-native
- elif scheme in ("git", "gitsm"):
+ elif uri.scheme in ("git", "gitsm"):
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
# Mercurial packages should DEPEND on mercurial-native
- elif scheme == "hg":
+ elif uri.scheme == "hg":
needsrcrev = True
d.appendVar("EXTRANATIVEPATH", ' python3-native ')
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# Perforce packages support SRCREV = "${AUTOREV}"
- elif scheme == "p4":
+ elif uri.scheme == "p4":
needsrcrev = True
# OSC packages should DEPEND on osc-native
- elif scheme == "osc":
+ elif uri.scheme == "osc":
d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
- elif scheme == "npm":
+ elif uri.scheme == "npm":
d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
+ elif uri.scheme == "repo":
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')
+
# *.lz4 should DEPEND on lz4-native for unpacking
if path.endswith('.lz4'):
d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+ # *.zst should DEPEND on zstd-native for unpacking
+ elif path.endswith('.zst'):
+ d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')
+
# *.lz should DEPEND on lzip-native for unpacking
elif path.endswith('.lz'):
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
@@ -681,7 +739,7 @@ python () {
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
- if len(paths) != 0:
+ if paths:
for s in srcuri.split():
if not s.startswith("file://"):
continue
@@ -714,7 +772,7 @@ do_cleansstate[nostamp] = "1"
python do_cleanall() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
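The license hunks above replace the per-recipe WHITELIST_<license> mechanism with INCOMPATIBLE_LICENSE_EXCEPTIONS, whose entries take the package:license form implied by the split(":") handling, alongside the rename of LICENSE_FLAGS_WHITELIST to LICENSE_FLAGS_ACCEPTED. A local.conf sketch using the new names (example values):

    INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
    INCOMPATIBLE_LICENSE_EXCEPTIONS = "gdbm:GPL-3.0-only"
    LICENSE_FLAGS_ACCEPTED = "commercial_gstreamer1.0-plugins-ugly"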
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
index 80ee9b4874..803b2cae4d 100644
--- a/meta/classes/bash-completion.bbclass
+++ b/meta/classes/bash-completion.bbclass
@@ -1,7 +1,7 @@
-DEPENDS_append_class-target = " bash-completion"
+DEPENDS:append:class-target = " bash-completion"
PACKAGES += "${PN}-bash-completion"
-FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-RDEPENDS_${PN}-bash-completion = "bash-completion"
+RDEPENDS:${PN}-bash-completion = "bash-completion"
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c3aca20443 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -34,6 +34,6 @@ bin_package_do_install () {
| tar --no-same-owner -xpf - -C ${D}
}
-FILES_${PN} = "/"
+FILES:${PN} = "/"
EXPORT_FUNCTIONS do_install
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 096b670e12..e8ac41b2d4 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -5,9 +5,9 @@
# The list of scripts which should be disabled.
BINCONFIG ?= ""
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
-do_install_append () {
+do_install:append () {
for x in ${BINCONFIG}; do
# Make the disabled script emit invalid parameters for those configure
# scripts which call it without checking the return code.
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index 9112ed4608..6e0c88269a 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -1,4 +1,4 @@
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
# The namespaces can clash here hence the two step replace
def get_binconfig_mangle(d):
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
deleted file mode 100644
index dc794228ff..0000000000
--- a/meta/classes/blacklist.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# anonymous support class from originally from angstrom
-#
-# To use the blacklist, a distribution should include this
-# class in the INHERIT_DISTRO
-#
-# No longer use ANGSTROM_BLACKLIST, instead use a table of
-# recipes in PNBLACKLIST
-#
-# Features:
-#
-# * To add a package to the blacklist, set:
-# PNBLACKLIST[pn] = "message"
-#
-
-python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
-
- if blacklist:
- raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
-}
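The class is removed rather than renamed: the equivalent check now lives in the anonymous Python in base.bbclass (see the SKIP_RECIPE hunk earlier in this series), so no INHERIT is needed any more. Migration is essentially a variable rename (recipe name and message illustrative):

    # Old, required INHERIT += "blacklist":
    PNBLACKLIST[my-recipe] = "not maintained"
    # New, built into base.bbclass:
    SKIP_RECIPE[my-recipe] = "not maintained"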
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index a4288ef9e1..8db79a4829 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -7,6 +7,8 @@
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
+inherit image-artifact-names
+
BUILDHISTORY_FEATURES ?= "image package sdk"
BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
@@ -29,7 +31,7 @@ BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}
# of failed builds.
#
# The expected usage is via auto.conf, but passing via the command line also works
-# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+# with: BB_ENV_PASSTHROUGH_ADDITIONS=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
BUILDHISTORY_RESET ?= ""
BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
@@ -41,15 +43,16 @@ BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
BUILDHISTORY_TAG ?= "build"
+BUILDHISTORY_PATH_PREFIX_STRIP ?= ""
-SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
+SSTATEPOSTINSTFUNCS:append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# Similarly for our function that gets the output signatures
-SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
@@ -88,13 +91,19 @@ buildhistory_emit_sysroot() {
python buildhistory_emit_pkghistory() {
if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
bb.build.exec_func("buildhistory_emit_sysroot", d)
-
- if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
+ if d.getVar('BB_CURRENTTASK') in ['package', 'package_setscene']:
+ # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
+ bb.build.exec_func("buildhistory_list_pkg_files", d)
+ return 0
+
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
+ return 0
+
import re
import json
import shlex
@@ -113,7 +122,9 @@ python buildhistory_emit_pkghistory() {
self.packages = ""
self.srcrev = ""
self.layer = ""
+ self.license = ""
self.config = ""
+ self.src_uri = ""
class PackageInfo:
@@ -215,6 +226,7 @@ python buildhistory_emit_pkghistory() {
pv = d.getVar('PV')
pr = d.getVar('PR')
layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
+ license = d.getVar('LICENSE')
pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
@@ -255,23 +267,20 @@ python buildhistory_emit_pkghistory() {
rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
rcpinfo.layer = layer
+ rcpinfo.license = license
rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or ""))
+ rcpinfo.src_uri = oe.utils.squashspaces(d.getVar('SRC_URI') or "")
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST')
+ bb.build.exec_func("read_subpackage_metadata", d)
+
for pkg in packagelist:
- pkgdata = {}
- with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
- for line in f.readlines():
- item = line.rstrip('\n').split(': ', 1)
- key = item[0]
- if key.endswith('_' + pkg):
- key = key[:-len(pkg)-1]
- pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
-
- pkge = pkgdata.get('PKGE', '0')
- pkgv = pkgdata['PKGV']
- pkgr = pkgdata['PKGR']
+ localdata = d.createCopy()
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ pkge = localdata.getVar("PKGE") or '0'
+ pkgv = localdata.getVar("PKGV")
+ pkgr = localdata.getVar("PKGR")
#
# Find out what the last version was
# Make sure the version did not decrease
@@ -284,40 +293,39 @@ python buildhistory_emit_pkghistory() {
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
- package_qa_handle_error("version-going-backwards", msg, d)
+ oe.qa.handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
# Apparently the version can be different on a per-package basis (see Python)
- pkginfo.pe = pkgdata.get('PE', '0')
- pkginfo.pv = pkgdata['PV']
- pkginfo.pr = pkgdata['PR']
- pkginfo.pkg = pkgdata['PKG']
+ pkginfo.pe = localdata.getVar("PE") or '0'
+ pkginfo.pv = localdata.getVar("PV")
+ pkginfo.pr = localdata.getVar("PR")
+ pkginfo.pkg = localdata.getVar("PKG")
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or ""))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or ""))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or ""))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or ""))
+        pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or ""))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or ""))
+ pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "")
for filevar in pkginfo.filevars:
- pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
+ pkginfo.filevars[filevar] = localdata.getVar(filevar) or ""
# Gather information about packaged files
- val = pkgdata.get('FILES_INFO', '')
+ val = localdata.getVar('FILES_INFO') or ''
dictval = json.loads(val)
filelist = list(dictval.keys())
filelist.sort()
pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
- pkginfo.size = int(pkgdata['PKGSIZE'])
+ pkginfo.size = int(localdata.getVar('PKGSIZE') or '0')
write_pkghistory(pkginfo, d)
- # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
- bb.build.exec_func("buildhistory_list_pkg_files", d)
+ oe.qa.exit_if_errors(d)
}
python buildhistory_emit_outputsigs() {
@@ -370,7 +378,9 @@ def write_recipehistory(rcpinfo, d):
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ f.write(u"LICENSE = %s\n" % rcpinfo.license)
f.write(u"CONFIG = %s\n" % rcpinfo.config)
+ f.write(u"SRC_URI = %s\n" % rcpinfo.src_uri)
write_latest_srcrev(d, pkghistdir)
@@ -429,19 +439,24 @@ def buildhistory_list_installed(d, rootfs_type="image"):
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- process_list = [('file', 'bh_installed_pkgs.txt'),\
- ('deps', 'bh_installed_pkgs_deps.txt')]
+ process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
+ ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]
if rootfs_type == "image":
pkgs = image_list_installed_packages(d)
else:
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+ if rootfs_type == "sdk_host":
+ pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
+ else:
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+
for output_type, output_file in process_list:
output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
- output.write(format_pkg_list(pkgs, output_type))
+ output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))
python buildhistory_list_installed_image() {
buildhistory_list_installed(d)
@@ -460,9 +475,10 @@ buildhistory_get_installed() {
# Get list of installed packages
pkgcache="$1/installed-packages.tmp"
- cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+ cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt
cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+
if [ -s $pkgcache ] ; then
cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
else
@@ -471,8 +487,8 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
- rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
+ rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
# Remove lines with rpmlib(...) and config(...) dependencies, change the
# delimiter from pipe to "->", set the style for recommend lines and
# turn versioned dependencies into edge labels.
@@ -481,6 +497,8 @@ buildhistory_get_installed() {
-e 's:|: -> :' \
-e 's:"\[REC\]":[style=dotted]:' \
-e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
+ -e 's:"\[RPROVIDES\]":[style=dashed]:' \
$1/depends.tmp
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
@@ -488,11 +506,22 @@ buildhistory_get_installed() {
echo "}" >> $1/depends.dot
rm $1/depends.tmp
+ # Set correct pkgdatadir
+ pkgdatadir=${PKGDATA_DIR}
+ if [ "$2" == "sdk" ] && [ "$3" == "host" ]; then
+ pkgdatadir="${PKGDATA_DIR_SDK}"
+ fi
+
# Produce installed package sizes list
- oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+ oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
+ # Produce package info: runtime_name, buildtime_name, recipe, version, size
+ oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
+ cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
+ rm $1/installed-package-info.tmp
+
# We're now done with the cache, delete it
rm $pkgcache
@@ -529,7 +558,7 @@ buildhistory_get_sdk_installed() {
return
fi
- buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
+ buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
}
buildhistory_get_sdk_installed_host() {
@@ -670,16 +699,19 @@ IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;
IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target;"
POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host;"
POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
-SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
python buildhistory_write_sigs() {
if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -689,7 +721,7 @@ python buildhistory_write_sigs() {
if hasattr(bb.parse.siggen, 'dump_siglist'):
taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
bb.utils.mkdirhier(taskoutdir)
- bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'))
+ bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'), d.getVar("BUILDHISTORY_PATH_PREFIX_STRIP"))
}
def buildhistory_get_build_id(d):
@@ -757,11 +789,11 @@ def buildhistory_get_imagevars(d):
def buildhistory_get_sdkvars(d):
if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
- sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
- sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
- listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
+ sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
+ listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
return outputvars(sdkvars, listvars, d)
@@ -854,7 +886,7 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
reset = e.data.getVar("BUILDHISTORY_RESET")
olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
@@ -864,10 +896,11 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
- os.rename(os.path.join(rootdir, entry),
+ bb.utils.rename(os.path.join(rootdir, entry),
os.path.join(olddir, entry))
elif isinstance(e, bb.event.BuildCompleted):
if reset:
@@ -906,22 +939,12 @@ def _get_srcrev_values(d):
if urldata[u].method.supports_srcrev():
scms.append(u)
- autoinc_templ = 'AUTOINC+'
dict_srcrevs = {}
dict_tag_srcrevs = {}
for scm in scms:
ud = urldata[scm]
for name in ud.names:
- try:
- rev = ud.method.sortable_revision(ud, d, name)
- except TypeError:
- # support old bitbake versions
- rev = ud.method.sortable_revision(scm, ud, d, name)
- # Clean this up when we next bump bitbake version
- if type(rev) != str:
- autoinc, rev = rev
- elif rev.startswith(autoinc_templ):
- rev = rev[len(autoinc_templ):]
+ autoinc, rev = ud.method.sortable_revision(ud, d, name)
dict_srcrevs[name] = rev
if 'tag' in ud.parm:
tag = ud.parm['tag'];
@@ -952,23 +975,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
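For anyone enabling the class, a typical local.conf setup exercising the features touched above might look like this ("task" additionally enables the signature dump in buildhistory_write_sigs):

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "1"
    BUILDHISTORY_FEATURES = "image package sdk task"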
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
index 6f87187233..0de605200a 100644
--- a/meta/classes/buildstats.bbclass
+++ b/meta/classes/buildstats.bbclass
@@ -104,14 +104,90 @@ def write_task_data(status, logfile, e, d):
f.write("Status: FAILED \n")
f.write("Ended: %0.2f \n" % e.time)
+def write_host_data(logfile, e, d, type):
+ import subprocess, os, datetime
+ # minimum time allowed for each command to run, in seconds
+ time_threshold = 0.5
+ limit = 10
+ # the total number of commands
+ num_cmds = 0
+ msg = ""
+ if type == "interval":
+ # interval at which data will be logged
+ interval = d.getVar("BB_HEARTBEAT_EVENT", False)
+ if interval is None:
+ bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ return
+ interval = int(interval)
+ cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
+ msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
+ if cmds is None:
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
+ return
+ if type == "failure":
+ cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
+ msg = "Host Stats: Collecting data on failure.\n"
+ msg += "Failed at task: " + e.task + "\n"
+ if cmds is None:
+ d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
+ bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
+ return
+ c_san = []
+ for cmd in cmds.split(";"):
+ if len(cmd) == 0:
+ continue
+ num_cmds += 1
+ c_san.append(cmd)
+ if num_cmds == 0:
+ if type == "interval":
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ if type == "failure":
+ d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
+ return
+
+    # bail out if the BB_HEARTBEAT_EVENT interval is too short to run all the specified commands
+ if type == "interval":
+ limit = interval / num_cmds
+ if limit <= time_threshold:
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
+ return
+
+ # set the environment variables
+ path = d.getVar("PATH")
+ opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
+ ospath = os.environ['PATH']
+ os.environ['PATH'] = path + ":" + opath + ":" + ospath
+ with open(logfile, "a") as f:
+ f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
+ f.write("%s" % msg)
+ for c in c_san:
+ try:
+ output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
+ except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
+ output = "Error running command: %s\n%s\n" % (c, err)
+ f.write("%s\n%s\n" % (c, output))
+ # reset the environment
+ os.environ['PATH'] = ospath
+
python run_buildstats () {
import bb.build
import bb.event
import time, subprocess, platform
bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF'))
+ ########################################################################
+ # bitbake fires HeartbeatEvent even before a build has been
+ # triggered, causing BUILDNAME to be None
+ ########################################################################
+ if bn is not None:
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF'))
+ if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
+ bb.utils.mkdirhier(bsdir)
+ write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")
if isinstance(e, bb.event.BuildStarted):
########################################################################
@@ -186,10 +262,12 @@ python run_buildstats () {
build_status = os.path.join(bsdir, "build_stats")
with open(build_status, "a") as f:
f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
+ if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
+ write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
}
addhandler run_buildstats
-run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
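
As a usage sketch (the values here are illustrative, not defaults shipped by the class): enabling the host data collection added above requires the heartbeat interval and the command lists to be set in conf/local.conf. Note that write_host_data() runs each ";"-separated entry via subprocess without a shell, so pipes and redirection will not work:

    BB_HEARTBEAT_EVENT = "60"
    BB_LOG_HOST_STAT_ON_INTERVAL = "1"
    BB_LOG_HOST_STAT_CMDS_INTERVAL = "uptime; free -m; df -h"
    BB_LOG_HOST_STAT_ON_FAILURE = "1"
    BB_LOG_HOST_STAT_CMDS_FAILURE = "free -m; df -h"
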
python runqueue_stats () {
import buildstats
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
new file mode 100644
index 0000000000..4a780a501f
--- /dev/null
+++ b/meta/classes/cargo.bbclass
@@ -0,0 +1,90 @@
+##
+## Purpose:
+## This class is used by any recipes that are built using
+## Cargo.
+
+inherit cargo_common
+
+# the binary we will use
+CARGO = "cargo"
+
+# We need cargo to compile for the target
+BASEDEPENDS:append = " cargo-native"
+
+# Ensure we get the right rust variant
+DEPENDS:append:class-target = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-nativesdk = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-native = " rust-native"
+
+# Enable build separation
+B = "${WORKDIR}/build"
+
+# In case something fails in the build process, give a bit more feedback on
+# where the issue occurred
+export RUST_BACKTRACE = "1"
+
+# The directory containing Cargo.toml, relative to the root directory; by
+# default, assume there's a Cargo.toml directly in the root directory
+CARGO_SRC_DIR ??= ""
+
+# The actual path to the Cargo.toml
+MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
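
For example, a hypothetical recipe whose Cargo.toml lives in a subdirectory of the source tree only needs to set CARGO_SRC_DIR:

    inherit cargo
    CARGO_SRC_DIR = "cli"
    # MANIFEST_PATH then resolves to ${S}/cli/Cargo.toml
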
+
+RUSTFLAGS ??= ""
+BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_BUILD_FLAGS = "-v --target ${HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
+
+# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
+# change if CARGO_BUILD_FLAGS changes.
+BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_TARGET_SUBDIR = "${HOST_SYS}/${BUILD_DIR}"
+oe_cargo_build () {
+ export RUSTFLAGS="${RUSTFLAGS}"
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote "cargo = $(which ${CARGO})"
+ bbnote "rustc = $(which ${RUSTC})"
+ bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
+ "${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
+}
+
+do_compile[progress] = "outof:\s+(\d+)/(\d+)"
+cargo_do_compile () {
+ oe_cargo_fix_env
+ oe_cargo_build
+}
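
Because oe_cargo_build forwards its arguments to "cargo build", a recipe can override do_compile to pass extra flags; a minimal sketch (the feature name is hypothetical):

    do_compile () {
        oe_cargo_fix_env
        oe_cargo_build --features "vendored"
    }
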
+
+cargo_do_install () {
+ local have_installed=false
+ for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
+ case $tgt in
+ *.so|*.rlib)
+ install -d "${D}${rustlibdir}"
+ install -m755 "$tgt" "${D}${rustlibdir}"
+ have_installed=true
+ ;;
+ *examples)
+ if [ -d "$tgt" ]; then
+ for example in "$tgt/"*; do
+ if [ -f "$example" ] && [ -x "$example" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$example" "${D}${bindir}"
+ have_installed=true
+ fi
+ done
+ fi
+ ;;
+ *)
+ if [ -f "$tgt" ] && [ -x "$tgt" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$tgt" "${D}${bindir}"
+ have_installed=true
+ fi
+ ;;
+ esac
+ done
+ if ! $have_installed; then
+ die "Did not find anything to install"
+ fi
+}
+
+EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
new file mode 100644
index 0000000000..90fad75415
--- /dev/null
+++ b/meta/classes/cargo_common.bbclass
@@ -0,0 +1,124 @@
+##
+## Purpose:
+## This class supports building with cargo. It must be
+## kept separate from cargo.bbclass because Rust itself
+## now builds with Cargo but cannot use cargo.bbclass,
+## which depends on and assumes that Rust & Cargo are
+## already installed. It is therefore used by both
+## cargo.bbclass and the Rust recipes.
+##
+
+# add crate fetch support
+inherit rust-common
+
+# Where we download our registry and dependencies to
+export CARGO_HOME = "${WORKDIR}/cargo_home"
+
+# The pkg-config-rs library used by cargo build scripts disables itself when
+# cross compiling unless this is defined. We set up pkg-config appropriately
+# for cross compilation, so tell it we know better than it.
+export PKG_CONFIG_ALLOW_CROSS = "1"
+
+# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
+# for example the rust compiler itself, come with their own vendored sources.
+# Specifying two [source.crates-io] will not work.
+CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
+
+# Used by libstd-rs to point to the vendor dir included in rustc src
+CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
+
+CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
+cargo_common_do_configure () {
+ mkdir -p ${CARGO_HOME}/bitbake
+
+ cat <<- EOF > ${CARGO_HOME}/config
+ # EXTRA_OECARGO_PATHS
+ paths = [
+ $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
+ ]
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # Local mirror vendored by bitbake
+ [source.bitbake]
+ directory = "${CARGO_VENDORING_DIRECTORY}"
+ EOF
+
+ if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [source.crates-io]
+ replace-with = "bitbake"
+ local-registry = "/nonexistant"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [http]
+ # Multiplexing can't be enabled because http2 can't be enabled
+ # in curl-native without dependency loops
+ multiplexing = false
+
+	# Ignore the hard-coded and incorrect path to certificates
+ cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
+
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # HOST_SYS
+ [target.${HOST_SYS}]
+ linker = "${CARGO_RUST_TARGET_CCLD}"
+ EOF
+
+ if [ "${HOST_SYS}" != "${BUILD_SYS}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # BUILD_SYS
+ [target.${BUILD_SYS}]
+ linker = "${RUST_BUILD_CCLD}"
+ EOF
+ fi
+
+	# Put build output in the build directory preferred by bitbake instead
+	# of inside the source directory, unless they are the same
+ if [ "${B}" != "${S}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [build]
+	# Use out of tree build destination to avoid polluting the source tree
+ target-dir = "${B}/target"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [term]
+ progress.when = 'always'
+ progress.width = 80
+ EOF
+}
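
For illustration, on a hypothetical aarch64-poky-linux target build the generated ${CARGO_HOME}/config ends up looking roughly like this (paths abbreviated):

    [source.bitbake]
    directory = ".../cargo_home/bitbake"

    [source.crates-io]
    replace-with = "bitbake"
    local-registry = "/nonexistant"

    [http]
    multiplexing = false
    cainfo = ".../ssl/certs/ca-certificates.crt"

    [target.aarch64-poky-linux]
    linker = "aarch64-poky-linux-gcc ..."

    [build]
    target-dir = ".../build/target"
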
+
+oe_cargo_fix_env () {
+ export CC="${RUST_TARGET_CC}"
+ export CXX="${RUST_TARGET_CXX}"
+ export CFLAGS="${CFLAGS}"
+ export CXXFLAGS="${CXXFLAGS}"
+ export AR="${AR}"
+ export TARGET_CC="${RUST_TARGET_CC}"
+ export TARGET_CXX="${RUST_TARGET_CXX}"
+ export TARGET_CFLAGS="${CFLAGS}"
+ export TARGET_CXXFLAGS="${CXXFLAGS}"
+ export TARGET_AR="${AR}"
+ export HOST_CC="${RUST_BUILD_CC}"
+ export HOST_CXX="${RUST_BUILD_CXX}"
+ export HOST_CFLAGS="${BUILD_CFLAGS}"
+ export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
+ export HOST_AR="${BUILD_AR}"
+}
+
+EXTRA_OECARGO_PATHS ??= ""
+
+EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
index b5457359ca..4532894c57 100644
--- a/meta/classes/ccache.bbclass
+++ b/meta/classes/ccache.bbclass
@@ -33,6 +33,10 @@ export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
+# Fixes errors such as:
+# ccache: error: Failed to create directory /run/user/0/ccache-tmp: Permission denied
+export CCACHE_TEMPDIR ?= "${CCACHE_DIR}/tmp"
+
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
# its hash. Without this the cache would be invalidated every time
@@ -45,7 +49,7 @@ python() {
"""
pn = d.getVar('PN')
# quilt-native doesn't need ccache since no c files
- if not (pn in ('ccache-native', 'quilt-native') or
+ if not (bb.data.inherits_class("native", d) or
bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
d.appendVar('DEPENDS', ' ccache-native')
d.setVar('CCACHE', 'ccache ')
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
deleted file mode 100644
index 24b53a13e4..0000000000
--- a/meta/classes/clutter.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
-def get_minor_dir(v):
- import re
- m = re.match(r"^([0-9]+)\.([0-9]+)", v)
- return "%s.%s" % (m.group(1), m.group(2))
-
-def get_real_name(n):
- import re
- m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
- return "%s" % (m.group(1))
-
-VERMINOR = "${@get_minor_dir("${PV}")}"
-REALNAME = "${@get_real_name("${BPN}")}"
-
-SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
-S = "${WORKDIR}/${REALNAME}-${PV}"
-
-CLUTTERBASEBUILDCLASS ??= "autotools"
-inherit ${CLUTTERBASEBUILDCLASS} pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index 94ed8061bb..d9bcddbdbb 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,7 +1,7 @@
# Path to the CMake file to process.
OECMAKE_SOURCEPATH ??= "${S}"
-DEPENDS_prepend = "cmake-native "
+DEPENDS:prepend = "cmake-native "
B = "${WORKDIR}/build"
# What CMake generator to use.
@@ -21,23 +21,6 @@ python() {
d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
else:
bb.fatal("Unknown CMake Generator %s" % generator)
-
- # C/C++ Compiler (without cpu arch/tune arguments)
- if not d.getVar('OECMAKE_C_COMPILER'):
- cc_list = d.getVar('CC').split()
- if cc_list[0] == 'ccache':
- d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0])
- d.setVar('OECMAKE_C_COMPILER', cc_list[1])
- else:
- d.setVar('OECMAKE_C_COMPILER', cc_list[0])
-
- if not d.getVar('OECMAKE_CXX_COMPILER'):
- cxx_list = d.getVar('CXX').split()
- if cxx_list[0] == 'ccache':
- d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0])
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1])
- else:
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
}
OECMAKE_AR ?= "${AR}"
@@ -48,37 +31,59 @@ OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
-CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-OECMAKE_C_COMPILER_LAUNCHER ?= ""
-OECMAKE_CXX_COMPILER_LAUNCHER ?= ""
+def oecmake_map_compiler(compiler, d):
+ args = d.getVar(compiler).split()
+ if args[0] == "ccache":
+ return args[1], args[0]
+ return args[0], ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
+OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
+OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
+OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
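
To make the mapping concrete, with hypothetical CC values:

    CC = "aarch64-poky-linux-gcc --sysroot=..."  ->  OECMAKE_C_COMPILER = "aarch64-poky-linux-gcc", launcher empty
    CC = "ccache aarch64-poky-linux-gcc"         ->  OECMAKE_C_COMPILER = "aarch64-poky-linux-gcc", launcher = "ccache"
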
+
+# clear compiler vars for allarch to avoid sig hash difference
+OECMAKE_C_COMPILER_allarch = ""
+OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
+OECMAKE_CXX_COMPILER_allarch = ""
+OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
-EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
export CMAKE_BUILD_PARALLEL_LEVEL
-CMAKE_BUILD_PARALLEL_LEVEL_task-compile = "${@oe.utils.parallel_make(d, False)}"
-CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
+def map_host_os_to_system_name(host_os):
+ if host_os.startswith('mingw'):
+ return 'Windows'
+ if host_os.startswith('linux'):
+ return 'Linux'
+ return host_os
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
-def map_target_arch_to_uname_arch(target_arch):
- if target_arch == "powerpc":
+def map_host_arch_to_uname_arch(host_arch):
+ if host_arch == "powerpc":
return "ppc"
- if target_arch == "powerpc64":
+ if host_arch == "powerpc64le":
+ return "ppc64le"
+ if host_arch == "powerpc64":
return "ppc64"
- return target_arch
+ return host_arch
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
@@ -88,14 +93,15 @@ cmake_do_generate_toolchain_file() {
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
$cmake_crosscompiling
-set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
+set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
+find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
+
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
@@ -141,16 +147,14 @@ addtask generate_toolchain_file after do_patch before do_configure
CONFIGURE_FILES = "CMakeLists.txt"
+do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
+
cmake_do_configure() {
if [ "${OECMAKE_BUILDPATH}" ]; then
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
fi
- if [ "${S}" != "${B}" ]; then
- rm -rf ${B}
- mkdir -p ${B}
- cd ${B}
- else
+ if [ "${S}" = "${B}" ]; then
find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
fi
@@ -182,6 +186,8 @@ cmake_do_configure() {
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
+ -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
+ -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
${EXTRA_OECMAKE} \
-Wno-dev
}
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index c7f6723cb3..d319d66ab2 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -1,3 +1,13 @@
+# Returns all the elements from SRC_URI that are .cfg files
+def find_cfgs(d):
+    sources = src_patches(d, True)
+    sources_list = []
+    for s in sources:
+        if s.endswith('.cfg'):
+            sources_list.append(s)
+
+ return sources_list
+
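
find_cfgs() is typically consumed by kconfig-based recipes to merge configuration fragments shipped in SRC_URI; a hypothetical do_configure step (the path to merge_config.sh is an assumption and varies per project):

    do_configure:append () {
        ${S}/scripts/kconfig/merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
    }
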
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
@@ -17,22 +27,34 @@ CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+KCONFIG_CONFIG_ROOTDIR ??= "${B}"
python do_menuconfig() {
import shutil
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+
try:
- mtime = os.path.getmtime(".config")
- shutil.copy(".config", ".config.orig")
+ mtime = os.path.getmtime(config)
+ shutil.copy(config, configorig)
except OSError:
mtime = 0
+    # set up native pkg-config variables (kconfig scripts call pkg-config directly and cannot generically be overridden to use pkg-config-native)
+ d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+ d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+ d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+ d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+    # ensure that environment variables are overwritten with this task's 'd' values
+ d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
if hasattr(bb.build, 'write_taint'):
try:
- newmtime = os.path.getmtime(".config")
+ newmtime = os.path.getmtime(config)
except OSError:
newmtime = 0
@@ -42,7 +64,7 @@ python do_menuconfig() {
}
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
do_menuconfig[nostamp] = "1"
-do_menuconfig[dirs] = "${B}"
+do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask menuconfig after do_configure
python do_diffconfig() {
@@ -51,8 +73,8 @@ python do_diffconfig() {
workdir = d.getVar('WORKDIR')
fragment = workdir + '/fragment.cfg'
- configorig = '.config.orig'
- config = '.config'
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
try:
md5newconfig = bb.utils.md5_file(configorig)
@@ -75,5 +97,5 @@ python do_diffconfig() {
}
do_diffconfig[nostamp] = "1"
-do_diffconfig[dirs] = "${B}"
+do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask diffconfig
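
With the new KCONFIG_CONFIG_ROOTDIR knob, a recipe that keeps its .config somewhere other than ${B} (a hypothetical example) can redirect the menuconfig/diffconfig tasks:

    KCONFIG_CONFIG_ROOTDIR = "${S}"
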
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
index d6d11fad26..379b6c169e 100644
--- a/meta/classes/compress_doc.bbclass
+++ b/meta/classes/compress_doc.bbclass
@@ -8,7 +8,7 @@
#
# 3. It is easy to add a new compression type by editing
# local.conf, such as:
-# DOC_COMPRESS_LIST_append = ' abc'
+# DOC_COMPRESS_LIST:append = ' abc'
# DOC_COMPRESS = 'abc'
# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
@@ -225,7 +225,7 @@ python compress_doc_updatealternatives () {
infodir = d.getVar("infodir")
compress_mode = d.getVar('DOC_COMPRESS')
for pkg in (d.getVar('PACKAGES') or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
+ old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
new_names = []
for old_name in old_names:
old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
@@ -258,6 +258,6 @@ python compress_doc_updatealternatives () {
new_names.append(new_name)
if new_names:
- d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
+ d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
}
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index 88ca272145..84fd3eeb38 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -9,6 +9,7 @@
#
# Available IMAGE_FEATURES:
#
+# - weston - Weston Wayland compositor
# - x11 - X server
# - x11-base - X server with minimal environment
# - x11-sato - OpenedHand Sato environment
@@ -30,6 +31,8 @@
# - post-install-logging
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
+# - lic-pkgs - license packages for all installed packages in the rootfs, requires
+# LICENSE_CREATE_PACKAGE="1" to be set when building packages too
# - doc-pkgs - documentation packages for all installed packages in the rootfs
# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
@@ -37,6 +40,7 @@
# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
# - splash - bootup splash screen
#
+FEATURE_PACKAGES_weston = "packagegroup-core-weston"
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index 867edf8707..93d11e1bee 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,10 +2,10 @@
# cpan-base provides various perl-related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
+FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
-RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
+RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
inherit perl-version
@@ -15,4 +15,13 @@ def is_target(d):
return "no"
PERLLIBDIRS = "${libdir}/perl5"
-PERLLIBDIRS_class-native = "${libdir}/perl5"
+PERLLIBDIRS:class-native = "${libdir}/perl5"
+
+def cpan_upstream_check_pattern(d):
+ for x in (d.getVar('SRC_URI') or '').split(' '):
+ if x.startswith("https://cpan.metacpan.org"):
+ _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)')
+ return _pattern
+ return ''
+
+UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}"
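
A worked example with hypothetical recipe values: for a SRC_URI entry of "https://cpan.metacpan.org/authors/id/E/ET/ETHER/Try-Tiny-0.31.tar.gz" and PV = "0.31", the function takes the last path component and substitutes the version, so the default becomes:

    UPSTREAM_CHECK_REGEX = "Try-Tiny-(?P<pver>\d+.\d+).tar.gz"
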
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index e9908ae4b8..18f1b9d575 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -41,12 +41,12 @@ cpan_do_configure () {
fi
}
-do_configure_append_class-target() {
+do_configure:append:class-target() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
-do_configure_append_class-nativesdk() {
+do_configure:append:class-nativesdk() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..1a4804a7c5
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,1022 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_INCLUDE_PACKAGED ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents create using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+do_image_complete[depends] = "virtual/kernel:do_create_spdx"
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(b'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ avail_licenses = available_licenses(d)
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ elif name in avail_licenses:
+ # This license can be found in COMMON_LICENSE_DIR or LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # Error out, as the license was in avail_licenses so should
+ # be on disk somewhere.
+ bb.error("Cannot find text for license %s" % name)
+ else:
+ # If it's not SPDX, or PD, or in avail_licenses, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
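
A worked illustration with hypothetical LICENSE values: the string is tokenized, operators are mapped, and names with no SPDX match become LicenseRef- entries with extracted text attached to the document:

    GPL-2.0-only & (MIT | CLOSED)  ->  GPL-2.0-only AND (MIT OR NONE)
    Proprietary-Foo                ->  LicenseRef-Proprietary-Foo
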
+
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+    # We just archive gcc-source for all the gcc-related recipes
+    if d.getVar('BPN') in ['gcc', 'libgcc']:
+        bb.debug(1, 'spdx: There is a bug in the scan of %s, so do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if filepath.is_file() and not filepath.is_symlink():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
+
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import bb.compress.zstd
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_packaged = d.getVar("SPDX_INCLUDE_PACKAGED") == "1"
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ for s in d.getVar('SRC_URI').split():
+ if not s.startswith("file://"):
+ recipe.downloadLocation = s
+ break
+ else:
+ recipe.downloadLocation = "NOASSERTION"
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+    # save the CVEs fixed by patches to the source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes")
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages")
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies in order to archive the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy)
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
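Taken together, the tasks above populate DEPLOY_DIR_SPDX with a layout along these lines (names illustrative; the by-namespace entries are the lookup used by combine_spdx() below to resolve external document references):

    ${DEPLOY_DIR}/spdx/${MACHINE}/
        recipes/recipe-<PN>.spdx.json
        packages/<pkg>.spdx.json
        runtime/runtime-<pkg>.spdx.json
        by-namespace/<escaped-document-namespace>
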
+def spdx_get_src(d):
+ """
+    Save the patched source of the recipe in SPDXWORK.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+        # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR to make do_unpack do_patch run in another dir.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+        # Changing 'WORKDIR' also changes 'B', so create the 'B' directory in
+        # case any of the following tasks require it to exist (for example,
+        # some recipes' do_patch expects 'B' to be present).
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_kernel_result = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_kernel_result = " + cmd_copy_kernel_result)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+                shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+        # Some userland recipes have no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages)
+
+ if image_link_name:
+ image_spdx_path = imgdeploydir / (image_name + ".spdx.json")
+ image_spdx_link = imgdeploydir / (image_link_name + ".spdx.json")
+ image_spdx_link.symlink_to(os.path.relpath(image_spdx_path, image_spdx_link.parent))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
+ make_image_link(spdx_tar_path, ".spdx.tar.zst")
+ spdx_index_path = imgdeploydir / (image_name + ".spdx.index.json")
+ make_image_link(spdx_index_path, ".spdx.index.json")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages)
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import bb.compress.zstd
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = rootfs_deploydir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True)
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
+ with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(index, sort_keys=True).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
+
+ spdx_index_path = rootfs_deploydir / (rootfs_name + ".spdx.index.json")
+ with spdx_index_path.open("w") as f:
+ json.dump(index, f, sort_keys=True)
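
For orientation, here is a minimal standalone sketch of the collect-and-index pattern the new rootfs code above uses: recurse through externalDocumentRefs, add each SPDX document to the archive exactly once, and record filename/namespace/sha1 in an index. Plain json/tarfile stand in for oe.spdx and oe.sbom, the function and argument names are illustrative, and the zstd wrapper and SOURCE_DATE_EPOCH clamping are omitted.

    import hashlib
    import json
    import tarfile
    from pathlib import Path

    def archive_spdx_tree(root_doc, by_namespace_dir, tar_path, index_path):
        # Visit each document once (by path and by namespace), add it to
        # the archive, then recurse into its externalDocumentRefs.
        index = {"documents": []}
        visited = set()

        with tarfile.open(tar_path, "w") as tar:
            def visit(path):
                if path in visited:
                    return
                visited.add(path)
                data = path.read_bytes()
                doc = json.loads(data)
                if doc["documentNamespace"] in visited:
                    return
                visited.add(doc["documentNamespace"])

                info = tarfile.TarInfo(doc["name"] + ".spdx.json")
                info.size = len(data)
                info.uname = info.gname = "root"
                with path.open("rb") as f:
                    tar.addfile(info, f)

                index["documents"].append({
                    "filename": info.name,
                    "documentNamespace": doc["documentNamespace"],
                    "sha1": hashlib.sha1(data).hexdigest(),
                })
                # The class maps namespaces to files by replacing "/" with "_"
                for ref in doc.get("externalDocumentRefs", []):
                    visit(by_namespace_dir / ref["spdxDocument"].replace("/", "_"))

            visit(root_doc)

        index["documents"].sort(key=lambda x: x["filename"])
        index_path.write_text(json.dumps(index, sort_keys=True))
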
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index f5c9f61595..a0e9d23836 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -36,10 +36,12 @@ python () {
return
tos = d.getVar("TARGET_OS")
- whitelist = []
+ tos_known = ["mingw32"]
extralibcs = [""]
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
+ if "android" in tos:
+ extralibcs.append("android")
for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
for libc in extralibcs:
entry = "linux"
@@ -49,8 +51,8 @@ python () {
entry = entry + "-gnu" + variant
elif libc:
entry = entry + "-" + libc
- whitelist.append(entry)
- if tos not in whitelist:
+ tos_known.append(entry)
+ if tos not in tos_known:
bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
for n in ["PROVIDES", "DEPENDS"]:
@@ -104,7 +106,7 @@ STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
-PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
+PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
HOST_ARCH = "${SDK_ARCH}"
@@ -129,7 +131,7 @@ LDFLAGS = "${BUILDSDK_LDFLAGS} \
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
# Path mangling needed by the cross packaging
@@ -153,9 +155,9 @@ base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-FILES_${PN} = "${prefix}"
+FILES:${PN} = "${prefix}"
-export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
do_populate_sysroot[stamp-extra-info] = ""
@@ -167,7 +169,7 @@ USE_NLS = "${SDKUSE_NLS}"
# and not any particular tune that is enabled.
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
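
As a sketch of what the renamed tos_known list above ends up containing, the generation loop can be reproduced standalone. The combined libc+variant branch is reconstructed from the surrounding logic and should be treated as illustrative:

    def known_target_os(extralibcs=("", "musl"),
                        variants=("", "spe", "x32", "eabi", "n32", "_ilp32")):
        # Every libc/ABI-variant combination yields one accepted TARGET_OS.
        known = ["mingw32"]
        for variant in variants:
            for libc in extralibcs:
                entry = "linux"
                if variant and libc:
                    entry = entry + "-" + libc + variant
                elif variant:
                    entry = entry + "-gnu" + variant
                elif libc:
                    entry = entry + "-" + libc
                known.append(entry)
        return known

    print(known_target_os())  # ['mingw32', 'linux', 'linux-musl', 'linux-gnuspe', ...]
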
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index bfec91d043..9d951076a7 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -7,7 +7,7 @@ EXCLUDE_FROM_WORLD = "1"
CLASSOVERRIDE = "class-cross"
PACKAGES = ""
PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
+PACKAGES_DYNAMIC:class-native = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
@@ -72,10 +72,6 @@ libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
do_packagedata[stamp-extra-info] = ""
-do_install () {
- oe_runmake 'DESTDIR=${D}' install
-}
-
USE_NLS = "no"
export CC = "${BUILD_CC}"
@@ -97,3 +93,5 @@ python do_addto_recipe_sysroot () {
}
addtask addto_recipe_sysroot after do_populate_sysroot
do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
+
+PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 514897e8b8..dfad10c22b 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,53 +20,99 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses defaults to BPN, but may need to
 # be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
+CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
+CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
+CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
+CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
-# Whitelist for packages (PN)
-CVE_CHECK_PN_WHITELIST ?= ""
+CVE_CHECK_REPORT_PATCHED ??= "1"
-# Whitelist for CVE. If a CVE is found, then it is considered patched.
-# The value is a string containing space separated CVE values:
-#
-# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
-#
-CVE_CHECK_WHITELIST ?= ""
+# Skip CVE Check for packages (PN)
+CVE_CHECK_SKIP_RECIPE ?= ""
+
+# Ignore the check for a given list of CVEs. If a CVE is found,
+# then it is considered patched. The value is a string containing
+# space separated CVE values:
+#
+# CVE_CHECK_IGNORE = 'CVE-2014-2524 CVE-2018-1234'
+#
+CVE_CHECK_IGNORE ?= ""
+
+# Layers to be excluded from the CVE report
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included in the CVE report
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# Set to "alphabetical" for versions that use a single alphabetical character as the release increment
+CVE_VERSION_SUFFIX ??= ""
+
+python cve_save_summary_handler () {
+ import shutil
+ import datetime
+
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+
+ cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp))
+
+ if os.path.exists(cve_tmp_file):
+ shutil.copyfile(cve_tmp_file, cve_summary_file)
+
+ if cve_summary_file and os.path.exists(cve_summary_file):
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+
+ if os.path.exists(os.path.realpath(cvefile_link)):
+ os.remove(cvefile_link)
+ os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+}
+
+addhandler cve_save_summary_handler
+cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
try:
- patched_cves = get_patches_cves(d)
+ patched_cves = get_patched_cves(d)
except FileNotFoundError:
bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
+ ignored, patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
+ cve_write_data(d, patched, unpatched, ignored, cve_data)
else:
bb.note("No CVE database found, skipping CVE check")
}
addtask cve_check before do_build after do_fetch
-do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
+do_cve_check[depends] = "cve-update-db-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -87,7 +133,7 @@ python cve_check_write_rootfs_manifest () {
import shutil
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
@@ -98,6 +144,7 @@ python cve_check_write_rootfs_manifest () {
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+ bb.utils.mkdirhier(os.path.dirname(manifest_name))
shutil.copyfile(cve_tmp_file, manifest_name)
if manifest_name and os.path.exists(manifest_name):
@@ -109,70 +156,18 @@ python cve_check_write_rootfs_manifest () {
bb.plain("Image CVE report stored in: %s" % manifest_name)
}
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
-
- import re
-
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
-
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
-
- return patched_cves
-
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from distutils.version import LooseVersion
+ from oe.cve_check import Version
+
+ pn = d.getVar("PN")
+ real_pv = d.getVar("PV")
+ suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
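
The helper removed above now lives in oe.cve_check as get_patched_cves; its two detection rules are easy to restate standalone. A sketch, with the iso8859-1 fallback simplified to errors="replace":

    import re

    CVE_TAG = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
    CVE_IN_NAME = re.compile(r".*([Cc][Vv][Ee]-\d{4}-\d+)")

    def cves_from_patch(patch_file):
        cves = set()
        # Rule 1: the last CVE ID embedded in the patch file name.
        m = CVE_IN_NAME.search(patch_file)
        if m:
            cves.add(m.group(1).upper())
        # Rule 2: every "CVE: CVE-YYYY-NNNN ..." tag line in the patch text.
        with open(patch_file, encoding="utf-8", errors="replace") as f:
            text = f.read()
        for m in CVE_TAG.finditer(text):
            cves.update(m.group(0).split()[1:])
        return cves
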
@@ -182,15 +177,12 @@ def check_cves(d, patched_cves):
return ([], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
- # If the recipe has been whitlisted we return empty lists
- if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
- bb.note("Recipe has been whitelisted, skipping check")
+    # If the recipe has been skipped or ignored, we return empty lists
+ if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
+ bb.note("Recipe has been skipped by cve-check")
return ([], [], [])
- old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
- if old_cve_whitelist:
- bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
- cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
+ cve_ignore = d.getVar("CVE_CHECK_IGNORE").split()
import sqlite3
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
@@ -207,9 +199,9 @@ def check_cves(d, patched_cves):
for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
cve = cverow[0]
- if cve in cve_whitelist:
- bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
+ if cve in cve_ignore:
+ bb.note("%s-%s has been ignored for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'ignored'
patched_cves.add(cve)
continue
elif cve in patched_cves:
@@ -226,8 +218,8 @@ def check_cves(d, patched_cves):
else:
if operator_start:
try:
- vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
- vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
+ vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
@@ -237,8 +229,8 @@ def check_cves(d, patched_cves):
if operator_end:
try:
- vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
- vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
+ vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
@@ -252,18 +244,18 @@ def check_cves(d, patched_cves):
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
- bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
cves_unpatched.append(cve)
break
if not vulnerable:
- bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
+ bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
# TODO: not patched but not vulnerable
patched_cves.add(cve)
conn.close()
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cve_ignore), list(patched_cves), cves_unpatched)
def get_cve_info(d, cves):
"""
@@ -273,7 +265,8 @@ def get_cve_info(d, cves):
import sqlite3
cve_data = {}
- conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
@@ -287,25 +280,42 @@ def get_cve_info(d, cves):
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
+def cve_write_data(d, patched, unpatched, ignored, cve_data):
"""
     Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the
     CVE manifest if enabled.
"""
+
cve_file = d.getVar("CVE_CHECK_LOG")
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
+ is_patched = cve in patched
+ if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ continue
+ write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
- write_string += "CVE STATUS: Whitelisted\n"
- elif cve in patched:
+ if cve in ignored:
+ write_string += "CVE STATUS: Ignored\n"
+ elif is_patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
@@ -319,17 +329,20 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
if unpatched_cves:
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
- with open(cve_file, "w") as f:
- bb.note("Writing file %s with CVE information" % cve_file)
- f.write(write_string)
-
- if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR")
- bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN"))
- with open(deploy_file, "w") as f:
+ if write_string:
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
- with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
- f.write("%s" % write_string)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
+ f.write("%s" % write_string)
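
To make the operator handling in check_cves concrete, here is a hedged standalone sketch of the range test. parse() is a naive stand-in for the new oe.cve_check.Version class (no CVE_VERSION_SUFFIX handling), the exact-match case is omitted, and the and/or combination when both bounds are present is an assumption based on the class's range logic:

    def parse(v):
        # Naive stand-in for oe.cve_check.Version: dotted numeric only.
        return tuple(int(p) for p in v.split("."))

    def is_vulnerable(pv, op_start, v_start, op_end, v_end):
        vulnerable_start = vulnerable_end = False
        if op_start:
            vulnerable_start = (op_start == ">=" and parse(pv) >= parse(v_start)) \
                            or (op_start == ">" and parse(pv) > parse(v_start))
        if op_end:
            vulnerable_end = (op_end == "<=" and parse(pv) <= parse(v_end)) \
                          or (op_end == "<" and parse(pv) < parse(v_end))
        if op_start and op_end:
            # Both bounds present: the version must fall inside the range.
            return vulnerable_start and vulnerable_end
        # Single bound: either side alone decides.
        return vulnerable_start or vulnerable_end

    print(is_vulnerable("2.4.1", ">=", "2.0", "<", "2.5"))  # True
    print(is_vulnerable("2.6.0", ">=", "2.0", "<", "2.5"))  # False
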
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index 6f8a599ccb..8367be9f37 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -4,7 +4,7 @@
# depends are correct
#
 # Custom library package names can be defined by setting
-# DEBIANNAME_ + pkgname to the desired name.
+# DEBIANNAME: + pkgname to the desired name.
#
 # Better expressed as: ensure all RDEPENDS packages exist before we package
# This means we can't have circular RDEPENDS/RRECOMMENDS
@@ -14,6 +14,10 @@ AUTO_LIBNAME_PKGS = "${PACKAGES}"
inherit package
DEBIANRDEP = "do_packagedata"
+do_package_write_ipk[deptask] = "${DEBIANRDEP}"
+do_package_write_deb[deptask] = "${DEBIANRDEP}"
+do_package_write_tar[deptask] = "${DEBIANRDEP}"
+do_package_write_rpm[deptask] = "${DEBIANRDEP}"
do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
@@ -51,11 +55,11 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg)
+ newpkg = d.getVar('PKG:' + pkg)
if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
+ provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
+ d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
p = lambda var: pathlib.PurePath(d.getVar(var))
@@ -110,10 +114,10 @@ python debian_package_name_hook () {
if soname_result:
(pkgname, devname) = soname_result
for pkg in packages.split():
- if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
+ if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
add_rprovides(pkg, d)
continue
- debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
+ debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
if debian_pn:
newpkg = debian_pn
elif pkg == orig_pkg:
@@ -126,7 +130,7 @@ python debian_package_name_hook () {
newpkg = mlpre + newpkg
if newpkg != pkg:
bb.note("debian: renaming %s to %s" % (pkg, newpkg))
- d.setVar('PKG_' + pkg, newpkg)
+ d.setVar('PKG:' + pkg, newpkg)
add_rprovides(pkg, d)
else:
add_rprovides(orig_pkg, d)
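
The hook above ultimately renames library packages after their soname; stripped of the class's ELF inspection, multilib prefixes and -dev handling, the core Debian-style naming rule looks roughly like this (a simplification, not the class's full algorithm):

    import re

    def debian_name_from_soname(soname):
        # Debian convention: <library name><major version>,
        # e.g. libtiff.so.5 -> libtiff5.
        m = re.match(r"^(.*)\.so\.(\d+).*$", soname)
        if not m:
            return None
        return m.group(1).lower() + m.group(2)

    print(debian_name_from_soname("libtiff.so.5"))  # libtiff5
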
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
index 737c26122b..7fbffe996b 100644
--- a/meta/classes/deploy.bbclass
+++ b/meta/classes/deploy.bbclass
@@ -7,6 +7,6 @@ python do_deploy_setscene () {
sstate_setscene(d)
}
addtask do_deploy_setscene
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[dirs] = "${B}"
do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
index c772ab2ab9..2a62ae7bc8 100644
--- a/meta/classes/devicetree.bbclass
+++ b/meta/classes/devicetree.bbclass
@@ -15,10 +15,10 @@
SECTION ?= "bsp"
# The default inclusion of kernel device tree includes and headers means that
-# device trees built with them are at least GPLv2 (and in some cases dual
-# licensed). Default to GPLv2 if the recipe does not specify a license.
-LICENSE ?= "GPLv2"
-LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
+# device trees built with them are at least GPL-2.0-only (and in some cases dual
+# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
+LICENSE ?= "GPL-2.0-only"
+LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
INHIBIT_DEFAULT_DEPS = "1"
DEPENDS += "dtc-native"
@@ -32,7 +32,7 @@ PROVIDES = "virtual/dtb"
PACKAGE_ARCH = "${MACHINE_ARCH}"
SYSROOT_DIRS += "/boot/devicetree"
-FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
+FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
S = "${WORKDIR}"
B = "${WORKDIR}/build"
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index fdf7dc100f..62dc958d9a 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -34,7 +34,7 @@ python () {
d.delVarFlag("do_devshell", "fakeroot")
}
-def devpyshell(d):
+def pydevshell(d):
import code
import select
@@ -128,6 +128,7 @@ def devpyshell(d):
more = i.runsource(source, "<pyshell>")
if not more:
buf = []
+ sys.stderr.flush()
prompt(more)
except KeyboardInterrupt:
i.write("\nKeyboardInterrupt\n")
@@ -139,17 +140,17 @@ def devpyshell(d):
os.kill(child, signal.SIGTERM)
break
-python do_devpyshell() {
+python do_pydevshell() {
import signal
try:
- devpyshell(d)
+ pydevshell(d)
except SystemExit:
# Stop the SIGTERM above causing an error exit code
return
finally:
return
}
-addtask devpyshell after do_patch
+addtask pydevshell after do_patch
-do_devpyshell[nostamp] = "1"
+do_pydevshell[nostamp] = "1"
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 280d6009f3..41900e651f 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -199,6 +199,7 @@ python devtool_post_patch() {
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
@@ -216,7 +217,8 @@ python devtool_post_patch() {
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
- localdata.appendVar('OVERRIDES', ':%s' % override)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
index 7780c5482c..ba6dc4136c 100644
--- a/meta/classes/devupstream.bbclass
+++ b/meta/classes/devupstream.bbclass
@@ -4,8 +4,8 @@
#
# Usage:
# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
-# SRCREV_class-devupstream = "abcdef"
+# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
+# SRCREV:class-devupstream = "abcdef"
#
# If the first entry in SRC_URI is a git: URL then S is rewritten to
# WORKDIR/git.
@@ -16,8 +16,6 @@
# - If the fetcher requires native tools (such as subversion-native) then
# bitbake won't be able to add them automatically.
-CLASSOVERRIDE .= ":class-devupstream"
-
python devupstream_virtclass_handler () {
# Do nothing if this is inherited, as it's for BBCLASSEXTEND
if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
@@ -25,23 +23,32 @@ python devupstream_virtclass_handler () {
return
variant = d.getVar("BBEXTENDVARIANT")
- if variant not in ("target"):
- bb.error("Pass the variant when using devupstream, for example devupstream:target")
+ if variant not in ("target", "native"):
+ bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
return
     # Development releases are never preferred by default
d.setVar("DEFAULT_PREFERENCE", "-1")
- uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
+ src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
+ uri = bb.fetch2.URI(src_uri.split()[0])
- if uri.scheme == "git":
+ if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
d.setVar("S", "${WORKDIR}/git")
# Modify the PV if the recipe hasn't already overridden it
pv = d.getVar("PV")
proto_marker = "+" + uri.scheme
- if proto_marker not in pv:
+ if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
d.setVar("PV", pv + proto_marker + "${SRCPV}")
+
+ if variant == "native":
+ pn = d.getVar("PN")
+ d.setVar("PN", "%s-native" % (pn))
+ fn = d.getVar("FILE")
+ bb.parse.BBHandler.inherit("native", fn, 0, d)
+
+ d.appendVar("CLASSOVERRIDE", ":class-devupstream")
}
addhandler devupstream_virtclass_handler
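
A small sketch of the PV rewrite the handler above performs: the scheme marker and SRCPV are appended only when the recipe (or its class-devupstream override) has not already taken care of it.

    def dev_pv(pv, scheme, pv_overridden=False):
        # Leave PV alone if the recipe overrode it or the "+<scheme>"
        # marker is already present; otherwise append marker + SRCPV.
        marker = "+" + scheme
        if pv_overridden or marker in pv:
            return pv
        return pv + marker + "${SRCPV}"

    print(dev_pv("1.2.3", "git"))  # 1.2.3+git${SRCPV}
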
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
index 9f4db0d771..bf3a2b2090 100644
--- a/meta/classes/distrooverrides.bbclass
+++ b/meta/classes/distrooverrides.bbclass
@@ -6,7 +6,7 @@
# This makes it simpler to write .bbappends that only change the
# task signatures of the recipe if the change is really enabled,
# for example with:
-# do_install_append_df-my-feature () { ... }
+# do_install:append:df-my-feature () { ... }
# where "my-feature" is a DISTRO_FEATURE.
#
# The class is meant to be used in a layer.conf or distro
@@ -22,8 +22,8 @@ DISTRO_FEATURES_OVERRIDES ?= ""
DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
-DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
-DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
# signature because of this line, then the task dependency on
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
deleted file mode 100644
index 7dbf07ac4b..0000000000
--- a/meta/classes/distutils3-base.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base python3native
-
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
deleted file mode 100644
index 7356b5245a..0000000000
--- a/meta/classes/distutils3.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
-inherit distutils3-base
-
-B = "${WORKDIR}/build"
-distutils_do_configure[cleandirs] = "${B}"
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-DISTUTILS_PYTHON = "python3"
-DISTUTILS_PYTHON_class-native = "nativepython3"
-
-distutils3_do_configure() {
- :
-}
-
-distutils3_do_compile() {
- cd ${S}
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
- build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-distutils3_do_compile[vardepsexclude] = "MACHINE"
-
-distutils3_do_install() {
- cd ${S}
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
- build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; \
- -exec sed -i -e s:${D}::g {} \;
-
- for i in ${D}${bindir}/* ${D}${sbindir}/*; do
- if [ -f "$i" ]; then
- sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- fi
- done
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-}
-distutils3_do_install[vardepsexclude] = "MACHINE"
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index d200129987..abfe24bace 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -13,7 +13,7 @@
# called "myrecipe" you would do:
#
# INHERIT += "externalsrc"
-# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
#
# In order to make this class work for both target and native versions (or with
# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
@@ -21,7 +21,7 @@
# the default, but the build directory can be set to the source directory if
# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
#
-# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
#
SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
@@ -45,11 +45,11 @@ python () {
if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
+ d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
@@ -68,6 +68,7 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
+ url_data.type == 'npmsw' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
@@ -85,7 +86,7 @@ python () {
if task.endswith("_setscene"):
# sstate is never going to work for external source trees, disable it
bb.build.deltask(task, d)
- else:
+ elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
@@ -108,6 +109,15 @@ python () {
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if task == 'do_unpack':
+ # The reproducible build create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+            # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -190,6 +200,7 @@ def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
+ import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
@@ -197,6 +208,10 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ if git_dir == top_git_dir:
+ git_dir = None
except subprocess.CalledProcessError:
pass
@@ -210,7 +225,17 @@ def srctree_hash_files(d, srcdir=None):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
+ sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
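
The externalsrc change above folds submodule trees into the source hash. Reduced to its essentials (the temporary GIT_INDEX_FILE staging and submodule discovery via "git submodule--helper list" are omitted), the aggregation is:

    import hashlib
    import subprocess

    def combined_tree_hash(repo, submodule_dirs=()):
        # One write-tree per repository; concatenating the hashes and
        # re-hashing yields a single SHA-1 that changes when any tree does.
        def write_tree(path):
            return subprocess.check_output(
                ["git", "-C", path, "write-tree"]).decode("utf-8").strip()

        combined = write_tree(repo)
        for module_dir in submodule_dirs:
            combined += write_tree(module_dir)
        return hashlib.sha1(combined.encode("utf-8")).hexdigest()
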
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 32569e97db..a8ef660b30 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -14,10 +14,10 @@
inherit useradd_base
-PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
+PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
-ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
+ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group;"
# Image level user / group settings
set_user_group () {
@@ -46,6 +46,9 @@ set_user_group () {
usermod)
perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
+ passwd-expire)
+ perform_passwd_expire "${IMAGE_ROOTFS}" "$opts"
+ ;;
groupmod)
perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
index b3c8047861..3ef6b35baa 100644
--- a/meta/classes/features_check.bbclass
+++ b/meta/classes/features_check.bbclass
@@ -1,6 +1,6 @@
# Allow checking of required and conflicting features
#
-# xxx = [DISTRO,MACHINE,COMBINED]
+# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
#
# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
# in xxx_FEATURES.
@@ -18,13 +18,10 @@ python () {
unused = True
- for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
- if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and \
- d.overridedata.get('ANY_OF_' + kind + '_FEATURES') is None and \
- d.getVar('REQUIRED_' + kind + '_FEATURES') is None and \
- d.overridedata.get('REQUIRED_' + kind + '_FEATURES') is None and \
- d.getVar('CONFLICT_' + kind + '_FEATURES') is None and \
- d.overridedata.get('CONFLICT_' + kind + '_FEATURES') is None:
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
+ if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
+ d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
+ d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
continue
unused = False
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 624a420a0d..442bfc7392 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -35,23 +35,23 @@ python () {
deps = d.getVar("FONT_EXTRA_RDEPENDS")
for pkg in font_pkgs:
- if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
+ if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps)
}
python add_fontcache_postinsts() {
for pkg in d.getVar('FONT_PACKAGES').split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('fontcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('fontcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
index 3e3c509d5f..9d3668edd3 100644
--- a/meta/classes/gconf.bbclass
+++ b/meta/classes/gconf.bbclass
@@ -41,7 +41,7 @@ for SCHEMA in ${SCHEMA_FILES}; do
done
}
-python populate_packages_append () {
+python populate_packages:append () {
import re
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -57,15 +57,15 @@ python populate_packages_append () {
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gconf_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += d.getVar('gconf_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
- d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
+ d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
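
The append-or-create pattern for maintainer scripts seen here recurs in fontcache above and in gio-module-cache and gsettings below; distilled into one helper (with a plain dict standing in for the BitBake datastore) it is roughly:

    def add_pkg_script(store, pkg, varname, fragment):
        # Reuse the per-package script if set, else the generic one, else
        # start a fresh shell script; then append the class's fragment.
        key = "%s:%s" % (varname, pkg)
        script = store.get(key) or store.get(varname) or "#!/bin/sh\n"
        store[key] = script + fragment

    store = {}
    add_pkg_script(store, "mypkg", "pkg_postinst", "echo configured\n")
    print(store["pkg_postinst:mypkg"])
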
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
index be2ef3b311..f11cb04456 100644
--- a/meta/classes/gettext.bbclass
+++ b/meta/classes/gettext.bbclass
@@ -13,10 +13,10 @@ def gettext_oeconf(d):
return '--disable-nls'
return "--enable-nls"
-BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
-EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
+BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
+EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
# Without this, msgfmt from gettext-native will not find ITS files
# provided by target recipes (for example, polkit.its).
-GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
+GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
export GETTEXTDATADIRS
diff --git a/meta/classes/gi-docgen.bbclass b/meta/classes/gi-docgen.bbclass
new file mode 100644
index 0000000000..15581ca127
--- /dev/null
+++ b/meta/classes/gi-docgen.bbclass
@@ -0,0 +1,24 @@
+# gi-docgen is a new gnome documentation generator, which
+# seems to be a successor to gtk-doc:
+# https://gitlab.gnome.org/GNOME/gi-docgen
+
+# This variable is set to True if api-documentation is in
+# DISTRO_FEATURES, and False otherwise.
+GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
+# When building native recipes, disable gi-docgen, as it is not necessary,
+# pulls in additional dependencies, and makes build times longer
+GIDOCGEN_ENABLED:class-native = "False"
+GIDOCGEN_ENABLED:class-nativesdk = "False"
+
+# meson: default option name to enable/disable gi-docgen. This matches most
+# projects' configuration. If in doubt, check meson_options.txt in the
+# project's source tree.
+GIDOCGEN_MESON_OPTION ?= 'gtk_doc'
+GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
+GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GIDOCGEN_ENABLED
+EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
+
+DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
+
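
For illustration, a simplified stand-in for the bb.utils.contains toggle that drives GIDOCGEN_ENABLED and the EXTRA_OEMESON prepend above (option and flag names taken from the class defaults):

    def meson_toggle(distro_features, option="gtk_doc",
                     enable="true", disable="false"):
        # api-documentation in DISTRO_FEATURES selects the -D<option> value.
        enabled = "api-documentation" in distro_features.split()
        return "-D%s=%s" % (option, enable if enabled else disable)

    print(meson_toggle("systemd api-documentation"))  # -Dgtk_doc=true
    print(meson_toggle("systemd"))                    # -Dgtk_doc=false
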
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
index e429bd3197..021eeb1cf8 100644
--- a/meta/classes/gio-module-cache.bbclass
+++ b/meta/classes/gio-module-cache.bbclass
@@ -17,22 +17,22 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('GIO_MODULE_PACKAGES').split()
for pkg in packages:
bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
index db421745bd..2db4ac6846 100644
--- a/meta/classes/glide.bbclass
+++ b/meta/classes/glide.bbclass
@@ -2,8 +2,8 @@
#
# Copyright 2018 (C) O.S. Systems Software LTDA.
-DEPENDS_append = " glide-native"
+DEPENDS:append = " glide-native"
-do_compile_prepend() {
+do_compile:prepend() {
( cd ${B}/src/${GO_IMPORT} && glide install )
}
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index efcb6caae1..9a5bd9a232 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -1,12 +1,13 @@
def gnome_verdir(v):
- return oe.utils.trim_version(v, 2)
+ return ".".join(v.split(".")[:-1])
+
GNOME_COMPRESS_TYPE ?= "xz"
SECTION ?= "x11/gnome"
GNOMEBN ?= "${BPN}"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
-FILES_${PN} += "${datadir}/application-registry \
+FILES:${PN} += "${datadir}/application-registry \
${datadir}/mime-info \
${datadir}/mime/packages \
${datadir}/mime/application \
@@ -18,12 +19,12 @@ FILES_${PN} += "${datadir}/application-registry \
${datadir}/icons \
"
-FILES_${PN}-doc += "${datadir}/devhelp"
+FILES:${PN}-doc += "${datadir}/devhelp"
GNOMEBASEBUILDCLASS ??= "autotools"
inherit ${GNOMEBASEBUILDCLASS} pkgconfig
-do_install_append() {
+do_install:append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
rm -rf ${D}${localstatedir}/scrollkeeper/*
rm -f ${D}${datadir}/applications/*.cache
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
index 5871d02506..674d2434e0 100644
--- a/meta/classes/go-mod.bbclass
+++ b/meta/classes/go-mod.bbclass
@@ -12,7 +12,7 @@
# The '-modcacherw' option ensures we have write access to the cached objects so
# we avoid errors during clean task as well as when removing the TMPDIR.
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -modcacherw"
+GOBUILDFLAGS:append = " -modcacherw"
inherit go
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
index e230a80587..b282ff7374 100644
--- a/meta/classes/go-ptest.bbclass
+++ b/meta/classes/go-ptest.bbclass
@@ -50,5 +50,5 @@ do_install_ptest_base() {
chown -R root:root ${D}${PTEST_PATH}
}
-INSANE_SKIP_${PN}-ptest += "ldflags"
+INSANE_SKIP:${PN}-ptest += "ldflags"
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index a9e31b50ea..9c4c92bffd 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -2,8 +2,10 @@ inherit goarch
GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
-GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
-GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
+export GODEBUG = "gocachehash=1"
+
+GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
+GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
GOROOT = "${STAGING_LIBDIR}/go"
export GOROOT
export GOROOT_FINAL = "${libdir}/go"
@@ -15,45 +17,46 @@ export GOHOSTARCH="${BUILD_GOARCH}"
export GOHOSTOS="${BUILD_GOOS}"
GOARM[export] = "0"
-GOARM_arm_class-target = "${TARGET_GOARM}"
-GOARM_arm_class-target[export] = "1"
+GOARM:arm:class-target = "${TARGET_GOARM}"
+GOARM:arm:class-target[export] = "1"
GO386[export] = "0"
-GO386_x86_class-target = "${TARGET_GO386}"
-GO386_x86_class-target[export] = "1"
+GO386:x86:class-target = "${TARGET_GO386}"
+GO386:x86:class-target[export] = "1"
GOMIPS[export] = "0"
-GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
-GOMIPS_mips_class-target[export] = "1"
+GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
+GOMIPS:mips:class-target[export] = "1"
-DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_GOLANG_class-native = "go-native"
-DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-native = "go-native"
+DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_append = " ${DEPENDS_GOLANG}"
+DEPENDS:append = " ${DEPENDS_GOLANG}"
GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
GO_LINKMODE ?= ""
-GO_LINKMODE_class-nativesdk = "--linkmode=external"
-GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
+GO_LINKMODE:class-nativesdk = "--linkmode=external"
+GO_LINKMODE:class-native = "--linkmode=external"
+GO_EXTRA_LDFLAGS ?= ""
+GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
export GOPATH_OMIT_IN_ACTIONID ?= "1"
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
export GOPTESTFLAGS ?= ""
-GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
+GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
export GO = "${HOST_PREFIX}go"
GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
-GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
+GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
export CGO_ENABLED ?= "1"
-export CGO_ENABLED_riscv64 = "0"
export CGO_CFLAGS ?= "${CFLAGS}"
export CGO_CPPFLAGS ?= "${CPPFLAGS}"
export CGO_CXXFLAGS ?= "${CXXFLAGS}"
@@ -64,7 +67,7 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
-export GOTMPDIR ?= "${WORKDIR}/go-tmp"
+export GOTMPDIR ?= "${WORKDIR}/build-tmp"
GOTMPDIR[vardepvalue] = ""
python go_do_unpack() {
@@ -115,7 +118,8 @@ go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
+ tar -C ${D}${libdir}/go --no-same-owner -xf -
if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
install -d ${D}${bindir}
@@ -139,17 +143,17 @@ go_stage_testdata() {
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
-FILES_${PN}-dev = "${libdir}/go/src"
-FILES_${PN}-staticdev = "${libdir}/go/pkg"
+FILES:${PN}-dev = "${libdir}/go/src"
+FILES:${PN}-staticdev = "${libdir}/go/pkg"
-INSANE_SKIP_${PN} += "ldflags"
+INSANE_SKIP:${PN} += "ldflags"
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
-# doesn't support -buildmode=pie, so skip the QA checking for mips and its
+# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and their
# variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
+ d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
}
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index 1099b95769..92fec16b82 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -6,55 +6,54 @@ HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-HOST_GOARM_class-native = "7"
-HOST_GO386_class-native = "sse2"
-HOST_GOMIPS_class-native = "hardfloat"
+HOST_GOARM:class-native = "7"
+HOST_GO386:class-native = "sse2"
+HOST_GOMIPS:class-native = "hardfloat"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-TARGET_GOARM_class-native = "7"
-TARGET_GO386_class-native = "sse2"
-TARGET_GOMIPS_class-native = "hardfloat"
+TARGET_GOARM:class-native = "7"
+TARGET_GO386:class-native = "sse2"
+TARGET_GOMIPS:class-native = "hardfloat"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
BASE_GOARM = ''
-BASE_GOARM_armv7ve = '7'
-BASE_GOARM_armv7a = '7'
-BASE_GOARM_armv6 = '6'
-BASE_GOARM_armv5 = '5'
+BASE_GOARM:armv7ve = '7'
+BASE_GOARM:armv7a = '7'
+BASE_GOARM:armv6 = '6'
+BASE_GOARM:armv5 = '5'
# Go supports dynamic linking on a limited set of architectures.
# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
GO_DYNLINK = ""
-GO_DYNLINK_arm = "1"
-GO_DYNLINK_aarch64 = "1"
-GO_DYNLINK_x86 = "1"
-GO_DYNLINK_x86-64 = "1"
-GO_DYNLINK_powerpc64 = "1"
-GO_DYNLINK_powerpc64le = "1"
-GO_DYNLINK_class-native = ""
-GO_DYNLINK_class-nativesdk = ""
+GO_DYNLINK:arm ?= "1"
+GO_DYNLINK:aarch64 ?= "1"
+GO_DYNLINK:x86 ?= "1"
+GO_DYNLINK:x86-64 ?= "1"
+GO_DYNLINK:powerpc64 ?= "1"
+GO_DYNLINK:powerpc64le ?= "1"
+GO_DYNLINK:class-native ?= ""
+GO_DYNLINK:class-nativesdk = ""
# define here because everybody inherits this class
#
-COMPATIBLE_HOST_linux-gnux32 = "null"
-COMPATIBLE_HOST_linux-muslx32 = "null"
-COMPATIBLE_HOST_powerpc = "null"
-COMPATIBLE_HOST_powerpc64 = "null"
-COMPATIBLE_HOST_powerpc64le = "null"
-COMPATIBLE_HOST_mipsarchn32 = "null"
+COMPATIBLE_HOST:linux-gnux32 = "null"
+COMPATIBLE_HOST:linux-muslx32 = "null"
+COMPATIBLE_HOST:powerpc = "null"
+COMPATIBLE_HOST:powerpc64 = "null"
+COMPATIBLE_HOST:mipsarchn32 = "null"
-ARM_INSTRUCTION_SET_armv4 = "arm"
-ARM_INSTRUCTION_SET_armv5 = "arm"
-ARM_INSTRUCTION_SET_armv6 = "arm"
+ARM_INSTRUCTION_SET:armv4 = "arm"
+ARM_INSTRUCTION_SET:armv5 = "arm"
+ARM_INSTRUCTION_SET:armv6 = "arm"
-TUNE_CCARGS_remove = "-march=mips32r2"
+TUNE_CCARGS:remove = "-march=mips32r2"
SECURITY_NOPIE_CFLAGS ??= ""
# go can't be built with ccache:
@@ -79,10 +78,10 @@ def go_map_arch(a, d):
return 'mips'
elif a == 'mipsel':
return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
elif re.match('p(pc|owerpc)(64)', a):
return 'ppc64'
- elif re.match('p(pc|owerpc)(64el)', a):
- return 'ppc64le'
elif a == 'riscv64':
return 'riscv64'
else:
@@ -99,7 +98,7 @@ def go_map_386(a, f, d):
if ('core2' in f) or ('corei7' in f):
return 'sse2'
else:
- return '387'
+ return 'softfloat'
return ''
def go_map_mips(a, f, d):
@@ -115,5 +114,3 @@ def go_map_os(o, d):
if o.startswith('linux'):
return 'linux'
return o
-
-
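
The go_map_arch reordering above matters because re.match only anchors at the start of the string: the broader ppc64 pattern also matches "ppc64le", so the little-endian test must run first (and the old "64el" spelling could never match the "le" names at all). A quick demonstration:

    import re

    def go_map_ppc(a):
        # Little-endian first: "p(pc|owerpc)(64)" is a prefix match and
        # would otherwise swallow "ppc64le" as plain ppc64.
        if re.match(r"p(pc|owerpc)(64le)", a):
            return "ppc64le"
        elif re.match(r"p(pc|owerpc)(64)", a):
            return "ppc64"
        return a

    for arch in ("ppc64", "ppc64le", "powerpc64le"):
        print(arch, "->", go_map_ppc(arch))  # ppc64, ppc64le, ppc64le
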
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
index 504f75e28d..7bf9feb0d6 100644
--- a/meta/classes/gobject-introspection.bbclass
+++ b/meta/classes/gobject-introspection.bbclass
@@ -14,30 +14,32 @@ GIR_MESON_OPTION ?= 'introspection'
GIR_MESON_ENABLE_FLAG ?= 'true'
GIR_MESON_DISABLE_FLAG ?= 'false'
+# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
+GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
+GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
# Auto enable/disable based on GI_DATA_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
-
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
# When building native recipes, disable introspection, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-introspection "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
+EXTRA_OECONF:prepend:class-native = "--disable-introspection "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
+EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
+EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
+DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
# Even though introspection is disabled on -native, gobject-introspection package is still
# needed for m4 macros.
-DEPENDS_append_class-native = " gobject-introspection-native"
-DEPENDS_append_class-nativesdk = " gobject-introspection-native"
+DEPENDS:append:class-native = " gobject-introspection-native"
+DEPENDS:append:class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
-do_configure_prepend_class-target () {
+do_configure:prepend:class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
# have our fixes
mkdir -p ${S}/m4
@@ -46,8 +48,8 @@ do_configure_prepend_class-target () {
# .typelib files are needed at runtime and so they go to the main package (so
# they'll be together with libraries they support).
-FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
+FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
# .gir files go to dev package, as they're needed for developing (but not for
# running) things that depend on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
+FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
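The GIRMESONTARGET/GIRMESONBUILD indirection introduced above exists so that setting GIR_MESON_OPTION to an empty string drops the -D flag entirely, for projects whose meson build has no introspection option at all. A hedged recipe sketch:

    # Hypothetical recipe fragment: this project's meson build has no
    # 'introspection' option, so suppress the -D${GIR_MESON_OPTION}= flag
    inherit gobject-introspection
    GIR_MESON_OPTION = ""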
diff --git a/meta/classes/godep.bbclass b/meta/classes/godep.bbclass
deleted file mode 100644
index c82401c313..0000000000
--- a/meta/classes/godep.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
-DEPENDS_append = " go-dep-native"
-
-do_compile_prepend() {
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
- ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
-}
-
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
index 3a2cdd698b..ea21b3de3d 100644
--- a/meta/classes/grub-efi-cfg.bbclass
+++ b/meta/classes/grub-efi-cfg.bbclass
@@ -120,3 +120,4 @@ python build_efi_cfg() {
cfgfile.close()
}
+build_efi_cfg[vardepsexclude] += "OVERRIDES"
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
index 33afc96a9c..3fa5bd40b3 100644
--- a/meta/classes/gsettings.bbclass
+++ b/meta/classes/gsettings.bbclass
@@ -13,30 +13,30 @@ python __anonymous() {
pkg = d.getVar("GSETTINGS_PACKAGE")
if pkg:
d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
- d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
- d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
+ d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
+ d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
}
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
-python populate_packages_append () {
+python populate_packages:append () {
pkg = d.getVar('GSETTINGS_PACKAGE')
if pkg:
bb.note("adding gsettings postinst scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note("adding gsettings postrm scripts to %s" % pkg)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
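The populate_packages:append above attaches the glib-compile-schemas postinst/postrm handlers to whatever GSETTINGS_PACKAGE names. A minimal recipe sketch (the sub-package name is hypothetical):

    inherit gsettings
    # Ship the schemas in a dedicated package instead of ${PN}
    GSETTINGS_PACKAGE = "${PN}-schemas"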
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
index 7dd662bf86..07b46ac829 100644
--- a/meta/classes/gtk-doc.bbclass
+++ b/meta/classes/gtk-doc.bbclass
@@ -7,6 +7,7 @@
#
# It should be used in recipes to determine whether gtk-doc based documentation should be built,
# so that qemu use can be avoided when necessary.
+GTKDOC_ENABLED:class-native = "False"
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
@@ -18,20 +19,20 @@ GTKDOC_MESON_ENABLE_FLAG ?= 'true'
GTKDOC_MESON_DISABLE_FLAG ?= 'false'
# Auto enable/disable based on GTKDOC_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
+EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable gtkdoc, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
-EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
+EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
-DEPENDS_append = " gtk-doc-native"
+DEPENDS:append = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
@@ -40,15 +41,15 @@ GTKDOC_DOCDIR ?= "${S}"
export STAGING_DIR_HOST
inherit python3native pkgconfig qemu
-DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-do_configure_prepend () {
+do_configure:prepend () {
# Need to use ||true as this is only needed if configure.ac both exists
# and uses GTK_DOC_CHECK.
gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
}
-do_compile_prepend_class-target () {
+do_compile:prepend:class-target () {
if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
@@ -62,7 +63,7 @@ export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-# meson sets this wrongly (only to libs in build-dir), qemu-wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
unset LD_LIBRARY_PATH
if [ -d ".libs" ]; then
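GTKDOC_ENABLED above only evaluates to 'True' when the distro requests documentation and the machine can run target binaries under qemu. A local.conf sketch to turn the docs on (qemu-usermode is normally provided by the machine configuration; appending it here is only illustrative):

    DISTRO_FEATURES:append = " api-documentation"
    MACHINE_FEATURES:append = " qemu-usermode"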
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index dd394af27c..6808339b90 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -1,12 +1,22 @@
-FILES_${PN} += "${datadir}/icons/hicolor"
+FILES:${PN} += "${datadir}/icons/hicolor"
-DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \
- ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \
- ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \
- gtk+3-native \
+GTKIC_VERSION ??= '3'
+
+GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
+GTKIC_CMD = "${@ 'gtk-update-icon-cache-3.0.0' if d.getVar('GTKIC_VERSION') == '4' else 'gtk4-update-icon-cache' }"
+
+# gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all
+# recipes that inherit this class require GTK3DISTROFEATURES
+inherit features_check
+ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
+
+DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
+ ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
+ ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
+ ${GTKPN}-native \
"
-PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
+PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
@@ -20,7 +30,7 @@ else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
+ ${GTKIC_CMD} -fqt $icondir
fi
done
fi
@@ -34,13 +44,13 @@ if [ "x$D" != "x" ]; then
else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -qt $icondir
+ ${GTKIC_CMD} -qt $icondir
fi
done
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -51,29 +61,29 @@ python populate_packages_append () {
bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
- #gtk_icon_cache_postinst depend on gdk-pixbuf and gtk+3
+ #gtk_icon_cache_postinst depend on gdk-pixbuf and gtk+3/gtk4
bb.note("adding gdk-pixbuf dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
- bb.note("adding gtk+3 dependency to %s" % pkg)
- rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_icon_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_icon_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
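With the GTKIC_VERSION switch above, a recipe that targets the gtk4 tooling can opt out of the gtk+3 dependency chain entirely. Sketch:

    inherit gtk-icon-cache
    # Use gtk4-update-icon-cache and depend on gtk4 instead of gtk+3
    GTKIC_VERSION = "4"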
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
index 9bb0af8b26..2107517540 100644
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -22,6 +22,7 @@ else
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
fi
if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ mkdir -p ${libdir}/gtk-3.0/3.0.0
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
fi
fi
@@ -46,23 +47,23 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
for pkg in gtkimmodules_pkgs:
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_immodule_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_immodule_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
python __anonymous() {
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index d095305ed8..9b912a3083 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -19,22 +19,21 @@
# or the default one provided by icecc-create-env.bb will be used
# (NOTE that this is a modified version of the script that is needed here and *not the one that comes with icecc*)
#
-# User can specify if specific packages or packages belonging to class should not use icecc to distribute
-# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
-# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages
-# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
+# The user can specify that specific recipes, or recipes belonging to a class, should not use icecc to
+# distribute compile jobs to remote machines but handle them locally, by defining ICECC_CLASS_DISABLE
+# and ICECC_RECIPE_DISABLE with the appropriate values in local.conf. In addition, the user can force
+# icecc to be enabled for recipes which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
#
#########################################################################################
#Error checking is kept to minimum so double check any parameters you pass to the class
###########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
- ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
+ ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
- ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
- ICECC_REMOTE_CPP \
+ ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -47,7 +46,7 @@ HOSTTOOLS_NONFATAL += "icecc patchelf"
#
# A useful thing to do for testing Icecream changes locally is to add a
# subversion in local.conf:
-# ICECC_ENV_VERSION_append = "-my-ver-1"
+# ICECC_ENV_VERSION:append = "-my-ver-1"
ICECC_ENV_VERSION = "2"
# Default to disabling the caret workaround, If set to "1" in local.conf, icecc
@@ -66,7 +65,7 @@ CXXFLAGS += "${ICECC_CFLAGS}"
# Debug flags when generating environments
ICECC_ENV_DEBUG ??= ""
-# "system" recipe blacklist contains a list of packages that can not distribute
+# ICECC_RECIPE_DISABLE contains a list of recipes that cannot distribute
# compile tasks for one reason or the other. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later e.g. when
# there is a new version
@@ -79,25 +78,25 @@ ICECC_ENV_DEBUG ??= ""
# inline assembly
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
-ICECC_SYSTEM_PACKAGE_BL += "\
+ICECC_RECIPE_DISABLE += "\
libgcc-initial \
pixman \
systemtap \
target-sdk-provides-dummy \
"
-# "system" classes that should be blacklisted. When adding new entry, please
+# Classes that should not use icecc. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later
#
# image - Images aren't compiled, but the testing framework for images captures
# PARALLEL_MAKE as part of the test environment. Many tests won't use
# icecream, but leaving the high level of parallelism can cause them to
# consume an unnecessary amount of resources.
-ICECC_SYSTEM_CLASS_BL += "\
+ICECC_CLASS_DISABLE += "\
image \
"
-def icecc_dep_prepend(d):
+def get_icecc_dep(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
@@ -105,7 +104,7 @@ def icecc_dep_prepend(d):
return "icecc-create-env-native"
return ""
-DEPENDS_prepend = "${@icecc_dep_prepend(d)} "
+DEPENDS:prepend = "${@get_icecc_dep(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
@@ -138,39 +137,31 @@ def use_icecc(bb,d):
if icecc_is_cross_canadian(bb, d):
return "no"
- if d.getVar('INHIBIT_DEFAULT_DEPS', False):
- # We don't have a compiler, so no icecc
- return "no"
-
pn = d.getVar('PN')
bpn = d.getVar('BPN')
- # Blacklist/whitelist checks are made against BPN, because there is a good
+ # Enable/disable checks are made against BPN, because there is a good
# chance that if icecc should be skipped for a recipe, it should be skipped
# for all the variants of that recipe. PN is still checked in case a user
# specified a more specific recipe.
check_pn = set([pn, bpn])
- system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
- package_class_blacklist = system_class_blacklist + user_class_blacklist
+ class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
- for black in package_class_blacklist:
- if bb.data.inherits_class(black, d):
- bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
+ for bbclass in class_disable:
+ if bb.data.inherits_class(bbclass, d):
+ bb.debug(1, "%s: bbclass %s found in disable, disable icecc" % (pn, bbclass))
return "no"
- system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split()
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
- package_blacklist = system_package_blacklist + user_package_blacklist
+ disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
+ enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
- if check_pn & set(package_blacklist):
- bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
+ if check_pn & set(disabled_recipes):
+ bb.debug(1, "%s: found in disable list, disable icecc" % pn)
return "no"
- if check_pn & set(user_package_whitelist):
- bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
+ if check_pn & set(enabled_recipes):
+ bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
@@ -313,7 +304,7 @@ wait_for_file() {
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
- TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
+ TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
@@ -362,12 +353,12 @@ set_icecc_env() {
ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
- bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
+ bbnote "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
return
fi
- ICE_VERSION=`$ICECC_CC -dumpversion`
- ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ ICE_VERSION="$($ICECC_CC -dumpversion)"
+ ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
@@ -394,18 +385,18 @@ set_icecc_env() {
chmod 775 $ICE_PATH/$compiler
done
- ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
# and just "as" for native, if it returns "as" in current directory (for whatever reason) use "as" from PATH
- if [ "`dirname "${ICECC_AS}"`" = "." ]
+ if [ "$(dirname "${ICECC_AS}")" = "." ]
then
ICECC_AS="${ICECC_WHICH_AS}"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
- mkdir -p "`dirname "${ICECC_VERSION}"`"
+ mkdir -p "$(dirname "${ICECC_VERSION}")"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
@@ -432,28 +423,32 @@ set_icecc_env() {
bbnote "Using icecc tarball: $ICECC_VERSION"
}
-do_configure_prepend() {
+do_configure[network] = "1"
+do_configure:prepend() {
set_icecc_env
}
-do_compile_prepend() {
+do_compile[network] = "1"
+do_compile:prepend() {
set_icecc_env
}
-do_compile_kernelmodules_prepend() {
+do_compile_kernelmodules[network] = "1"
+do_compile_kernelmodules:prepend() {
set_icecc_env
}
-do_install_prepend() {
+do_install[network] = "1"
+do_install:prepend() {
set_icecc_env
}
# IceCream is not (currently) supported in the extensible SDK
ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
-ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
# Don't include IceCream in uninative tarball
-ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
# Add the toolchain scripts to the SDK
-TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
+TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
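The renamed variables keep their old semantics: the disable lists are checked against both PN and BPN, and the enable list overrides an empty PARALLEL_MAKE. A local.conf sketch (the class and recipe names below are illustrative only):

    INHERIT += "icecc"
    ICECC_CLASS_DISABLE += "some-class"
    ICECC_RECIPE_DISABLE += "my-fragile-recipe"
    ICECC_RECIPE_ENABLE += "my-serialized-recipe"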
diff --git a/meta/classes/image-artifact-names.bbclass b/meta/classes/image-artifact-names.bbclass
new file mode 100644
index 0000000000..f5769e520f
--- /dev/null
+++ b/meta/classes/image-artifact-names.bbclass
@@ -0,0 +1,22 @@
+##################################################################
+# Specific image creation and rootfs population info.
+##################################################################
+
+IMAGE_BASENAME ?= "${PN}"
+IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
+IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
+IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
+
+# IMAGE_NAME is the base name for everything produced when building images.
+# The actual image that contains the rootfs has an additional suffix (.rootfs
+# by default) followed by additional suffixes which describe the format (.ext4,
+# .ext4.xz, etc.).
+IMAGE_NAME_SUFFIX ??= ".rootfs"
+
+python () {
+ if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
+ import datetime
+ d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
+ d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
+}
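With the defaults above, a build of core-image-minimal for MACHINE qemux86-64 would deploy artifacts along these lines (the timestamp is illustrative; under reproducible builds the anonymous python substitutes a SOURCE_DATE_EPOCH-derived value):

    IMAGE_NAME      -> core-image-minimal-qemux86-64-20210310120000
    rootfs image    -> core-image-minimal-qemux86-64-20210310120000.rootfs.ext4
    IMAGE_LINK_NAME -> core-image-minimal-qemux86-64 (prefix for the symlinks)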
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
index f4772f7ea1..e5dc61f857 100644
--- a/meta/classes/image-combined-dbg.bbclass
+++ b/meta/classes/image-combined-dbg.bbclass
@@ -1,4 +1,4 @@
-IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
+IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
combine_dbg_image () {
if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
index f002858bd2..3d1993576a 100644
--- a/meta/classes/image-container.bbclass
+++ b/meta/classes/image-container.bbclass
@@ -1,6 +1,6 @@
ROOTFS_BOOTSTRAP_INSTALL = ""
IMAGE_TYPES_MASKED += "container"
-IMAGE_TYPEDEP_container = "tar.bz2"
+IMAGE_TYPEDEP:container = "tar.bz2"
python __anonymous() {
if "container" in d.getVar("IMAGE_FSTYPES") and \
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index 54058b350d..2c948190cf 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -22,7 +22,7 @@
# ${HDDIMG_ID} - FAT image volume-id
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-inherit live-vm-common
+inherit live-vm-common image-artifact-names
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
@@ -30,7 +30,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
"
@@ -42,9 +42,9 @@ INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "live hddimg iso"
python() {
@@ -234,7 +234,7 @@ build_hddimg() {
bberror "${HDDDIR}/rootfs.img rootfs size is greather than or equal to 4GB,"
bberror "and this doesn't work on a FAT filesystem. You can either:"
bberror "1) Reduce the size of rootfs.img, or,"
- bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
+ bbfatal "2) Use wic, vmdk,vhd, vhdx or vdi instead of hddimg\n"
fi
fi
@@ -261,4 +261,4 @@ python do_bootimg() {
do_bootimg[subimages] = "hddimg iso"
do_bootimg[imgsuffix] = "."
-addtask bootimg before do_image_complete
+addtask bootimg before do_image_complete after do_rootfs
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
deleted file mode 100644
index 68e11d4365..0000000000
--- a/meta/classes/image-mklibs.bbclass
+++ /dev/null
@@ -1,56 +0,0 @@
-do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
-
-inherit linuxloader
-
-mklibs_optimize_image_doit() {
- rm -rf ${WORKDIR}/mklibs
- mkdir -p ${WORKDIR}/mklibs/dest
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
-
- # Build a list of dynamically linked executable ELF files.
- # Omit libc/libpthread as a special case because it has an interpreter
- # but is primarily what we intend to strip down.
- for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
- file $i | grep -q ELF || continue
- ${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
- echo $i
- done > ${WORKDIR}/mklibs/executables.list
-
- dynamic_loader=${@get_linuxloader(d)}
-
- mklibs -v \
- --ldlib ${dynamic_loader} \
- --libdir ${baselib} \
- --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
- --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
- --root ${IMAGE_ROOTFS} \
- --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
- -d ${WORKDIR}/mklibs/dest \
- `cat ${WORKDIR}/mklibs/executables.list`
-
- cd ${WORKDIR}/mklibs/dest
- for i in *
- do
- cp $i `find ${IMAGE_ROOTFS} -name $i`
- done
-
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
-
- echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
- echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
-}
-
-mklibs_optimize_image() {
- for img in ${MKLIBS_OPTIMIZED_IMAGES}
- do
- if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
- then
- mklibs_optimize_image_doit
- break
- fi
- done
-}
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
deleted file mode 100644
index ebf6e6d7ee..0000000000
--- a/meta/classes/image-prelink.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
-do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
-
-python prelink_setup () {
- oe.utils.write_ld_so_conf(d)
-}
-
-inherit linuxloader
-
-prelink_image () {
-# export PSEUDO_DEBUG=4
-# /bin/env | /bin/grep PSEUDO
-# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-# echo "LD_PRELOAD=$LD_PRELOAD"
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size before prelinking $pre_prelink_size."
-
- # The filesystem may not contain sysconfdir so establish what is present
- # to enable cleanup after temporary creation of sysconfdir if needed
- presentdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${IMAGE_ROOTFS}" != "${presentdir}" ] ; do
- [ ! -d "${presentdir}" ] || break
- presentdir=`dirname "${presentdir}"`
- done
-
- mkdir -p "${IMAGE_ROOTFS}${sysconfdir}"
-
- # We need a prelink conf on the filesystem, add one if it's missing
- if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
- cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
- ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- dummy_prelink_conf=true;
- else
- dummy_prelink_conf=false;
- fi
-
- # We need a ld.so.conf with pathnames in,prelink conf on the filesystem, add one if it's missing
- ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
- if [ -e $ldsoconf ]; then
- cp $ldsoconf $ldsoconf.prelink
- fi
- cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
-
- dynamic_loader=${@get_linuxloader(d)}
-
- # prelink!
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
- else
- export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
- fi
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- else
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- fi
-
- # Remove the prelink.conf if we had to add it.
- if [ "$dummy_prelink_conf" = "true" ]; then
- rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- fi
-
- if [ -e $ldsoconf.prelink ]; then
- mv $ldsoconf.prelink $ldsoconf
- else
- rm $ldsoconf
- fi
-
- # Remove any directories temporarily created for sysconfdir
- cleanupdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${presentdir}" != "${cleanupdir}" ] ; do
- rmdir "${cleanupdir}"
- cleanupdir=`dirname ${cleanupdir}`
- done
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size after prelinking $pre_prelink_size."
-}
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 694b58fc9f..7f1f6f80a4 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -15,6 +15,7 @@ IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-contain
IMGCLASSES += "image_types_wic"
IMGCLASSES += "rootfs-postcommands"
IMGCLASSES += "image-postinst-intercepts"
+IMGCLASSES += "overlayfs-etc"
inherit ${IMGCLASSES}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
@@ -26,19 +27,19 @@ PACKAGES = ""
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
-# These pacackages will be installed as additional into debug rootfs
+# These packages will additionally be installed into the debug rootfs
IMAGE_INSTALL_DEBUGFS ?= ""
# These packages will be removed from a read-only rootfs after all other
@@ -53,7 +54,7 @@ FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
# Define some very basic feature package groups
FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
-SPLASH ?= "psplash"
+SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
FEATURE_PACKAGES_splash = "${SPLASH}"
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
@@ -92,7 +93,7 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-musl = ""
+LDCONFIGDEPEND:libc-musl = ""
# This is needed to have depmod data in PKGDATA_DIR,
# but if you're building small initramfs image
@@ -112,7 +113,7 @@ def rootfs_command_variables(d):
'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
python () {
- variables = rootfs_command_variables(d) + sdk_command_variables(d)
+ variables = rootfs_command_variables(d)
for var in variables:
if d.getVar(var, False):
d.setVarFlag(var, 'func', '1')
@@ -121,7 +122,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -138,7 +139,10 @@ python () {
def extraimage_getdepends(task):
deps = ""
for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
- deps += " %s:%s" % (dep, task)
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
return deps
d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
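The extraimage_getdepends() change above lets an EXTRA_IMAGEDEPENDS entry name its own task instead of always getting do_populate_sysroot. Both forms now work:

    EXTRA_IMAGEDEPENDS += "u-boot"            # expands to u-boot:do_populate_sysroot
    EXTRA_IMAGEDEPENDS += "u-boot:do_deploy"  # task spec taken verbatim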
@@ -173,10 +177,15 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
+# By default, create a locale archive
+IMAGE_LOCALES_ARCHIVE ?= '1'
+
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
+
PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"
@@ -244,8 +253,7 @@ fakeroot python do_rootfs () {
progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
-do_rootfs[umask] = "022"
+do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask rootfs after do_prepare_recipe_sysroot
@@ -258,7 +266,6 @@ fakeroot python do_image () {
execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
-do_image[umask] = "022"
addtask do_image after do_rootfs
fakeroot python do_image_complete () {
@@ -269,9 +276,8 @@ fakeroot python do_image_complete () {
execute_pre_post_process(d, post_process_cmds)
}
do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
+SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
@@ -312,7 +318,7 @@ fakeroot python do_image_qa () {
addtask do_image_qa after do_rootfs before do_image
SSTATETASKS += "do_image_qa"
-SSTATE_SKIP_CREATION_task-image-qa = '1'
+SSTATE_SKIP_CREATION:task-image-qa = '1'
do_image_qa[sstate-inputdirs] = ""
do_image_qa[sstate-outputdirs] = ""
python do_image_qa_setscene () {
@@ -380,8 +386,8 @@ python () {
if t.startswith("debugfs_"):
t = t[8:]
debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
- vardeps.add('IMAGE_TYPEDEP_' + t)
+ deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
+ vardeps.add('IMAGE_TYPEDEP:' + t)
if baset not in typedeps:
typedeps[baset] = set()
deps = [debug + dep for dep in deps]
@@ -429,21 +435,22 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ localdata.delVar('IMAGE_VERSION_SUFFIX')
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
image_cmd = localdata.getVar("IMAGE_CMD")
- vardeps.add('IMAGE_CMD_' + realt)
+ vardeps.add('IMAGE_CMD:' + realt)
if image_cmd:
cmds.append("\t" + image_cmd)
else:
bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
- # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
- # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
- d.delVarFlag('IMAGE_CMD_' + realt, 'func')
+ # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
+ # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
+ d.delVarFlag('IMAGE_CMD:' + realt, 'func')
rm_tmp_images = set()
def gen_conversion_cmds(bt):
@@ -455,11 +462,10 @@ python () {
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
+ cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
if cmd not in cmds:
cmds.append(cmd)
- vardeps.add('CONVERSION_CMD_' + ctype)
- vardeps.add('COMPRESS_CMD_' + ctype)
+ vardeps.add('CONVERSION_CMD:' + ctype)
subimage = type + "." + ctype
if subimage not in subimages:
subimages.append(subimage)
@@ -505,7 +511,7 @@ python () {
# Compute the rootfs size
#
def get_rootfs_size(d):
- import subprocess
+ import subprocess, oe.utils
rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
@@ -516,9 +522,7 @@ def get_rootfs_size(d):
initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
- output = subprocess.check_output(['du', '-ks',
- d.getVar('IMAGE_ROOTFS')])
- size_kb = int(output.split()[0])
+ size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024
base_size = size_kb * overhead_factor
bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
@@ -548,14 +552,14 @@ def get_rootfs_size(d):
if rootfs_maxsize:
rootfs_maxsize_int = int(rootfs_maxsize)
if base_size > rootfs_maxsize_int:
- bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+ bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
(base_size, rootfs_maxsize_int))
# Check the initramfs size against INITRAMFS_MAXSIZE (if set)
if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
initramfs_maxsize_int = int(initramfs_maxsize)
if base_size > initramfs_maxsize_int:
- bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+ bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
@@ -610,7 +614,7 @@ deltask do_populate_lic
deltask do_populate_sysroot
do_package[noexec] = "1"
deltask do_package_qa
-do_packagedata[noexec] = "1"
+deltask do_packagedata
deltask do_package_write_ipk
deltask do_package_write_deb
deltask do_package_write_rpm
@@ -619,20 +623,20 @@ deltask do_package_write_rpm
create_merged_usr_symlinks() {
root="$1"
install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
- lnr $root${base_bindir} $root/bin
- lnr $root${base_sbindir} $root/sbin
- lnr $root${base_libdir} $root/${baselib}
+ ln -rs $root${base_bindir} $root/bin
+ ln -rs $root${base_sbindir} $root/sbin
+ ln -rs $root${base_libdir} $root/${baselib}
if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
install -d $root${nonarch_base_libdir}
- lnr $root${nonarch_base_libdir} $root/lib
+ ln -rs $root${nonarch_base_libdir} $root/lib
fi
# create base links for multilibs
multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
for d in $multi_libdirs; do
install -d $root${exec_prefix}/$d
- lnr $root${exec_prefix}/$d $root/$d
+ ln -rs $root${exec_prefix}/$d $root/$d
done
}
@@ -648,17 +652,15 @@ ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge'
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
reproducible_final_image_task () {
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
- fi
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
fi
- # Set mtime of all files to a reproducible value
- bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
- find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
fi
+ # Set mtime of all files to a reproducible value
+ bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
+ find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
}
systemd_preset_all () {
@@ -667,6 +669,6 @@ systemd_preset_all () {
fi
}
-IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
+IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
CVE_PRODUCT = ""
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index ab05cc90ff..f643ed3ce7 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -1,9 +1,3 @@
-# IMAGE_NAME is the base name for everything produced when building images.
-# The actual image that contains the rootfs has an additional suffix (.rootfs
-# by default) followed by additional suffices which describe the format (.ext4,
-# .ext4.xz, etc.).
-IMAGE_NAME_SUFFIX ??= ".rootfs"
-
# The default alignment of the size of the rootfs is set to 1KiB. In case
# you're using the SD card emulation of a QEMU system simulator you may
# set this value to 2048 (2MiB alignment).
@@ -35,7 +29,7 @@ def imagetypes_getdepends(d):
if d.getVar(var) is not None:
deprecated.add(var)
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
+ for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
base, rest = split_types(typedepends)
resttypes += rest
@@ -62,9 +56,9 @@ ZIP_COMPRESSION_LEVEL ?= "-9"
ZSTD_COMPRESSION_LEVEL ?= "-3"
JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
oe_mkext234fs () {
fstype=$1
@@ -94,12 +88,12 @@ oe_mkext234fs () {
fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
}
-IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
-IMAGE_CMD_btrfs () {
+IMAGE_CMD:btrfs () {
size=${ROOTFS_SIZE}
if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
size=${MIN_BTRFS_SIZE}
@@ -109,27 +103,23 @@ IMAGE_CMD_btrfs () {
mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
-IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
-
-# By default, tar from the host is used, which can be quite old. If
-# you need special parameters (like --xattrs) which are only supported
-# by GNU tar upstream >= 1.27, then override that default:
-# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
-# do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
-# EXTRANATIVEPATH += "tar-native"
-#
-# The GNU documentation does not specify whether --xattrs-include is necessary.
-# In practice, it turned out to be not needed when creating archives and
-# required when extracting, but it seems prudent to use it in both cases.
+IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
+IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
+
+IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
+
+
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
-IMAGE_CMD_cpio () {
+IMAGE_CMD:cpio () {
(cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
@@ -149,16 +139,18 @@ IMAGE_CMD_cpio () {
}
UBI_VOLNAME ?= "${MACHINE}-rootfs"
+UBI_VOLTYPE ?= "dynamic"
+UBI_IMGTYPE ?= "ubifs"
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
-
+
# Print a clear error for ubi and ubifs image creation if the required arguments are not set.
if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
fi
-
+
if [ -z "$3" ]; then
local vname=""
else
@@ -167,9 +159,9 @@ multiubi_mkfs() {
echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_type=${UBI_VOLTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
if [ -n "$vname" ]; then
@@ -195,7 +187,7 @@ multiubi_mkfs() {
fi
}
-IMAGE_CMD_multiubi () {
+IMAGE_CMD:multiubi () {
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
@@ -205,15 +197,15 @@ IMAGE_CMD_multiubi () {
done
}
-IMAGE_CMD_ubi () {
+IMAGE_CMD:ubi () {
multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
-IMAGE_TYPEDEP_ubi = "ubifs"
+IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
MIN_F2FS_SIZE ?= "524288"
-IMAGE_CMD_f2fs () {
+IMAGE_CMD:f2fs () {
# We need to add additional smarts here for devices smaller than 1.5G
# Need to scale appropriately between 40M -> 1.5G as the "overprovision
# ratio" goes down as the device gets bigger (70% -> 4.5%), below about
@@ -231,17 +223,18 @@ IMAGE_CMD_f2fs () {
EXTRA_IMAGECMD = ""
-inherit siteinfo kernel-arch
+inherit siteinfo kernel-arch image-artifact-names
+
JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
-EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
+EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
# Change these if you want default mkfs behavior (i.e. create minimal inode number)
-EXTRA_IMAGECMD_ext2 ?= "-i 4096"
-EXTRA_IMAGECMD_ext3 ?= "-i 4096"
-EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_f2fs ?= ""
+EXTRA_IMAGECMD:ext2 ?= "-i 4096"
+EXTRA_IMAGECMD:ext3 ?= "-i 4096"
+EXTRA_IMAGECMD:ext4 ?= "-i 4096"
+EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
+EXTRA_IMAGECMD:f2fs ?= ""
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
@@ -254,10 +247,14 @@ do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
+do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
+do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
+do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -269,13 +266,14 @@ IMAGE_TYPES = " \
btrfs \
iso \
hddimg \
- squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
+ squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
ubi ubifs multiubi \
tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
- cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
wic wic.gz wic.bz2 wic.lzma wic.zst \
container \
f2fs \
+ erofs erofs-lz4 erofs-lz4hc \
"
# Compression is a special case of conversion. The old variable
@@ -284,28 +282,32 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 base64 ${COMPRESSIONTYPES}"
-CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
-CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
-CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
-CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
-CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
-CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
-CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
+CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
+CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
+CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
+CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
+CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
+CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
+CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
+CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -321,14 +323,18 @@ CONVERSION_DEPENDS_vmdk = "qemu-system-native"
CONVERSION_DEPENDS_vdi = "qemu-system-native"
CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
CONVERSION_DEPENDS_base64 = "coreutils-native"
+CONVERSION_DEPENDS_vhdx = "qemu-system-native"
+CONVERSION_DEPENDS_vhd = "qemu-system-native"
+CONVERSION_DEPENDS_zsync = "zsync-curl-native"
+CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
-DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
+DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
# bmap requires python3 to be in the PATH
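Conversion types chain onto base image types through IMAGE_FSTYPES, so the new vhd, vhdx, zsync and gzsync converters are enabled like any existing conversion. A minimal local.conf sketch, assuming the machine already produces ext4 and wic images:

    IMAGE_FSTYPES += "ext4.gz wic.vhdx"
    # Each suffix after the base type runs the matching CONVERSION_CMD:<type>,
    # and CONVERSION_DEPENDS_<type> (qemu-system-native for vhdx) is pulled in
    # as a native build dependency automatically.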
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index 7b1db50a28..e3863c88a9 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -1,10 +1,36 @@
# The WICVARS variable is used to define list of bitbake variables used in wic code
# variables from this list is written to <image>.env file
WICVARS ?= "\
- BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
- IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
- KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND"
+ APPEND \
+ ASSUME_PROVIDED \
+ BBLAYERS \
+ DEPLOY_DIR_IMAGE \
+ FAKEROOTCMD \
+ HOSTTOOLS_DIR \
+ IMAGE_BASENAME \
+ IMAGE_BOOT_FILES \
+ IMAGE_EFI_BOOT_FILES \
+ IMAGE_LINK_NAME \
+ IMAGE_ROOTFS \
+ IMGDEPLOYDIR \
+ INITRAMFS_FSTYPES \
+ INITRAMFS_IMAGE \
+ INITRAMFS_IMAGE_BUNDLE \
+ INITRAMFS_LINK_NAME \
+ INITRD \
+ INITRD_LIVE \
+ ISODIR \
+ KERNEL_IMAGETYPE \
+ MACHINE \
+ PSEUDO_IGNORE_PATHS \
+ RECIPE_SYSROOT_NATIVE \
+ ROOTFS_SIZE \
+ STAGING_DATADIR \
+ STAGING_DIR \
+ STAGING_DIR_HOST \
+ STAGING_LIBDIR \
+ TARGET_SYS \
+"
inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
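WICVARS defines exactly which variables are exported into the <image>.env file that wic consumes, so a layer whose wic source plugin needs an extra variable has to extend the list. A sketch, where MY_BOOT_VAR is a hypothetical variable name:

    WICVARS:append = " MY_BOOT_VAR"
    MY_BOOT_VAR = "console=ttyS0,115200"
    # the value then appears in <image>.env and is visible to wic plugins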
@@ -25,19 +51,27 @@ def wks_search(files, search_path):
WIC_CREATE_EXTRA_ARGS ?= ""
-IMAGE_CMD_wic () {
+IMAGE_CMD:wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
build_wic="${WORKDIR}/build-wic"
+ tmp_wic="${WORKDIR}/tmp-wic"
wks="${WKS_FULL_PATH}"
+ if [ -e "$tmp_wic" ]; then
+ # Ensure we don't have any junk left over from a previously interrupted
+ # do_image_wic execution
+ rm -rf "$tmp_wic"
+ fi
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
- BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" ${WIC_CREATE_EXTRA_ARGS}
+ BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
+
# Rebuild when the wks file or vars in WICVARS change
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
@@ -51,9 +85,9 @@ do_image_wic[deptask] += "do_image_complete"
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
-WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
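Since WKS_FILE_DEPENDS is a weak default, a BSP that ships its own kickstart file can replace or extend it alongside WKS_FILE. A machine-configuration sketch, assuming a hypothetical my-machine.wks:

    WKS_FILE = "my-machine.wks"
    WKS_FILE_DEPENDS:append = " dosfstools-native"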
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 649aea1da1..0deebdb148 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -18,8 +18,6 @@
# files under exec_prefix
# -Check if the package name is upper case
-QA_SANE = "True"
-
# Elect whether a given type of error is a warning or error, they may
# have been set by other files.
WARN_QA ?= " libdir xorg-driver-abi \
@@ -27,6 +25,9 @@ WARN_QA ?= " libdir xorg-driver-abi \
infodir build-deps src-uri-bad symlink-to-sysroot multilib \
invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
mime mime-xdg unlisted-pkg-lics unhandled-features-check \
+ missing-update-alternatives native-last missing-ptest \
+ license-exists license-no-generic license-syntax license-format \
+ license-incompatible license-file-missing obsolete-license \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -36,10 +37,10 @@ ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
configure-gettext perllocalpod shebang-size \
already-stripped installed-vs-shipped ldflags compile-host-path \
install-host-path pn-overrides unknown-configure-option \
- useless-rpaths rpaths staticdev \
+ useless-rpaths rpaths staticdev empty-dirs \
"
# Add usrmerge QA check based on distro feature
-ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
+ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
@@ -47,7 +48,22 @@ enabled tests are listed here, the do_package_qa task will run under fakeroot."
ALL_QA = "${WARN_QA} ${ERROR_QA}"
-UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+
+# This is a list of directories that are expected to be empty.
+QA_EMPTY_DIRS ?= " \
+ /dev/pts \
+ /media \
+ /proc \
+ /run \
+ /tmp \
+ ${localstatedir}/run \
+ ${localstatedir}/volatile \
+"
+# It is possible to specify why a directory is expected to be empty by defining
+# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
+# message if the directory is not empty. If it is not specified for a directory,
+# then "but it is expected to be empty" will be used.
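A distro or recipe can therefore extend the directory list and attach its own explanation, using the path itself as the override. A sketch with an assumed reason string:

    QA_EMPTY_DIRS:append = " /var/cache"
    QA_EMPTY_DIRS_RECOMMENDATION:/var/cache = "but caches should be populated at runtime"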
def package_qa_clean_path(path, d, pkg=None):
"""
@@ -58,35 +74,10 @@ def package_qa_clean_path(path, d, pkg=None):
path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
-def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE')
- if logfile:
- p = d.getVar('P')
- with open(logfile, "a+") as f:
- f.write("%s: %s [%s]\n" % (p, error, type))
-
-def package_qa_handle_error(error_class, error_msg, d):
- if error_class in (d.getVar("ERROR_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
- d.setVar("QA_SANE", False)
- return False
- elif error_class in (d.getVar("WARN_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
- else:
- bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
- return True
-
-def package_qa_add_message(messages, section, new_msg):
- if section not in messages:
- messages[section] = new_msg
- else:
- messages[section] = messages[section] + "\n" + new_msg
-
QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
def package_qa_check_shebang_size(path, name, d, elf, messages):
- if os.path.islink(path) or elf:
+ import stat
+ if os.path.islink(path) or stat.S_ISFIFO(os.stat(path).st_mode) or elf:
return
try:
@@ -104,7 +95,7 @@ def package_qa_check_shebang_size(path, name, d, elf, messages):
return
if len(stanza) > 129:
- package_qa_add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
+ oe.qa.add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
return
QAPATHTEST[libexec] = "package_qa_check_libexec"
@@ -116,7 +107,7 @@ def package_qa_check_libexec(path,name, d, elf, messages):
return True
if 'libexec' in path.split(os.path.sep):
- package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
+ oe.qa.add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
return False
return True
@@ -144,7 +135,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
rpath = m.group(1)
for dir in bad_dirs:
if dir in rpath:
- package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
+ oe.qa.add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
@@ -174,7 +165,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
- package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
+ oe.qa.add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -183,8 +174,8 @@ def package_qa_check_dev(path, name, d, elf, messages):
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
def package_qa_check_dev_elf(path, name, d, elf, messages):
@@ -194,8 +185,8 @@ def package_qa_check_dev_elf(path, name, d, elf, messages):
install link-time .so files that are linker scripts.
"""
if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
- package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[staticdev] = "package_qa_check_staticdev"
def package_qa_check_staticdev(path, name, d, elf, messages):
@@ -207,8 +198,8 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
"""
if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
- package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d, name)))
QAPATHTEST[mime] = "package_qa_check_mime"
def package_qa_check_mime(path, name, d, elf, messages):
@@ -218,7 +209,7 @@ def package_qa_check_mime(path, name, d, elf, messages):
"""
if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
- package_qa_add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
+ oe.qa.add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
@@ -244,10 +235,10 @@ def package_qa_check_mime_xdg(path, name, d, elf, messages):
pkgname = name
if name == d.getVar('PN'):
pkgname = '${PN}'
- wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP_%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
- package_qa_add_message(messages, "mime-xdg", wstr)
+ wstr += "If yes: add \'inherit mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
+ oe.qa.add_message(messages, "mime-xdg", wstr)
if mime_type_found:
- package_qa_add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
+ oe.qa.add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
def package_qa_check_libdir(d):
@@ -277,7 +268,7 @@ def package_qa_check_libdir(d):
# Skip subdirectories for any packages with libdir in INSANE_SKIP
skippackages = []
for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
+ if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
bb.note("Package %s skipping libdir QA test" % (package))
skippackages.append(package)
elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
@@ -311,7 +302,7 @@ def package_qa_check_libdir(d):
pass
if messages:
- package_qa_handle_error("libdir", "\n".join(messages), d)
+ oe.qa.handle_error("libdir", "\n".join(messages), d)
QAPATHTEST[debug-files] = "package_qa_check_dbg"
def package_qa_check_dbg(path, name, d, elf, messages):
@@ -321,7 +312,7 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
+ oe.qa.add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[arch] = "package_qa_check_arch"
@@ -334,14 +325,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
if not elf:
return
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
provides = d.getVar('PROVIDES')
bpn = d.getVar('BPN')
if target_arch == "allarch":
pn = d.getVar('PN')
- package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
+ oe.qa.add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
return
# FIXME: Cross package confuse this check, so just skip them
@@ -364,14 +355,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
if not ((machine == elf.machine()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
- (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
- (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
+ (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
- package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
+ (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
QAPATHTEST[desktop] = "package_qa_check_desktop"
def package_qa_check_desktop(path, name, d, elf, messages):
@@ -383,7 +374,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
- package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
+ oe.qa.add_message(messages, "desktop", "Desktop file issue: " + l.strip())
QAPATHTEST[textrel] = "package_qa_textrel"
def package_qa_textrel(path, name, d, elf, messages):
@@ -409,7 +400,7 @@ def package_qa_textrel(path, name, d, elf, messages):
if not sane:
path = package_qa_clean_path(path, d, name)
- package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
+ oe.qa.add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -438,18 +429,20 @@ def package_qa_hash_style(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if "SYMTAB" in line:
has_syms = True
- if "GNU_HASH" or "DT_MIPS_XHASH" in line:
+ if "GNU_HASH" in line or "MIPS_XHASH" in line:
sane = True
if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
sane = True
if has_syms and not sane:
- package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
+ path = package_qa_clean_path(path, d, name)
+ oe.qa.add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
- Check for build paths inside target files and error if not found in the whitelist
+ Check for build paths inside target files and error if paths are not
+ explicitly ignored.
"""
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
@@ -459,16 +452,12 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if os.path.islink(path):
return
- # Ignore ipk and deb's CONTROL dir
- if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
- return
-
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
with open(path, 'rb') as f:
file_content = f.read()
if tmpdir in file_content:
trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
+ oe.qa.add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -484,10 +473,10 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
driverdir = d.expand("${libdir}/xorg/modules/drivers/")
if driverdir in path and path.endswith(".so"):
mlprefix = d.getVar('MLPREFIX') or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
- package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
+ oe.qa.add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
QAPATHTEST[infodir] = "package_qa_check_infodir"
def package_qa_check_infodir(path, name, d, elf, messages):
@@ -497,7 +486,7 @@ def package_qa_check_infodir(path, name, d, elf, messages):
infodir = d.expand("${infodir}/dir")
if infodir in path:
- package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
+ oe.qa.add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
@@ -510,7 +499,7 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
tmpdir = d.getVar('TMPDIR')
if target.startswith(tmpdir):
trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
+ oe.qa.add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
# Check license variables
do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
@@ -518,7 +507,6 @@ python populate_lic_qa_checksum() {
"""
Check for changes in the license files.
"""
- sane = True
lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
lic = d.getVar('LICENSE')
@@ -528,7 +516,7 @@ python populate_lic_qa_checksum() {
return
if not lic_files and d.getVar('SRC_URI'):
- sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
+ oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
srcdir = d.getVar('S')
corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
@@ -536,11 +524,11 @@ python populate_lic_qa_checksum() {
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
continue
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
continue
if (srclicfile == corebase_licensefile):
@@ -562,7 +550,7 @@ python populate_lic_qa_checksum() {
import hashlib
lineno = 0
license = []
- m = hashlib.md5()
+ m = hashlib.new('MD5', usedforsecurity=False)
for line in f:
lineno += 1
if (lineno >= beginline):
@@ -608,10 +596,9 @@ python populate_lic_qa_checksum() {
else:
msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- sane &= package_qa_handle_error("license-checksum", msg, d)
+ oe.qa.handle_error("license-checksum", msg, d)
- if not sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
def qa_check_staged(path,d):
@@ -623,7 +610,6 @@ def qa_check_staged(path,d):
responsible for the errors easily even if we look at every .pc and .la file.
"""
- sane = True
tmpdir = d.getVar('TMPDIR')
workdir = os.path.join(tmpdir, "work")
recipesysroot = d.getVar("RECIPE_SYSROOT")
@@ -656,16 +642,14 @@ def qa_check_staged(path,d):
file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("la", error_msg, d)
+ oe.qa.handle_error("la", error_msg, d)
elif file.endswith(".pc") and not skip_pkgconfig:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("pkgconfig", error_msg, d)
-
- return sane
+ oe.qa.handle_error("pkgconfig", error_msg, d)
# Run all package-wide warnfuncs and errorfuncs
def package_qa_package(warnfuncs, errorfuncs, package, d):
@@ -678,9 +662,9 @@ def package_qa_package(warnfuncs, errorfuncs, package, d):
func(package, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
@@ -695,38 +679,55 @@ def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
func(pn, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
+def prepopulate_objdump_p(elf, d):
+ output = elf.run_objdump("-p", d)
+ return (elf.name, output)
+
# Walk over all files in a directory and call func
def package_qa_walk(warnfuncs, errorfuncs, package, d):
- import oe.qa
-
#if this will throw an exception, then fix the dict above
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
warnings = {}
errors = {}
+ elves = {}
for path in pkgfiles[package]:
- elf = oe.qa.ELFFile(path)
- try:
- elf.open()
- except (IOError, oe.qa.NotELFFileError):
- # IOError can happen if the packaging control files disappear,
- elf = None
+ elf = None
+ if os.path.isfile(path):
+ elf = oe.qa.ELFFile(path)
+ try:
+ elf.open()
+ elf.close()
+ except oe.qa.NotELFFileError:
+ elf = None
+ if elf:
+ elves[path] = elf
+
+ results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
+ for item in results:
+ elves[item[0]].set_objdump("-p", item[1])
+
+ for path in pkgfiles[package]:
+ if path in elves:
+ elves[path].open()
for func in warnfuncs:
- func(path, package, d, elf, warnings)
+ func(path, package, d, elves.get(path), warnings)
for func in errorfuncs:
- func(path, package, d, elf, errors)
+ func(path, package, d, elves.get(path), errors)
+ if path in elves:
+ elves[path].close()
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# Don't do this check for kernel/module recipes, there aren't too many debug/development
@@ -746,10 +747,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
for rdepend in rdepends:
if "-dbg" in rdepend and "debug-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg,rdepend)
- package_qa_handle_error("debug-deps", error_msg, d)
+ oe.qa.handle_error("debug-deps", error_msg, d)
if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg, rdepend)
- package_qa_handle_error("dev-deps", error_msg, d)
+ oe.qa.handle_error("dev-deps", error_msg, d)
if rdepend not in packages:
rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
@@ -770,7 +771,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
else:
error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
- package_qa_handle_error("build-deps", error_msg, d)
+ oe.qa.handle_error("build-deps", error_msg, d)
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
@@ -780,7 +781,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
filerdepends = {}
rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
for key in rdep_data:
- if key.startswith("FILERDEPENDS_"):
+ if key.startswith("FILERDEPENDS:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
if subkey not in ignored_file_rdeps and \
not subkey.startswith('perl('):
@@ -795,7 +796,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# The python is not a package, but python-core provides it, so
# skip checking /usr/bin/python if python is in the rdeps, in
- # case there is a RDEPENDS_pkg = "python" in the recipe.
+ # case there is a RDEPENDS:pkg = "python" in the recipe.
for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
if py in done:
filerdepends.pop("/usr/bin/python",None)
@@ -808,11 +809,11 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
for key in rdep_data:
- if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
+ if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
filerdepends.pop(subkey,None)
# Add the files list to the rprovides
- if key == "FILES_INFO":
+ if key.startswith("FILES_INFO:"):
# Use eval() to make it as a dict
for subkey in eval(rdep_data[key]):
filerdepends.pop(subkey,None)
@@ -821,9 +822,9 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
break
if filerdepends:
for key in filerdepends:
- error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
- (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
+ error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
+ (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
+ oe.qa.handle_error("file-rdeps", error_msg, d)
package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
def package_qa_check_deps(pkg, pkgdest, d):
@@ -835,12 +836,12 @@ def package_qa_check_deps(pkg, pkgdest, d):
try:
rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
except ValueError as e:
- bb.fatal("%s_%s: %s" % (var, pkg, e))
+ bb.fatal("%s:%s: %s" % (var, pkg, e))
for dep in rvar:
for v in rvar[dep]:
if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
- error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
- package_qa_handle_error("dep-cmp", error_msg, d)
+ error_msg = "%s:%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
+ oe.qa.handle_error("dep-cmp", error_msg, d)
check_valid_deps('RDEPENDS')
check_valid_deps('RRECOMMENDS')
@@ -851,13 +852,14 @@ def package_qa_check_deps(pkg, pkgdest, d):
QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
def package_qa_check_usrmerge(pkg, d, messages):
+
pkgdest = d.getVar('PKGDEST')
pkg_dir = pkgdest + os.sep + pkg + os.sep
merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
for f in merged_dirs:
if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
- package_qa_add_message(messages, "usrmerge", msg)
+ oe.qa.add_message(messages, "usrmerge", msg)
return False
return True
@@ -876,7 +878,7 @@ def package_qa_check_perllocalpod(pkg, d, messages):
if matches:
matches = [package_qa_clean_path(path, d, pkg) for path in matches]
msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
- package_qa_add_message(messages, "perllocalpod", msg)
+ oe.qa.add_message(messages, "perllocalpod", msg)
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
@@ -888,13 +890,13 @@ def package_qa_check_expanded_d(package, d, messages):
expanded_d = d.getVar('D')
for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + package) or ""
+ bbvar = d.getVar(var + ":" + package) or ""
if expanded_d in bbvar:
if var == 'FILES':
- package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
+ oe.qa.add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
sane = False
else:
- package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
+ oe.qa.add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
sane = False
return sane
@@ -903,19 +905,40 @@ def package_qa_check_unlisted_pkg_lics(package, d, messages):
"""
Check that all licenses for a package are among the licenses for the recipe.
"""
- pkg_lics = d.getVar('LICENSE_' + package)
+ pkg_lics = d.getVar('LICENSE:' + package)
if not pkg_lics:
return True
recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
- unlisted = oe.license.list_licenses(pkg_lics) - recipe_lics_set
- if not unlisted:
- return True
+ package_lics = oe.license.list_licenses(pkg_lics)
+ unlisted = package_lics - recipe_lics_set
+ if unlisted:
+ oe.qa.add_message(messages, "unlisted-pkg-lics",
+ "LICENSE:%s includes licenses (%s) that are not "
+ "listed in LICENSE" % (package, ' '.join(unlisted)))
+ return False
+ obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
+ if obsolete:
+ oe.qa.add_message(messages, "obsolete-license",
+ "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)))
+ return False
+ return True
- package_qa_add_message(messages, "unlisted-pkg-lics",
- "LICENSE_%s includes licenses (%s) that are not "
- "listed in LICENSE" % (package, ' '.join(unlisted)))
- return False
+QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
+def package_qa_check_empty_dirs(pkg, d, messages):
+ """
+ Check for the existence of files in directories that are expected to be
+ empty.
+ """
+
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
+ empty_dir = oe.path.join(pkgd, dir)
+ if os.path.exists(empty_dir) and os.listdir(empty_dir):
+ recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
+ "but it is expected to be empty")
+ msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
+ oe.qa.add_message(messages, "empty-dirs", msg)
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
@@ -927,7 +950,7 @@ def package_qa_check_encoding(keys, encode, d):
except UnicodeDecodeError as e:
error_msg = "%s has non %s characters" % (key,enc)
sane = False
- package_qa_handle_error("invalid-chars", error_msg, d)
+ oe.qa.handle_error("invalid-chars", error_msg, d)
return sane
for key in keys:
@@ -960,36 +983,33 @@ def package_qa_check_host_user(path, name, d, elf, messages):
else:
check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
return False
check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
return False
return True
-QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
-def package_qa_check_src_uri(pn, d, messages):
- import re
-
- if "${PN}" in d.getVar("SRC_URI", False):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
-
- for url in d.getVar("SRC_URI").split():
- if re.search(r"github\.com/.+/.+/archive/.+", url):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub archives" % pn, d)
-
QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
def package_qa_check_unhandled_features_check(pn, d, messages):
if not bb.data.inherits_class('features_check', d):
var_set = False
for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
- if d.getVar(var) is not None or d.overridedata.get(var) is not None:
+ if d.getVar(var) is not None or d.hasOverrides(var):
var_set = True
if var_set:
- package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
+ oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
+
+QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
+def package_qa_check_missing_update_alternatives(pn, d, messages):
+ # Look at all packages and find out if any of them set the ALTERNATIVE variable
+ # without inheriting the update-alternatives class
+ for pkg in (d.getVar('PACKAGES') or '').split():
+ if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
+ oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
# The PACKAGE FUNC to scan each package
python do_package_qa () {
@@ -998,6 +1018,14 @@ python do_package_qa () {
bb.note("DO PACKAGE QA")
+ main_lic = d.getVar('LICENSE')
+
+ # Check for obsolete license references in main LICENSE (packages are checked below for any changes)
+ main_licenses = oe.license.list_licenses(d.getVar('LICENSE'))
+ obsolete = set(oe.license.obsolete_license_list()) & main_licenses
+ if obsolete:
+ oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
+
bb.build.exec_func("read_subpackage_metadata", d)
# Check non UTF-8 characters on recipe's metadata
@@ -1006,26 +1034,6 @@ python do_package_qa () {
logdir = d.getVar('T')
pn = d.getVar('PN')
- # Check the compile log for host contamination
- compilelog = os.path.join(logdir,"log.do_compile")
-
- if os.path.exists(compilelog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, compilelog)
- package_qa_handle_error("compile-host-path", msg, d)
-
- # Check the install log for host contamination
- installlog = os.path.join(logdir,"log.do_install")
-
- if os.path.exists(installlog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, installlog)
- package_qa_handle_error("install-host-path", msg, d)
-
# Scan the packages...
pkgdest = d.getVar('PKGDEST')
packages = set((d.getVar('PACKAGES') or '').split())
@@ -1034,7 +1042,14 @@ python do_package_qa () {
pkgfiles = {}
for pkg in packages:
pkgfiles[pkg] = []
- for walkroot, dirs, files in os.walk(os.path.join(pkgdest, pkg)):
+ pkgdir = os.path.join(pkgdest, pkg)
+ for walkroot, dirs, files in os.walk(pkgdir):
+ # Don't walk into top-level CONTROL or DEBIAN directories as these
+ # are temporary directories created by do_package.
+ if walkroot == pkgdir:
+ for control in ("CONTROL", "DEBIAN"):
+ if control in dirs:
+ dirs.remove(control)
for file in files:
pkgfiles[pkg].append(os.path.join(walkroot, file))
@@ -1071,14 +1086,14 @@ python do_package_qa () {
for package in packages:
skip = set((d.getVar('INSANE_SKIP') or "").split() +
- (d.getVar('INSANE_SKIP_' + package) or "").split())
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
bb.note("Checking Package: %s" % package)
# Check package name
if not pkgname_pattern.match(package):
- package_qa_handle_error("pkgname",
+ oe.qa.handle_error("pkgname",
"%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
@@ -1096,10 +1111,7 @@ python do_package_qa () {
if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("QA run found fatal errors. Please consider fixing them.")
- bb.note("DONE with PACKAGE QA")
+ oe.qa.exit_if_errors(d)
}
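Per-package waivers continue to work through INSANE_SKIP, now in colon override syntax. A recipe that knowingly ships a bare .so symlink in its main package would silence just that test (a sketch, not a general recommendation):

    INSANE_SKIP:${PN} += "dev-so"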
# binutils is used for most checks, so need to set as dependency
@@ -1113,7 +1125,7 @@ addtask do_package_qa after do_packagedata do_package before do_build
python() {
pkgs = (d.getVar('PACKAGES') or '').split()
for pkg in pkgs:
- d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
+ d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
}
SSTATETASKS += "do_package_qa"
@@ -1126,8 +1138,8 @@ addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
- if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
- bb.fatal("QA staging was broken by the package built above")
+ qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d)
+ oe.qa.exit_with_message_if_errors("QA staging was broken by the package built above", d)
}
python do_qa_patch() {
@@ -1175,7 +1187,31 @@ python do_qa_patch() {
elif 'patch-fuzz' in d.getVar('WARN_QA'):
bb.warn(msg)
msg = "Patch log indicates that patches do not apply cleanly."
- package_qa_handle_error("patch-fuzz", msg, d)
+ oe.qa.handle_error("patch-fuzz", msg, d)
+
+ # Check if the patch contains a correctly formatted and spelled Upstream-Status
+ import re
+ from oe import patch
+
+ for url in patch.src_patches(d):
+ (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
+
+ # skip patches not in oe-core
+ if '/meta/' not in fullpath:
+ continue
+
+ content = open(fullpath, encoding='utf-8', errors='ignore').read()
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ match_kinda = kinda_status_re.search(content)
+ match_strict = strict_status_re.search(content)
+ guidelines = "https://www.openembedded.org/wiki/Commit_Patch_Message_Guidelines#Patch_Header_Recommendations:_Upstream-Status"
+
+ if not match_strict:
+ if match_kinda:
+ bb.error("Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0)))
+ else:
+ bb.error("Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines))
}
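The strict pattern accepts only the listed status keywords at the start of a line, optionally followed by free-form detail. A header that passes the check might look like this, with a placeholder URL:

    Upstream-Status: Submitted [https://lists.openembedded.org/...]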
python do_qa_configure() {
@@ -1197,13 +1233,13 @@ python do_qa_configure() {
if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
- statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
+ statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
os.path.join(root,"config.log")
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
Rerun configure task after fixing this."""
- package_qa_handle_error("configure-unsafe", error_msg, d)
+ oe.qa.handle_error("configure-unsafe", error_msg, d)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1233,30 +1269,27 @@ Rerun configure task after fixing this."""
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if subprocess.call(gnu, shell=True) == 0:
error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
- package_qa_handle_error("configure-gettext", error_msg, d)
+ oe.qa.handle_error("configure-gettext", error_msg, d)
###########################################################################
# Check unrecognised configure options (with a white list)
###########################################################################
- if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
+ if bb.data.inherits_class("autotools", d):
bb.note("Checking configure output for unrecognised options")
try:
if bb.data.inherits_class("autotools", d):
flag = "WARNING: unrecognized options:"
log = os.path.join(d.getVar('B'), 'config.log')
- if bb.data.inherits_class("meson", d):
- flag = "WARNING: Unknown options:"
- log = os.path.join(d.getVar('T'), 'log.do_configure')
output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
- options -= whitelist
+ ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
+ options -= ignore_opts
if options:
pn = d.getVar('PN')
error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
- package_qa_handle_error("unknown-configure-option", error_msg, d)
+ oe.qa.handle_error("unknown-configure-option", error_msg, d)
except subprocess.CalledProcessError:
pass
@@ -1268,18 +1301,33 @@ Rerun configure task after fixing this."""
if pconfig not in pkgconfigflags:
pn = d.getVar('PN')
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
- package_qa_handle_error("invalid-packageconfig", error_msg, d)
+ oe.qa.handle_error("invalid-packageconfig", error_msg, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
+def unpack_check_src_uri(pn, d):
+ import re
+
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ if 'src-uri-bad' in skip:
+ bb.note("Recipe %s skipping qa checking: src-uri-bad" % d.getVar('PN'))
+ return
+
+ if "${PN}" in d.getVar("SRC_URI", False):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
+
+ for url in d.getVar("SRC_URI").split():
+ if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
+
python do_qa_unpack() {
src_uri = d.getVar('SRC_URI')
s_dir = d.getVar('S')
if src_uri and not os.path.exists(s_dir):
bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
+
+ unpack_check_src_uri(d.getVar('PN'), d)
}
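The src-uri-bad test now runs during do_unpack and also matches GitLab archive URLs; the usual remedy is to replace a tag tarball with a pinned git fetch. A sketch against a hypothetical repository:

    # flagged: forge-generated archives can be regenerated with different checksums
    SRC_URI = "https://github.com/example/project/archive/v1.0.tar.gz"
    # preferred: explicit git protocol, branch and pinned revision
    SRC_URI = "git://github.com/example/project;protocol=https;branch=main"
    SRCREV = "<commit hash>"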
# The Staging Func, to check all staging
@@ -1311,11 +1359,11 @@ python () {
# Checking ${FILESEXTRAPATHS}
extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
if '__default' not in extrapaths.split(":"):
- msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
+ msg = "FILESEXTRAPATHS-variable, must always use :prepend (or :append)\n"
msg += "type of assignment, and don't forget the colon.\n"
msg += "Please assign it with the format of:\n"
- msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
- msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
+ msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
+ msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
msg += "in your bbappend file\n\n"
msg += "Your incorrect assignment is:\n"
msg += "%s\n" % extrapaths
@@ -1325,15 +1373,15 @@ python () {
pn = d.getVar('PN')
if pn in overrides:
msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
- package_qa_handle_error("pn-overrides", msg, d)
+ oe.qa.handle_error("pn-overrides", msg, d)
prog = re.compile(r'[A-Z]')
if prog.search(pn):
- package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+ oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
- # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
+ # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
# why it doesn't work.
- if (d.getVar(d.expand('DEPENDS_${PN}'))):
- package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
+ if (d.getVar(d.expand('DEPENDS:${PN}'))):
+ oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
issues = []
if (d.getVar('PACKAGES') or "").split():
@@ -1350,8 +1398,36 @@ python () {
else:
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+
+ if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
+ for native_class in ['native', 'nativesdk']:
+ if bb.data.inherits_class(native_class, d):
+
+ inherited_classes = d.getVar('__inherit_cache', False) or []
+ needle = os.path.join('classes', native_class)
+
+ bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
+ # BBCLASSEXTEND items are always added in the end
+ skip_classes = bbclassextend
+ if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
+ # native also inherits nopackages and relocatable bbclasses
+ skip_classes.extend(['nopackages', 'relocatable'])
+
+ broken_order = []
+ for class_item in reversed(inherited_classes):
+ if needle not in class_item:
+ for extend_item in skip_classes:
+ if os.path.join('classes', '%s.bbclass' % extend_item) in class_item:
+ break
+ else:
+ pn = d.getVar('PN')
+ broken_order.append(os.path.basename(class_item))
+ else:
+ break
+ if broken_order:
+ oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
+ "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
+
+ oe.qa.exit_if_errors(d)
}
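The native-last walk inspects the inherit cache in reverse and only tolerates classes that BBCLASSEXTEND itself appends, so the practical rule is to keep native or nativesdk at the end of the inherit line. A sketch:

    inherit native autotools    # flagged: autotools is inherited after native
    inherit autotools native    # accepted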
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
index bbeecba7bd..e77107c893 100644
--- a/meta/classes/kernel-artifact-names.bbclass
+++ b/meta/classes/kernel-artifact-names.bbclass
@@ -1,14 +1,27 @@
+##################################################################
+# Specific kernel creation info
+# for recipes/bbclasses which need to reuse some of the kernel
+# artifacts, but aren't kernel recipes themselves
+##################################################################
+
+inherit image-artifact-names
+
KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
+KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
+KERNEL_IMAGETYPE_SYMLINK ?= "1"
KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
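The new knobs let a BSP adjust artifact naming without patching the class: KERNEL_IMAGETYPE_SYMLINK drops the unversioned image symlink, the *_BIN_EXT variables change the bundle extension, and an empty *_LINK_NAME suppresses the machine-named symlinks (as the kernel-devicetree changes below rely on). A machine-configuration sketch:

    KERNEL_IMAGETYPE_SYMLINK = "0"
    KERNEL_ARTIFACT_BIN_EXT = ".img"
    KERNEL_DTB_LINK_NAME = ""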
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 522c46575d..b4338da1b1 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -1,14 +1,20 @@
# Support for device tree generation
-PACKAGES_append = " \
- ${KERNEL_PACKAGE_NAME}-devicetree \
- ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
-"
-FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
-FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+python () {
+ if not bb.data.inherits_class('nopackages', d):
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
+ if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
+}
+
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
+# dtc flags passed via DTC_FLAGS env variable
+KERNEL_DTC_FLAGS ?= ""
+
normalize_dtb () {
dtb="$1"
if echo $dtb | grep -q '/dts/'; then
@@ -27,7 +33,7 @@ get_real_dtb_path_in_kernel () {
echo "$dtb_path"
}
-do_configure_append() {
+do_configure:append() {
if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
case "${ARCH}" in
@@ -49,14 +55,18 @@ do_configure_append() {
fi
}
-do_compile_append() {
+do_compile:append() {
+ if [ -n "${KERNEL_DTC_FLAGS}" ]; then
+ export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
+ fi
+
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb
+ oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
done
}
-do_install_append() {
+do_install:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
@@ -66,28 +76,36 @@ do_install_append() {
done
}
-do_deploy_append() {
+do_deploy:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
install -d $deployDir
install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ fi
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ fi
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
$deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
$deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
fi
fi
done
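
To illustrate the KERNEL_DTC_FLAGS hook introduced above: a machine that needs overlay symbols in its base dtb could pass dtc's -@ option (a sketch; the flag choice is an assumption about the use case):

    KERNEL_DTC_FLAGS = "-@"
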
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 72b05ff8d1..8a9b195d6e 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -21,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -32,6 +36,10 @@ python __anonymous () {
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ ubootenv = d.getVar('UBOOT_ENV')
+ if ubootenv:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
+
#check if there are any dtb providers
providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
if providerdtb:
@@ -45,27 +53,38 @@ python __anonymous () {
if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
-# Options for the device tree compiler passed to mkimage '-D' feature:
-UBOOT_MKIMAGE_DTCOPTS ??= ""
-# fitImage Hash Algo
-FIT_HASH_ALG ?= "sha256"
+# Description string
+FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
-# fitImage Signature Algo
-FIT_SIGN_ALG ?= "rsa2048"
+# Sign individual images as well
+FIT_SIGN_INDIVIDUAL ?= "0"
+
+FIT_CONF_PREFIX ?= "conf-"
+FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
+
+# Keys used to sign individually image nodes.
+# The keys to sign image nodes must be different from those used to sign
+# configuration nodes, otherwise the "required" property, from
+# UBOOT_DTB_BINARY, will be set to "conf", because "conf" prevails over "image".
+# Image signature checking will then not be mandatory, and no error will be
+# raised in case of failure.
+# UBOOT_SIGN_IMG_KEYNAME = "dev2" # key name in keydir (e.g. "dev2.crt", "dev2.key")
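
A minimal signing configuration consistent with the comment above might look like this (the key names and directory are illustrative assumptions):

    UBOOT_SIGN_ENABLE = "1"
    FIT_SIGN_INDIVIDUAL = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/keys"    # assumed key location
    UBOOT_SIGN_KEYNAME = "dev"              # signs configuration nodes
    UBOOT_SIGN_IMG_KEYNAME = "dev2"         # signs image nodes; must differ from above
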
#
# Emit the fitImage ITS header
#
# $1 ... .its filename
fitimage_emit_fit_header() {
- cat << EOF >> ${1}
+ cat << EOF >> $1
/dts-v1/;
/ {
- description = "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}";
+ description = "${FIT_DESC}";
#address-cells = <1>;
EOF
}
@@ -82,24 +101,24 @@ EOF
fitimage_emit_section_maint() {
case $2 in
imagestart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
images {
EOF
;;
confstart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
configurations {
EOF
;;
sectend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
fitend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
@@ -116,6 +135,8 @@ EOF
fitimage_emit_section_kernel() {
kernel_csum="${FIT_HASH_ALG}"
+ kernel_sign_algo="${FIT_SIGN_ALG}"
+ kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ENTRYPOINT="${UBOOT_ENTRYPOINT}"
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
@@ -123,21 +144,32 @@ fitimage_emit_section_kernel() {
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
- cat << EOF >> ${1}
- kernel@${2} {
+ cat << EOF >> $1
+ kernel-$2 {
description = "Linux kernel";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "kernel";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${4}";
+ compression = "$4";
load = <${UBOOT_LOADADDRESS}>;
- entry = <${ENTRYPOINT}>;
- hash@1 {
- algo = "${kernel_csum}";
+ entry = <$ENTRYPOINT>;
+ hash-1 {
+ algo = "$kernel_csum";
};
};
EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$kernel_csum,$kernel_sign_algo";
+ key-name-hint = "$kernel_sign_keyname";
+ };
+ };
+EOF
+ fi
}
#
@@ -149,6 +181,8 @@ EOF
fitimage_emit_section_dtb() {
dtb_csum="${FIT_HASH_ALG}"
+ dtb_sign_algo="${FIT_SIGN_ALG}"
+ dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
dtb_loadline=""
dtb_ext=${DTB##*.}
@@ -159,19 +193,67 @@ fitimage_emit_section_dtb() {
elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
fi
- cat << EOF >> ${1}
- fdt@${2} {
+ cat << EOF >> $1
+ fdt-$2 {
description = "Flattened Device Tree blob";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
- ${dtb_loadline}
- hash@1 {
- algo = "${dtb_csum}";
+ $dtb_loadline
+ hash-1 {
+ algo = "$dtb_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$dtb_csum,$dtb_sign_algo";
+ key-name-hint = "$dtb_sign_keyname";
};
};
EOF
+ fi
+}
+
+#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
+
+ cat << EOF >> $1
+ bootscr-$2 {
+ description = "U-boot script";
+ data = /incbin/("$3");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash-1 {
+ algo = "$bootscr_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
+ };
+ };
+EOF
+ fi
}
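
For orientation, assuming UBOOT_ENV_BINARY=boot.scr, UBOOT_ARCH=arm, the sha256 default for FIT_HASH_ALG and individual signing disabled, the emitter above would append an ITS node roughly like the following (expanded sample, not literal task output):

    bootscr-boot.scr {
            description = "U-boot script";
            data = /incbin/("boot.scr");
            type = "script";
            arch = "arm";
            compression = "none";
            hash-1 {
                    algo = "sha256";
            };
    };
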
#
@@ -184,18 +266,18 @@ fitimage_emit_section_setup() {
setup_csum="${FIT_HASH_ALG}"
- cat << EOF >> ${1}
- setup@${2} {
+ cat << EOF >> $1
+ setup-$2 {
description = "Linux setup.bin";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "x86_setup";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "none";
load = <0x00090000>;
entry = <0x00090000>;
- hash@1 {
- algo = "${setup_csum}";
+ hash-1 {
+ algo = "$setup_csum";
};
};
EOF
@@ -210,6 +292,8 @@ EOF
fitimage_emit_section_ramdisk() {
ramdisk_csum="${FIT_HASH_ALG}"
+ ramdisk_sign_algo="${FIT_SIGN_ALG}"
+ ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ramdisk_loadline=""
ramdisk_entryline=""
@@ -220,21 +304,32 @@ fitimage_emit_section_ramdisk() {
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
fi
- cat << EOF >> ${1}
- ramdisk@${2} {
+ cat << EOF >> $1
+ ramdisk-$2 {
description = "${INITRAMFS_IMAGE}";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "none";
- ${ramdisk_loadline}
- ${ramdisk_entryline}
- hash@1 {
- algo = "${ramdisk_csum}";
+ $ramdisk_loadline
+ $ramdisk_entryline
+ hash-1 {
+ algo = "$ramdisk_csum";
};
};
EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$ramdisk_csum,$ramdisk_sign_algo";
+ key-name-hint = "$ramdisk_sign_keyname";
+ };
+ };
+EOF
+ fi
}
#
@@ -244,101 +339,138 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
- if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
+ its_file="$1"
+ kernel_id="$2"
+ dtb_image="$3"
+ ramdisk_id="$4"
+ bootscr_id="$5"
+ config_id="$6"
+ default_flag="$7"
+
# Test if we have any DTBs at all
sep=""
conf_desc=""
+ conf_node="${FIT_CONF_PREFIX}"
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
- if [ -n "${2}" ]; then
+	# The conf node name is selected based on the dtb ID if present,
+	# otherwise it is selected based on the kernel ID.
+ if [ -n "$dtb_image" ]; then
+ conf_node=$conf_node$dtb_image
+ else
+ conf_node=$conf_node$kernel_id
+ fi
+
+ if [ -n "$kernel_id" ]; then
conf_desc="Linux kernel"
sep=", "
- kernel_line="kernel = \"kernel@${2}\";"
+ kernel_line="kernel = \"kernel-$kernel_id\";"
fi
- if [ -n "${3}" ]; then
- conf_desc="${conf_desc}${sep}FDT blob"
+ if [ -n "$dtb_image" ]; then
+ conf_desc="$conf_desc${sep}FDT blob"
sep=", "
- fdt_line="fdt = \"fdt@${3}\";"
+ fdt_line="fdt = \"fdt-$dtb_image\";"
fi
- if [ -n "${4}" ]; then
- conf_desc="${conf_desc}${sep}ramdisk"
+ if [ -n "$ramdisk_id" ]; then
+ conf_desc="$conf_desc${sep}ramdisk"
sep=", "
- ramdisk_line="ramdisk = \"ramdisk@${4}\";"
+ ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
fi
- if [ -n "${5}" ]; then
- conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup@${5}\";"
+ if [ -n "$bootscr_id" ]; then
+ conf_desc="$conf_desc${sep}u-boot script"
+ sep=", "
+ bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
fi
- if [ "${6}" = "1" ]; then
- default_line="default = \"conf@${3}\";"
+ if [ -n "$config_id" ]; then
+ conf_desc="$conf_desc${sep}setup"
+ setup_line="setup = \"setup-$config_id\";"
+ fi
+
+ if [ "$default_flag" = "1" ]; then
+		# The default node is selected based on the dtb ID if present,
+		# otherwise it is selected based on the kernel ID.
+ if [ -n "$dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ else
+ default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
+ fi
fi
- cat << EOF >> ${1}
- ${default_line}
- conf@${3} {
- description = "${6} ${conf_desc}";
- ${kernel_line}
- ${fdt_line}
- ${ramdisk_line}
- ${setup_line}
- hash@1 {
- algo = "${conf_csum}";
+ cat << EOF >> $its_file
+ $default_line
+ $conf_node {
+ description = "$default_flag $conf_desc";
+ $kernel_line
+ $fdt_line
+ $ramdisk_line
+ $bootscr_line
+ $setup_line
+ hash-1 {
+ algo = "$conf_csum";
};
EOF
- if [ ! -z "${conf_sign_keyname}" ] ; then
+ if [ -n "$conf_sign_keyname" ] ; then
sign_line="sign-images = "
sep=""
- if [ -n "${2}" ]; then
- sign_line="${sign_line}${sep}\"kernel\""
+ if [ -n "$kernel_id" ]; then
+ sign_line="$sign_line${sep}\"kernel\""
sep=", "
fi
- if [ -n "${3}" ]; then
- sign_line="${sign_line}${sep}\"fdt\""
+ if [ -n "$dtb_image" ]; then
+ sign_line="$sign_line${sep}\"fdt\""
sep=", "
fi
- if [ -n "${4}" ]; then
- sign_line="${sign_line}${sep}\"ramdisk\""
+ if [ -n "$ramdisk_id" ]; then
+ sign_line="$sign_line${sep}\"ramdisk\""
sep=", "
fi
- if [ -n "${5}" ]; then
- sign_line="${sign_line}${sep}\"setup\""
+ if [ -n "$bootscr_id" ]; then
+ sign_line="$sign_line${sep}\"bootscr\""
+ sep=", "
+ fi
+
+ if [ -n "$config_id" ]; then
+ sign_line="$sign_line${sep}\"setup\""
fi
- sign_line="${sign_line};"
+ sign_line="$sign_line;"
- cat << EOF >> ${1}
- signature@1 {
- algo = "${conf_csum},${conf_sign_algo}";
- key-name-hint = "${conf_sign_keyname}";
- ${sign_line}
+ cat << EOF >> $its_file
+ signature-1 {
+ algo = "$conf_csum,$conf_sign_algo";
+ key-name-hint = "$conf_sign_keyname";
+ $sign_line
};
EOF
fi
- cat << EOF >> ${1}
+ cat << EOF >> $its_file
};
EOF
}
@@ -353,114 +485,154 @@ fitimage_assemble() {
kernelcount=1
dtbcount=""
DTBS=""
- ramdiskcount=${3}
+ ramdiskcount=$3
setupcount=""
- rm -f ${1} arch/${ARCH}/boot/${2}
+ bootscr_id=""
+ rm -f $1 arch/${ARCH}/boot/$2
- fitimage_emit_fit_header ${1}
+ if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
+ bbfatal "Keys used to sign images and configuration nodes must be different."
+ fi
+
+ fitimage_emit_fit_header $1
#
# Step 1: Prepare a kernel image section.
#
- fitimage_emit_section_maint ${1} imagestart
+ fitimage_emit_section_maint $1 imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
#
- if [ -z "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -n "${KERNEL_DEVICETREE}" ]; then
+ if [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
- if echo ${DTB} | grep -q '/dts/'; then
- bbwarn "${DTB} contains the full path to the the dts file, but only the dtb name should be used."
- DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+ if echo $DTB | grep -q '/dts/'; then
+			bbwarn "$DTB contains the full path to the dts file, but only the dtb name should be used."
+ DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
fi
- DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
- if [ ! -e "${DTB_PATH}" ]; then
- DTB_PATH="arch/${ARCH}/boot/${DTB}"
+
+ # Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
+ if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
+ continue
fi
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
+ DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
+ if [ ! -e "$DTB_PATH" ]; then
+ DTB_PATH="arch/${ARCH}/boot/$DTB"
+ fi
+
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
fi
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} "${EXTERNAL_KERNEL_DEVICETREE}/${DTB}"
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
done
fi
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
+ #
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
- fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
+ fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
- for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
- initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
- echo "Using $initramfs_path"
- if [ -e "${initramfs_path}" ]; then
- fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
+ for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio; do
+ initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
+ echo -n "Searching for $initramfs_path..."
+ if [ -e "$initramfs_path" ]; then
+ echo "found"
+ fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
break
+ else
+ echo "not found"
fi
done
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
# Force the first Kernel and DTB in the default config
kernelcount=1
- if [ -n "${dtbcount}" ]; then
+ if [ -n "$dtbcount" ]; then
dtbcount=1
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
- fitimage_emit_section_maint ${1} confstart
-
- if [ -n "${DTBS}" ]; then
+ fitimage_emit_section_maint $1 confstart
+
+	# kernel-fitimage.bbclass currently supports exactly one kernel to be
+	# added to the FIT image, along with zero or more device trees and zero
+	# or one ramdisk.
+	# It is also possible to include an initramfs bundle (kernel and rootfs in
+	# one binary). When the initramfs bundle is used, the ramdisk is disabled.
+	# If a device tree is to be part of the FIT image, the default
+	# configuration is selected based on the dtb count; if no dtb is present,
+	# it is selected based on the kernel count.
+ if [ -n "$DTBS" ]; then
i=1
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
- if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ if [ "$dtb_ext" = "dtbo" ]; then
+ fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
fi
- i=`expr ${i} + 1`
+ i=`expr $i + 1`
done
+ else
+ defaultconfigcount=1
+ fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
- fitimage_emit_section_maint ${1} fitend
+ fitimage_emit_section_maint $1 fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
- uboot-mkimage \
+ ${UBOOT_MKIMAGE} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
- -f ${1} \
- arch/${ARCH}/boot/${2}
+ -f $1 \
+ arch/${ARCH}/boot/$2
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -470,18 +642,19 @@ fitimage_assemble() {
cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
fi
- uboot-mkimage \
+ ${UBOOT_MKIMAGE_SIGN} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
+ -r arch/${ARCH}/boot/$2 \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
do_assemble_fitimage() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
cd ${B}
- fitimage_assemble fit-image.its fitImage
+ fitimage_assemble fit-image.its fitImage ""
fi
}
@@ -491,39 +664,127 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
+do_kernel_generate_rsa_keys() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
+ bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
+ fi
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
+
+ # Generate keys to sign configuration nodes, only if they don't already exist
+ if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
+ [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${UBOOT_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing fitImage"
+ openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
+ "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
+ "${FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
+ -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
+ -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
+ fi
+
+ # Generate keys to sign image nodes, only if they don't already exist
+ if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
+ [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${UBOOT_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing fitImage"
+ openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
+ "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ "${FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
+ -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
+ fi
+ fi
+}
+
+addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
kernel_do_deploy[vardepsexclude] = "DATETIME"
-kernel_do_deploy_append() {
+kernel_do_deploy:append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
+
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- fi
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
- # UBOOT_DTB_IMAGE is a realfile, but we can't use
- # ${UBOOT_DTB_IMAGE} since it contains ${PV} which is aimed
- # for u-boot, but we are in kernel env now.
- install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
fi
fi
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ] ; then
+		# UBOOT_DTB_IMAGE is a real file, but we can't use
+		# ${UBOOT_DTB_IMAGE} since it contains ${PV}, which is meant
+		# for u-boot, while we are in the kernel env now.
+ install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
+ fi
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${UBOOT_BINARY}" -a -n "${SPL_DTB_BINARY}" ] ; then
+		# If we're also creating and/or signing the uboot fit, we now need to
+		# deploy it, its .its file, and u-boot-spl.dtb
+ install -m 0644 ${B}/u-boot-spl-${MACHINE}*.dtb "$deployDir/"
+ echo "Copying u-boot-fitImage file..."
+ install -m 0644 ${B}/u-boot-fitImage-* "$deployDir/"
+ echo "Copying u-boot-its file..."
+ install -m 0644 ${B}/u-boot-its-* "$deployDir/"
+ fi
+}
+
+# The function below performs the following in case of initramfs bundles:
+# - Removes do_assemble_fitimage. FIT generation is done through
+# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
+# and should not be part of the tasks to be executed.
+# - Since do_kernel_generate_rsa_keys is inserted by default
+# between do_compile and do_assemble_fitimage, this is
+# not suitable in case of initramfs bundles. do_kernel_generate_rsa_keys
+# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
+python () {
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+ bb.build.deltask('kernel_generate_rsa_keys', d)
+ bb.build.addtask('kernel_generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
}
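
As a usage sketch for the new do_kernel_generate_rsa_keys task (the directory and key size are assumptions): enabling automatic key creation before signing could look like:

    UBOOT_SIGN_ENABLE = "1"
    FIT_GENERATE_KEYS = "1"
    UBOOT_SIGN_KEYDIR = "${TOPDIR}/fit-keys"   # created on demand by the task
    FIT_SIGN_NUMBITS = "2048"                  # passed to openssl genrsa
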
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
index 5d92f3b636..44b2015468 100644
--- a/meta/classes/kernel-grub.bbclass
+++ b/meta/classes/kernel-grub.bbclass
@@ -99,7 +99,7 @@ python __anonymous () {
typelower = type.lower()
preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
- d.setVar('pkg_preinst_kernel-image-' + typelower + '_append', preinst_append)
- d.setVar('pkg_postinst_kernel-image-' + typelower + '_prepend', postinst_prepend)
+ d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
+ d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
}
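
The kernel-grub change is part of the tree-wide migration to the new override syntax; schematically:

    # old override syntax
    do_install_append() { :; }
    FILES_${PN} += "/foo"
    # new override syntax (colon separator)
    do_install:append() { :; }
    FILES:${PN} += "/foo"
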
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index 221022b7bc..a29c294810 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -1,4 +1,4 @@
-pkg_postinst_modules () {
+pkg_postinst:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -8,7 +8,7 @@ else
fi
}
-pkg_postrm_modules () {
+pkg_postrm:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -24,11 +24,12 @@ fi
PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
-do_install_append() {
+do_install:append() {
install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
+KERNEL_SPLIT_MODULES ?= "1"
+PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
@@ -44,17 +45,20 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
tempfile.tempdir = d.getVar("WORKDIR")
- compressed = re.match( r'.*\.([xg])z$', file)
+ compressed = re.match( r'.*\.(gz|xz|zst)$', file)
tf = tempfile.mkstemp()
tmpfile = tf[1]
if compressed:
tmpkofile = tmpfile + ".ko"
- if compressed.group(1) == 'g':
+ if compressed.group(1) == 'gz':
cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
subprocess.check_call(cmd, shell=True)
- elif compressed.group(1) == 'x':
+ elif compressed.group(1) == 'xz':
cmd = "xz -dc %s > %s" % (file, tmpkofile)
subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'zst':
+ cmd = "zstd -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
else:
msg = "Cannot decompress '%s'" % file
raise msg
@@ -100,11 +104,11 @@ python split_kernel_module_packages () {
else:
f.write('%s\n' % basename)
f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
- bb.fatal("pkg_postinst_%s not defined" % pkg)
+ bb.fatal("pkg_postinst:%s not defined" % pkg)
postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
# Write out any modconf fragment
modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
@@ -117,15 +121,19 @@ python split_kernel_module_packages () {
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES_%s' % pkg)
+ files = d.getVar('FILES:%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
- d.setVar('FILES_%s' % pkg, files)
+ d.setVar('FILES:%s' % pkg, files)
+
+ conffiles = d.getVar('CONFFILES:%s' % pkg)
+ conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
+ d.setVar('CONFFILES:%s' % pkg, conffiles)
if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
- d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
+ old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
+ d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
modinfo_deps = []
if "depends" in vals and vals["depends"] != "":
for dep in vals["depends"].split(","):
@@ -135,33 +143,41 @@ python split_kernel_module_packages () {
for dep in modinfo_deps:
if not dep in rdepends:
rdepends[dep] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
# Avoid automatic -dev recommendations for modules ending with -dev.
- d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+ d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
# Provide virtual package without postfix
providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
if providevirt == "1":
postfix = format.split('%s')[1]
- d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+ d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
kernel_version = d.getVar("KERNEL_VERSION")
- module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
+ splitmods = d.getVar('KERNEL_SPLIT_MODULES')
+ postinst = d.getVar('pkg_postinst:modules')
+ postrm = d.getVar('pkg_postrm:modules')
+
+ if splitmods != '1':
+ etcdir = d.getVar('sysconfdir')
+ d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
+ d.appendVar('pkg_postinst:%s' % metapkg, postinst)
+        d.prependVar('pkg_postrm:%s' % metapkg, postrm)
+ return
+
+ module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
- postinst = d.getVar('pkg_postinst_modules')
- postrm = d.getVar('pkg_postrm_modules')
-
modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
- d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
+ d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
# If modules-load.d and modprobe.d are empty at this point, remove them to
# avoid warnings. removedirs only raises an OSError if an empty
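
Illustrating the new KERNEL_SPLIT_MODULES switch: disabling per-module packaging keeps every .ko plus the modprobe.d/modules-load.d fragments in the single meta package (a sketch):

    KERNEL_SPLIT_MODULES = "0"
    # modules then ship in ${KERNEL_PACKAGE_NAME}-modules instead of
    # one kernel-module-<name> package per .ko
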
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index 87f02654fa..2daa068298 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -1,3 +1,7 @@
+# fitImage kernel compression algorithm
+FIT_KERNEL_COMP_ALG ?= "gzip"
+FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
+
uboot_prep_kimage() {
if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
@@ -11,14 +15,18 @@ uboot_prep_kimage() {
linux_comp="none"
else
vmlinux_path="vmlinux"
- linux_suffix=".gz"
- linux_comp="gzip"
+ linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
+ linux_comp="${FIT_KERNEL_COMP_ALG}"
fi
[ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
- gzip -9 linux.bin
+ if [ "${linux_comp}" = "gzip" ] ; then
+ gzip -9 linux.bin
+ elif [ "${linux_comp}" = "lzo" ] ; then
+ lzop -9 linux.bin
+ fi
mv -f "linux.bin${linux_suffix}" linux.bin
fi
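
With the variables introduced above, switching the fitImage kernel compression from the gzip default to lzo (the other algorithm uboot_prep_kimage handles) would be:

    FIT_KERNEL_COMP_ALG = "lzo"
    FIT_KERNEL_COMP_ALG_EXTENSION = ".lzo"
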
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 54a1a1627a..1d5a8cdf29 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -18,6 +18,7 @@ SRCREV_FORMAT ?= "meta_machine"
KCONF_AUDIT_LEVEL ?= "1"
KCONF_BSP_AUDIT_LEVEL ?= "0"
KMETA_AUDIT ?= "yes"
+KMETA_AUDIT_WERROR ?= ""
# returns local (absolute) path names for all valid patches in the
# src_uri
@@ -35,7 +36,10 @@ def find_patches(d,subdir):
if subdir == patchdir:
patch_list.append(local)
else:
- patch_list.append(local)
+            # skip the patch if a patchdir was supplied;
+            # it won't be handled properly
+ if not patchdir:
+ patch_list.append(local)
return patch_list
@@ -85,11 +89,35 @@ def get_machine_branch(d, default):
return default
+# returns all directories on FILESEXTRAPATHS (and hence available to the
+# build) as "path:<exists>" pairs, for use as file-checksums on
+# do_kernel_metadata so fragment changes retrigger the task
+def get_dirs_with_fragments(d):
+ extrapaths = []
+ extrafiles = []
+ extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
+ # Remove default flag which was used for checking
+ extrapathsvalue = extrapathsvalue.replace("__default:", "")
+ extrapaths = extrapathsvalue.split(":")
+ for path in extrapaths:
+ if path + ":True" not in extrafiles:
+ extrafiles.append(path + ":" + str(os.path.exists(path)))
+
+ return " ".join(extrafiles)
+
do_kernel_metadata() {
set +e
+
+ if [ -n "$1" ]; then
+ mode="$1"
+ else
+ mode="patch"
+ fi
+
cd ${S}
export KMETA=${KMETA}
+ bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"
+
# if kernel tools are available in-tree, they are preferred
# and are placed on the path before any external tools. Unless
# the external tools flag is set, in that case we do nothing.
@@ -120,34 +148,35 @@ do_kernel_metadata() {
if [ -n "${KBUILD_DEFCONFIG}" ]; then
if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
if [ -f "${WORKDIR}/defconfig" ]; then
- # If the two defconfig's are different, warn that we didn't overwrite the
- # one already placed in WORKDIR by the fetcher.
+			# If the two defconfigs differ, note that we overwrote the
+			# one already placed in WORKDIR
cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
- bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
- else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
fi
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
in_tree_defconfig="${WORKDIR}/defconfig"
else
- bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
fi
fi
- # was anyone trying to patch the kernel meta data ?, we need to do
- # this here, since the scc commands migrate the .cfg fragments to the
- # kernel source tree, where they'll be used later.
- check_git_config
- patches="${@" ".join(find_patches(d,'kernel-meta'))}"
- for p in $patches; do
- (
- cd ${WORKDIR}/kernel-meta
- git am -s $p
- )
- done
+ if [ "$mode" = "patch" ]; then
+		# was anyone trying to patch the kernel metadata? We need to do
+ # this here, since the scc commands migrate the .cfg fragments to the
+ # kernel source tree, where they'll be used later.
+ check_git_config
+ patches="${@" ".join(find_patches(d,'kernel-meta'))}"
+ for p in $patches; do
+ (
+ cd ${WORKDIR}/kernel-meta
+ git am -s $p
+ )
+ done
+ fi
sccs_from_src_uri="${@" ".join(find_sccs(d))}"
patches="${@" ".join(find_patches(d,''))}"
@@ -212,13 +241,40 @@ do_kernel_metadata() {
fi
meta_dir=$(kgit --meta)
- # run1: pull all the configuration fragments, no matter where they come from
- elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
- scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ KERNEL_FEATURES_FINAL=""
+ if [ -n "${KERNEL_FEATURES}" ]; then
+ for feature in ${KERNEL_FEATURES}; do
+ feature_found=f
+ for d in $includes; do
+ path_to_check=$(echo $d | sed 's/^-I//')
+ if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
+ feature_found=t
+ fi
+ done
+ if [ "$feature_found" = "f" ]; then
+ if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
+ bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
+ bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
+ else
+ bberror "Feature '$feature' not found, this will cause configuration failures."
+ bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
+ bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
+ fi
+ else
+ KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
+ fi
+ done
+ fi
+
+ if [ "$mode" = "config" ]; then
+ # run1: pull all the configuration fragments, no matter where they come from
+ elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
fi
@@ -229,13 +285,30 @@ do_kernel_metadata() {
sccs="${bsp_definition} ${sccs}"
fi
- # run2: only generate patches for elements that have been passed on the SRC_URI
- elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ if [ "$mode" = "patch" ]; then
+ # run2: only generate patches for elements that have been passed on the SRC_URI
+ elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
+ fi
+ fi
+
+ if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
+ bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
+ bbnote "======================================================================"
+ if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
+			bbnote "Non kernel-cache (external) BSP"
fi
+ bbnote "BSP entry point / definition: $bsp_definition"
+ if [ -n "$in_tree_defconfig" ]; then
+ bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
+ fi
+ bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
+ bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
+ bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
fi
}
@@ -288,6 +361,21 @@ do_kernel_checkout() {
fi
fi
cd ${S}
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
else
# case: we have no git repository at all.
# To support low bandwidth options for building the kernel, we'll just
@@ -309,27 +397,13 @@ do_kernel_checkout() {
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
fi
-
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
}
-do_kernel_checkout[dirs] = "${S}"
+do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
@@ -338,6 +412,8 @@ do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_po
do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
+ do_kernel_metadata config
+
# translate the kconfig_mode into something that merge_config.sh
# understands
case ${KCONFIG_MODE} in
@@ -347,11 +423,11 @@ do_kernel_configme() {
*alldefconfig)
config_flags=""
;;
- *)
- if [ -f ${WORKDIR}/defconfig ]; then
- config_flags="-n"
- fi
- ;;
+ *)
+ if [ -f ${WORKDIR}/defconfig ]; then
+ config_flags="-n"
+ fi
+ ;;
esac
cd ${S}
@@ -380,6 +456,67 @@ do_kernel_configme() {
}
addtask kernel_configme before do_configure after do_patch
+addtask config_analysis
+
+do_config_analysis[depends] = "virtual/kernel:do_configure"
+do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
+
+CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
+CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
+
+python do_config_analysis() {
+ import re, string, sys, subprocess
+
+ s = d.getVar('S')
+
+ env = os.environ.copy()
+ env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
+
+ # read specific symbols from the kernel recipe or from local.conf
+    # e.g.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
+ config = d.getVar( 'CONFIG_ANALYSIS' )
+ if not config:
+ config = [ "" ]
+ else:
+ config = config.split()
+
+ for c in config:
+ for action in ["analysis","audit"]:
+ if action == "analysis":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
+
+ if action == "audit":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
+
+ if c:
+ outdir = os.path.dirname( outfile )
+ outname = os.path.basename( outfile )
+ outfile = outdir + '/'+ c + '-' + outname
+
+ if config and os.path.isfile(outfile):
+ os.remove(outfile)
+
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
+ if c:
+ bb.warn( analysis )
+}
python do_kernel_configcheck() {
import re, string, sys, subprocess
@@ -389,59 +526,99 @@ python do_kernel_configcheck() {
# meta-series for processing
kmeta = d.getVar("KMETA") or "meta"
if not os.path.exists(kmeta):
- kmeta = "." + kmeta
+ kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
s = d.getVar('S')
env = os.environ.copy()
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
- env['LD'] = "${KERNEL_LD}"
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
try:
configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
except subprocess.CalledProcessError as e:
bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
- try:
- subprocess.check_call(['kconf_check', '--report', '-o',
- '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
- except subprocess.CalledProcessError:
- # The configuration gathering can return different exit codes, but
- # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
- # everything here, and let the run continue.
- pass
-
config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
+ kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
+ warnings_detected = False
+
+ # if config check visibility is "1", that's the lowest level of audit. So
+ # we add the --classify option to the run, since classification will
+ # streamline the output to only report options that could be boot issues,
+ # or are otherwise required for proper operation.
+ extra_params = ""
+ if config_check_visibility == 1:
+ extra_params = "--classify"
+
+ # category #1: mismatches
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
- # if config check visibility is non-zero, report dropped configuration values
- mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
- if os.path.exists(mismatch_file):
- if config_check_visibility:
- with open (mismatch_file, "r") as myfile:
+ if analysis:
+ outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ if config_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #2: invalid fragment elements
+ extra_params = ""
+ if bsp_check_visibility > 1:
+ extra_params = "--strict"
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
- if bsp_check_visibility:
- invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
- if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
- with open (invalid_file, "r") as myfile:
+ if analysis:
+ outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ if bsp_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
- bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
- errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
- if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
- with open (errors_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
-
- # if the audit level is greater than two, we report if a fragment has overriden
- # a value from a base fragment. This is really only used for new kernel introduction
- if bsp_check_visibility > 2:
- redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
- if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
- with open (redefinition_file, "r") as myfile:
+ bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #3: redefined options (this is pretty verbose and is debug only)
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ if analysis:
+ outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+        # if the audit level is greater than two, we report if a fragment has overridden
+        # a value from a base fragment. This is really only used for new kernel introduction.
+ if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
+ warnings_detected = True
+
+ if warnings_detected and kmeta_audit_werror:
+ bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
@@ -457,7 +634,31 @@ do_validate_branches() {
# if SRCREV is AUTOREV it shows up as AUTOINC there's nothing to
# check and we can exit early
if [ "${machine_srcrev}" = "AUTOINC" ]; then
+ linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}'
+ if [ -n "$linux_yocto_dev" ]; then
+ git checkout -q -f ${machine_branch}
+ ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
+ patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ kver="$ver.$patchlevel"
+ bbnote "dev kernel: performing version -> branch -> SRCREV validation"
+ bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver"
+ echo "${LINUX_VERSION}" | grep -q $kver
+ if [ $? -ne 0 ]; then
+ version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')"
+ versioned_branch="v$version/$machine_branch"
+
+ machine_branch=$versioned_branch
+ force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)"
+ if [ $? -ne 0 ]; then
+ bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected"
+ fi
+
+ bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev"
+ fi
+ else
bbnote "SRCREV validation is not required for AUTOREV"
+ fi
elif [ "${machine_srcrev}" = "" ]; then
if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
# SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
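
As a usage note for the new audit hooks (the recipe name is an example): the standalone analysis task can be invoked directly, and specific symbols can be examined via CONFIG_ANALYSIS:

    # run the analysis task on its own
    bitbake linux-yocto-dev -c config_analysis
    # in local.conf: explain why these symbols got their final values
    CONFIG_ANALYSIS:pn-linux-yocto-dev = "NF_CONNTRACK LOCALVERSION"
    # promote configuration audit warnings to hard errors
    KMETA_AUDIT_WERROR = "1"
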
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index cf43a5d604..4f304eb9c7 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,12 +1,15 @@
inherit linux-kernel-base kernel-module-split
+COMPATIBLE_HOST = ".*-linux"
+
KERNEL_PACKAGE_NAME ??= "kernel"
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
-PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
+PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
@@ -27,6 +30,8 @@ INITRAMFS_IMAGE ?= ""
INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
+INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
+INITRAMFS_MULTICONFIG ?= ""
# KERNEL_VERSION is extracted from source code. It is evaluated as
# None for the first parsing, since the code has not been fetched.
@@ -44,7 +49,7 @@ python __anonymous () {
kpn = d.getVar("KERNEL_PACKAGE_NAME")
# XXX Remove this after bug 11905 is resolved
- # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
+ # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
if kpn == pn:
bb.warn("Some packages (E.g. *-dev) might be missing due to "
"bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
@@ -74,7 +79,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -90,13 +95,22 @@ python __anonymous () {
imagedest = d.getVar('KERNEL_IMAGEDEST')
for type in types.split():
+ if bb.data.inherits_class('nopackages', d):
+ continue
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
- d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
- d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
- d.setVar('pkg_postinst_%s-image-%s' % (kname,typelower), """set +e
+ d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
+ splitmods = d.getVar("KERNEL_SPLIT_MODULES")
+ if splitmods != '1':
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
+ d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
+ d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+ d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+
+ d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
+ d.setVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
if [ -n "$D" ]; then
ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
else
@@ -108,7 +122,7 @@ else
fi
set -e
""" % (type, type, type, type, type, type, type))
- d.setVar('pkg_postrm_%s-image-%s' % (kname,typelower), """set +e
+ d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
fi
@@ -121,7 +135,12 @@ set -e
# the do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
# standalone for use by wic and other tools.
if image:
- d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if d.getVar('INITRAMFS_MULTICONFIG'):
+ d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
+ else:
+ d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
@@ -172,7 +191,10 @@ python do_symlink_kernsrc () {
shutil.move(s, kernsrc)
os.symlink(kernsrc, s)
}
-addtask symlink_kernsrc before do_configure after do_unpack
+# do_patch is normally ordered before do_configure, but
+# externalsrc.bbclass deletes do_patch, breaking the dependency of
+# do_configure on do_symlink_kernsrc.
+addtask symlink_kernsrc before do_patch do_configure after do_unpack
inherit kernel-arch deploy
@@ -211,7 +233,9 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}""
+
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -220,9 +244,9 @@ copy_initramfs() {
mkdir -p ${B}/usr
# Find and use the first initramfs image archive type we find
rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
+ for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
+ if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
+ cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
case $img in
*gz)
echo "gzip decompressing image"
@@ -249,12 +273,17 @@ copy_initramfs() {
xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
+ *zst)
+			echo "zstd decompressing image"
+ zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ break
+ ;;
esac
break
fi
done
	# Verify that the above loop found an initramfs, fail otherwise
- [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz) for bundling; INITRAMFS_IMAGE_NAME might be wrong."
+	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
do_bundle_initramfs () {
@@ -294,24 +323,32 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
-python do_devshell_prepend () {
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
+python do_devshell:prepend () {
os.environ["LDFLAGS"] = ''
}
addtask bundle_initramfs after do_install before do_deploy
-get_cc_option () {
- # Check if KERNEL_CC supports the option "file-prefix-map".
- # This option allows us to build images with __FILE__ values that do not
- # contain the host build path.
- if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
- echo "-ffile-prefix-map=${S}=/kernel-source/"
- fi
-}
+KERNEL_DEBUG_TIMESTAMPS ??= "0"
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+
+	# set up native pkg-config variables (kconfig scripts call pkg-config directly; it cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
@@ -339,20 +376,24 @@ kernel_do_compile() {
copy_initramfs
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
- cc_extra=$(get_cc_option)
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
@@ -368,8 +409,7 @@ do_compile_kernelmodules() {
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- cc_extra=$(get_cc_option)
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
# Module.symvers gets updated during the
# building of the kernel modules. We need to
@@ -378,6 +418,10 @@ do_compile_kernelmodules() {
# other kernel modules and will look at this
# file to do symbol lookups
cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
+ # 5.10+ kernels have module.lds that we need to copy for external module builds
+ if [ -e "${B}/scripts/module.lds" ]; then
+ install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
+ fi
else
bbnote "no modules to compile"
fi
@@ -404,9 +448,23 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+ # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
+ # by do_assemble_fitimage_initramfs.
+ # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
+	# So, at the level of the install task, we should not try to install the fitImage; it has not
+	# been generated yet.
+ # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
+ # the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
+ fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -414,7 +472,6 @@ kernel_do_install() {
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
}
-do_install[prefuncs] += "package_get_auto_pr"
# Must be run no earlier than after do_kernel_checkout or else Makefile won't be in ${S}/Makefile
do_kernel_version_sanity_check() {
@@ -565,7 +622,7 @@ kernel_do_configure() {
fi
# Copy defconfig to .config if .config does not exist. This allows
- # recipes to manage the .config themselves in do_configure_prepend().
+ # recipes to manage the .config themselves in do_configure:prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
cp "${WORKDIR}/defconfig" "${B}/.config"
fi
@@ -582,34 +639,34 @@ addtask savedefconfig after do_configure
inherit cml1
-KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+KCONFIG_CONFIG_COMMAND:append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
-FILES_${PN} = ""
-FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
-FILES_${KERNEL_PACKAGE_NAME}-image = ""
-FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
-FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
-FILES_${KERNEL_PACKAGE_NAME}-modules = ""
-RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
+PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
+FILES:${PN} = ""
+FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
+FILES:${KERNEL_PACKAGE_NAME}-image = ""
+FILES:${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
+FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
-RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
-PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
-PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
-DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
-
-pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
+RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
+PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
+PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
+DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
+
+pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
@@ -620,7 +677,7 @@ pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
fi
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
+PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
python split_kernel_packages () {
do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
@@ -648,30 +705,19 @@ do_kernel_link_images() {
}
addtask kernel_link_images after do_compile before do_strip
-do_strip() {
- if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
- if ! (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux"); then
- bbwarn "image type(s) will not be stripped (not supported): ${KERNEL_IMAGETYPES}"
- return
- fi
-
- cd ${B}
- headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT_DIR}/vmlinux | \
- grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
- sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
- gawk '{print $1}'`
-
- for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if ! (echo "$headers" | grep -q "^$str$"); then
- bbwarn "Section not found: $str";
- fi
+python do_strip() {
+ import shutil
- "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT_DIR}/vmlinux
- }; done
+ strip = d.getVar('STRIP')
+ extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
+ kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
- bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
- "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
- fi;
+ if (extra_sections and kernel_image.find('boot/vmlinux') != -1):
+ kernel_image_stripped = kernel_image + ".stripped"
+ shutil.copy2(kernel_image, kernel_image_stripped)
+ oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
+ bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
+ extra_sections)
}
do_strip[dirs] = "${B}"
@@ -690,7 +736,7 @@ do_sizecheck() {
at_least_one_fits=
for imageType in ${KERNEL_IMAGETYPES} ; do
size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
else
at_least_one_fits=y
@@ -715,11 +761,19 @@ kernel_do_deploy() {
fi
for imageType in ${KERNEL_IMAGETYPES} ; do
- base_name=${imageType}-${KERNEL_IMAGE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
- symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
- ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
- ln -sf ${base_name}.bin $deployDir/${imageType}
+ baseName=$imageType-${KERNEL_IMAGE_NAME}
+
+ if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ else
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
+ fi
done
if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
@@ -732,7 +786,9 @@ kernel_do_deploy() {
TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
- ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
+ ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ fi
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
@@ -740,14 +796,18 @@ kernel_do_deploy() {
if [ "$imageType" = "fitImage" ] ; then
continue
fi
- initramfs_base_name=${imageType}-${INITRAMFS_NAME}
- initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
+ initramfsBaseName=$imageType-${INITRAMFS_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
+ if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
+ ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
done
fi
}
-do_deploy[prefuncs] += "package_get_auto_pr"
+
+# We deploy to filenames that include PKGV and PKGR, so read the saved data to
+# ensure we get the right values for both
+do_deploy[prefuncs] += "read_subpackage_metadata"
addtask deploy after do_populate_sysroot do_packagedata
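The new INITRAMFS_MULTICONFIG path above lets the bundled initramfs be built in a separate multiconfig via an mcdepends task dependency, with INITRAMFS_DEPLOY_DIR_IMAGE pointing at that build's deploy directory. A minimal sketch, assuming a multiconfig named "initramfscfg"; the deploy path is illustrative and depends on how that multiconfig sets TMPDIR:

    # local.conf sketch
    BBMULTICONFIG = "initramfscfg"
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
    INITRAMFS_MULTICONFIG = "initramfscfg"
    INITRAMFS_DEPLOY_DIR_IMAGE = "${TOPDIR}/tmp-initramfscfg/deploy/images/${MACHINE}"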
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index de3b4250c7..13ef8cdc0d 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -42,7 +42,7 @@ python __anonymous () {
# try to fix disable charsets/locales/locale-code compile fail
PACKAGE_NO_GCONV ?= "0"
-OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
@@ -129,9 +129,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
@@ -151,9 +151,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
@@ -172,9 +172,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
@@ -210,11 +210,11 @@ python package_do_split_gconvs () {
supported[locale] = charset
def output_locale_source(name, pkgname, locale, encoding):
- d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
+ d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
(mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
+ d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
% (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
+ d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
@@ -222,8 +222,8 @@ python package_do_split_gconvs () {
lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
if lcsplit and int(lcsplit):
d.appendVar('PACKAGES', ' ' + dep)
- d.setVar('ALLOW_EMPTY_%s' % dep, '1')
- d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
+ d.setVar('ALLOW_EMPTY:%s' % dep, '1')
+ d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)
commands = {}
@@ -293,13 +293,13 @@ python package_do_split_gconvs () {
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
- d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
m = re.match(r"(.*)_(.*)", name)
if m:
rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
- d.setVar('RPROVIDES_%s' % pkgname, rprovides)
+ d.setVar('RPROVIDES:%s' % pkgname, rprovides)
if use_bin == "compile":
output_locale_binary_rdepends(name, pkgname, locale, encoding)
@@ -343,7 +343,7 @@ python package_do_split_gconvs () {
def metapkg_hook(file, pkg, pattern, format, basename):
name = basename.split('/', 1)[0]
metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
- d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
+ d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)
if use_bin == "compile":
makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
@@ -355,7 +355,7 @@ python package_do_split_gconvs () {
m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
- d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
+ d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
@@ -379,6 +379,6 @@ python package_do_split_gconvs () {
# We want to do this indirection so that we can safely 'return'
# from the called function even though we're prepending
-python populate_packages_prepend () {
+python populate_packages:prepend () {
bb.build.exec_func('package_do_split_gconvs', d)
}
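The libc-package.bbclass hunks are the mechanical conversion from the old "_" override separator to the new ":" separator in setVar/appendVar calls. The same conversion applies to recipe metadata; a sketch of the before/after shape (hypothetical recipe fragment):

    # old override syntax
    RDEPENDS_${PN}-locale = "virtual-locale-en"
    do_install_append() { :; }

    # new override syntax
    RDEPENDS:${PN}-locale = "virtual-locale-en"
    do_install:append() { :; }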
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index f90176d6c0..813e1ea4f5 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -6,7 +6,7 @@
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
-# Create extra package with license texts and add it to RRECOMMENDS_${PN}
+# Create extra package with license texts and add it to RRECOMMENDS:${PN}
LICENSE_CREATE_PACKAGE[type] = "boolean"
LICENSE_CREATE_PACKAGE ??= "0"
LICENSE_PACKAGE_SUFFIX ??= "-lic"
@@ -29,10 +29,12 @@ python do_populate_lic() {
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
for key in sorted(info.keys()):
f.write("%s: %s\n" % (key, info[key]))
+ oe.qa.exit_if_errors(d)
}
-# it would be better to copy them in do_install_append, but find_license_filesa is python
-python perform_packagecopy_prepend () {
+PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
+# it would be better to copy them in do_install:append, but find_license_files is python
+python perform_packagecopy:prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
lic_files_paths = find_license_files(d)
@@ -61,15 +63,7 @@ def add_package_and_files(d):
else:
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
- d.setVar('FILES_' + pn_lic, files)
- for pn in packages.split():
- if pn == pn_lic:
- continue
- rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
- if rrecommends_pn:
- d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
- else:
- d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
+ d.setVar('FILES:' + pn_lic, files)
def copy_license_files(lic_files_paths, destdir):
import shutil
@@ -152,6 +146,10 @@ def find_license_files(d):
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
+ def visit_Constant(self, node):
+ find_license(node.value.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
def find_license(license_type):
try:
bb.utils.mkdirhier(gen_lic_dest)
@@ -185,7 +183,8 @@ def find_license_files(d):
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
# and should not be allowed, warn the user in this case.
if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
- bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
+ oe.qa.handle_error("license-no-generic",
+ "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
@@ -194,10 +193,11 @@ def find_license_files(d):
os.path.join(srcdir, non_generic_lic), None, None))
non_generic_lics[non_generic_lic] = license_type
else:
- # Add explicity avoid of CLOSED license because this isn't generic
+ # Explicitly avoid the CLOSED license because this isn't generic
if license_type != 'CLOSED':
# And here is where we warn people that their licenses are lousy
- bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
+ oe.qa.handle_error("license-exists",
+ "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
pass
if not generic_directory:
@@ -222,7 +222,8 @@ def find_license_files(d):
except oe.license.InvalidLicense as exc:
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
- bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF')))
+ oe.qa.handle_error("license-syntax",
+                    "%s: Failed to parse its LICENSE field." % (d.getVar('PF')), d)
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
for path, data in sorted(lic_chksums.items()):
@@ -251,16 +252,9 @@ def return_spdx(d, license):
def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
- becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if available and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
- or the passed license if there is no canonical form.
+ becomes GPL-3.0-only) or the passed license if there is no canonical form.
"""
- lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
- if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
- if lic:
- lic += '+'
- return lic or license
+ return d.getVarFlag('SPDXLICENSEMAP', license) or license
def available_licenses(d):
"""
@@ -277,28 +271,30 @@ def available_licenses(d):
licenses = sorted(licenses)
return licenses
-# Only determine the list of all available licenses once. This assumes that any
-# additions to LICENSE_PATH have been done before this file is parsed.
-AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
-
def expand_wildcard_licenses(d, wildcard_licenses):
"""
- Return actual spdx format license names if wildcards are used. We expand
- wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
+ There are some common wildcard values users may want to use. Support them
+ here.
"""
- import fnmatch
- licenses = wildcard_licenses[:]
- spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
- for wld_lic in wildcard_licenses:
- spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
-
- spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
- for wld_lic in wildcard_licenses:
- licenses += fnmatch.filter(spdx_lics, wld_lic)
-
- licenses = list(set(licenses))
- return licenses
+ licenses = set(wildcard_licenses)
+ mapping = {
+ "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
+ "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
+ "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
+ }
+ for k in mapping:
+ if k in wildcard_licenses:
+ licenses.remove(k)
+ for item in mapping[k]:
+ licenses.add(item)
+
+ for l in licenses:
+ if l in oe.license.obsolete_license_list():
+ bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % l)
+ if "*" in l:
+ bb.fatal("Error, %s is an invalid license wildcard entry" % l)
+
+ return list(licenses)
def incompatible_license_contains(license, truevalue, falsevalue, d):
license = canonical_license(d, license)
@@ -333,7 +329,7 @@ def incompatible_license(d, dont_want_licenses, package=None):
as canonical (SPDX) names.
"""
import oe.license
- license = d.getVar("LICENSE_%s" % package) if package else None
+ license = d.getVar("LICENSE:%s" % package) if package else None
if not license:
license = d.getVar('LICENSE')
@@ -342,30 +338,31 @@ def incompatible_license(d, dont_want_licenses, package=None):
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
- aren't whitelisted.
+ aren't acceptable.
- If it does, it returns the all LICENSE_FLAGS missing from the whitelist, or
- all of the LICENSE_FLAGS if there is no whitelist.
+    If it does, it returns all of the LICENSE_FLAGS missing from the list
+ of acceptable license flags, or all of the LICENSE_FLAGS if there
+ is no list of acceptable flags.
- If everything is is properly whitelisted, it returns None.
+    If everything is acceptable, it returns None.
"""
- def license_flag_matches(flag, whitelist, pn):
+ def license_flag_matches(flag, acceptlist, pn):
"""
- Return True if flag matches something in whitelist, None if not.
+        Return True if flag matches something in acceptlist, False if not.
- Before we test a flag against the whitelist, we append _${PN}
+ Before we test a flag against the acceptlist, we append _${PN}
to it. We then try to match that string against the
- whitelist. This covers the normal case, where we expect
+ acceptlist. This covers the normal case, where we expect
LICENSE_FLAGS to be a simple string like 'commercial', which
- the user typically matches exactly in the whitelist by
+ the user typically matches exactly in the acceptlist by
        explicitly appending the package name e.g. 'commercial_foo'.
If we fail the match however, we then split the flag across
'_' and append each fragment and test until we either match or
run out of fragments.
"""
flag_pn = ("%s_%s" % (flag, pn))
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_pn == candidate:
return True
@@ -376,27 +373,27 @@ def check_license_flags(d):
if flag_cur:
flag_cur += "_"
flag_cur += flagment
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_cur == candidate:
return True
return False
- def all_license_flags_match(license_flags, whitelist):
+ def all_license_flags_match(license_flags, acceptlist):
""" Return all unmatched flags, None if all flags match """
pn = d.getVar('PN')
- split_whitelist = whitelist.split()
+ split_acceptlist = acceptlist.split()
flags = []
for flag in license_flags.split():
- if not license_flag_matches(flag, split_whitelist, pn):
+ if not license_flag_matches(flag, split_acceptlist, pn):
flags.append(flag)
return flags if flags else None
license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
- if not whitelist:
+ acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
+ if not acceptlist:
return license_flags.split()
- unmatched_flags = all_license_flags_match(license_flags, whitelist)
+ unmatched_flags = all_license_flags_match(license_flags, acceptlist)
if unmatched_flags:
return unmatched_flags
return None
@@ -415,20 +412,22 @@ def check_license_format(d):
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos - 1]):
- bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid format - license names ' \
'must be separated by the following characters to indicate ' \
'the license selection: %s' %
- (pn, licenses, license_operator_chars))
+ (pn, licenses, license_operator_chars), d)
elif not license_operator.match(element):
- bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
'in the valid list of separators (%s)' %
- (pn, licenses, element, license_operator_chars))
+ (pn, licenses, element, license_operator_chars), d)
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
-IMAGE_CLASSES_append = " license_image"
+IMAGE_CLASSES:append = " license_image"
python do_populate_lic_setscene () {
sstate_setscene(d)
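The license.bbclass hunks above rename the LICENSE_FLAGS whitelist and replace fnmatch-based wildcard expansion with a fixed mapping. A local.conf sketch of the resulting usage (the commercial_ffmpeg flag name is illustrative):

    # old name, removed by this change:
    #LICENSE_FLAGS_WHITELIST = "commercial_ffmpeg"
    # new name:
    LICENSE_FLAGS_ACCEPTED = "commercial_ffmpeg"

    # only the fixed *GPL-3.0* wildcards are still expanded:
    INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
    # -> GPL-3.0-only GPL-3.0-or-later LGPL-3.0-only LGPL-3.0-or-later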
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index a8c72da3cb..0a5ea0a2fb 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -1,3 +1,15 @@
+ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"
+
+# This requires LICENSE_CREATE_PACKAGE=1 to work too
+COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic"
+
+python() {
+ if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d):
+ features = set(oe.data.typed_value('IMAGE_FEATURES', d))
+ if 'lic-pkgs' in features:
+ bb.error("'lic-pkgs' in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE not enabled to generate -lic packages")
+}
+
python write_package_manifest() {
# Get list of installed packages
license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
@@ -27,7 +39,7 @@ python license_create_manifest() {
pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
if not "LICENSE" in pkg_dic[pkg_name].keys():
- pkg_lic_name = "LICENSE_" + pkg_name
+ pkg_lic_name = "LICENSE:" + pkg_name
pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
@@ -40,31 +52,25 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
import stat
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
- bad_licenses = [canonical_license(d, l) for l in bad_licenses]
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- whitelist = []
- for lic in bad_licenses:
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
-
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
with open(license_manifest, "w") as license_file:
for pkg in sorted(pkg_dic):
- if bad_licenses and pkg not in whitelist:
- try:
- licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
- if licenses:
- bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
- (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
- oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
- bad_licenses, canonical_license, d)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+ incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+ bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
else:
- pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
- pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
- pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
- if pkg in whitelist:
- bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
+ incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+                    oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by the exception list." %(pkg, ' '.join(incompatible_licenses)), d)
+ try:
+ (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+ oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+ remaining_bad_licenses, canonical_license, d)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
# Rootfs manifest
@@ -76,7 +82,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
# If the package doesn't contain any file, that is, its size is 0, the license
# isn't relevant as far as the final image is concerned. So doing license check
# doesn't make much sense, skip it.
- if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+ if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
continue
else:
# Image manifest
@@ -94,10 +100,10 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
continue
if not os.path.exists(lic_file):
- bb.warn("The license listed %s was not in the "\
- "licenses collected for recipe %s"
- % (lic, pkg_dic[pkg]["PN"]))
-
+ oe.qa.handle_error('license-file-missing',
+ "The license listed %s was not in the "\
+ "licenses collected for recipe %s"
+ % (lic, pkg_dic[pkg]["PN"]), d)
# Two options here:
# - Just copy the manifest
# - Copy the manifest and the license directories
@@ -105,8 +111,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
if rootfs and copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
- 'usr', 'share', 'common-licenses')
+ rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
bb.utils.mkdirhier(rootfs_license_dir)
rootfs_license_manifest = os.path.join(rootfs_license_dir,
os.path.split(license_manifest)[1])
@@ -125,7 +130,6 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
licenses = os.listdir(pkg_license_dir)
for lic in licenses:
- rootfs_license = os.path.join(rootfs_license_dir, lic)
pkg_license = os.path.join(pkg_license_dir, lic)
pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
@@ -144,11 +148,14 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses) == False:
continue
+ # Make sure we use only canonical name for the license file
+ generic_lic_file = "generic_%s" % generic_lic
+ rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
if not os.path.exists(rootfs_license):
oe.path.copyhardlink(pkg_license, rootfs_license)
if not os.path.exists(pkg_rootfs_license):
- os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+ os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
else:
if (oe.license.license_ok(canonical_license(d,
lic), bad_licenses) == False or
@@ -200,6 +207,18 @@ def license_deployed_manifest(d):
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
write_license_files(d, image_license_manifest, man_dic, rootfs=False)
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ if link_name:
+ lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ link_name)
+ # remove old symlink
+ if os.path.islink(lic_manifest_symlink_dir):
+ os.unlink(lic_manifest_symlink_dir)
+
+ # create the image dir symlink
+ if lic_manifest_dir != lic_manifest_symlink_dir:
+ os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
+
def get_deployed_dependencies(d):
"""
Get all the deployed dependencies of an image
@@ -208,9 +227,10 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN", True)
depends = list(set([dep[0] for dep
in list(taskdata.values())
- if not dep[0].endswith("-native")]))
+ if not dep[0].endswith("-native") and not dep[0] == pn]))
# To verify what was deployed it checks the rootfs dependencies against
# the SSTATE_MANIFESTS for "deploy" task.
@@ -244,13 +264,24 @@ def get_deployed_files(man_file):
dep_files.append(os.path.basename(f))
return dep_files
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
do_rootfs[recrdeptask] += "do_populate_lic"
python do_populate_lic_deploy() {
license_deployed_manifest(d)
+ oe.qa.exit_if_errors(d)
}
addtask populate_lic_deploy before do_build after do_image_complete
do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
+python license_qa_dead_symlink() {
+ import os
+
+ for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
+ for file in files:
+ full_path = root + "/" + file
+ if os.path.islink(full_path) and not os.path.exists(full_path):
+ bb.error("broken symlink: " + full_path)
+}
+IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
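Two user-visible knobs appear in the license_image.bbclass changes: the lic-pkgs image feature (which requires license packaging to be enabled) and per-package incompatible-license exceptions. A sketch, assuming the "package:license" entry format implied by apply_pkg_license_exception(); the gnutls example is illustrative:

    # local.conf sketch
    LICENSE_CREATE_PACKAGE = "1"
    IMAGE_FEATURES += "lic-pkgs"

    INCOMPATIBLE_LICENSE = "GPL-3.0-only GPL-3.0-or-later"
    INCOMPATIBLE_LICENSE_EXCEPTIONS = "gnutls:GPL-3.0-or-later"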
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass
new file mode 100644
index 0000000000..9a06a509dd
--- /dev/null
+++ b/meta/classes/linux-dummy.bbclass
@@ -0,0 +1,26 @@
+
+python __anonymous () {
+ if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
+        # partially copied from kernel.bbclass
+ kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
+
+ # set an empty package of kernel-devicetree
+ d.appendVar('PACKAGES', ' %s-devicetree' % kname)
+ d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')
+
+ # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
+ type = d.getVar('KERNEL_IMAGETYPE') or ""
+ alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
+ types = d.getVar('KERNEL_IMAGETYPES') or ""
+ if type not in types.split():
+ types = (type + ' ' + types).strip()
+ if alttype not in types.split():
+ types = (alttype + ' ' + types).strip()
+
+ # set empty packages of kernel-image-*
+ for type in types.split():
+ typelower = type.lower()
+ d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
+}
+
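The anonymous Python in linux-dummy.bbclass only takes effect when linux-dummy is the selected kernel provider; it registers empty kernel-devicetree and kernel-image-* packages so image-level dependencies still resolve without a real kernel build. Selection sketch:

    # local.conf / machine configuration sketch
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"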
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
index ec0e0556dd..4447c8847c 100644
--- a/meta/classes/linuxloader.bbclass
+++ b/meta/classes/linuxloader.bbclass
@@ -1,16 +1,16 @@
def get_musl_loader_arch(d):
import re
- ldso_arch = None
+ ldso_arch = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch.startswith("microblaze"):
- ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}"
+ ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
elif targetarch.startswith("mips"):
ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc":
ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
- elif targetarch == "powerpc64":
- ldso_arch = "powerpc64"
+ elif targetarch.startswith("powerpc64"):
+ ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
elif targetarch == "x86_64":
ldso_arch = "x86_64"
elif re.search("i.86", targetarch):
@@ -21,6 +21,8 @@ def get_musl_loader_arch(d):
ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
elif targetarch.startswith("riscv64"):
ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ elif targetarch.startswith("riscv32"):
+ ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
return ldso_arch
def get_musl_loader(d):
@@ -30,7 +32,7 @@ def get_musl_loader(d):
def get_glibc_loader(d):
import re
- dynamic_loader = None
+ dynamic_loader = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch in ["powerpc", "microblaze"]:
dynamic_loader = "${base_libdir}/ld.so.1"
@@ -38,6 +40,8 @@ def get_glibc_loader(d):
dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
elif targetarch.startswith("mips"):
dynamic_loader = "${base_libdir}/ld.so.1"
+ elif targetarch == "powerpc64le":
+ dynamic_loader = "${base_libdir}/ld64.so.2"
elif targetarch == "powerpc64":
dynamic_loader = "${base_libdir}/ld64.so.1"
elif targetarch == "x86_64":
@@ -50,13 +54,15 @@ def get_glibc_loader(d):
dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
elif targetarch.startswith("riscv64"):
dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ elif targetarch.startswith("riscv32"):
+ dynamic_loader = "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
return dynamic_loader
def get_linuxloader(d):
overrides = d.getVar("OVERRIDES").split(":")
if "libc-baremetal" in overrides:
- return None
+ return "NotSupported"
if "libc-musl" in overrides:
dynamic_loader = get_musl_loader(d)
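Returning the sentinel string "NotSupported" instead of None means callers that interpolate the loader path no longer fail during variable expansion and can test the value explicitly. A hypothetical consumer sketch (get_loader_or_fail is illustrative, not part of this class):

    def get_loader_or_fail(d):
        # get_linuxloader() now always returns a string
        loader = get_linuxloader(d)
        if loader == "NotSupported":
            bb.fatal("no dynamic loader known for %s" % d.getVar("TARGET_ARCH"))
        return loader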
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
index 1e66780646..5e09c77fe6 100644
--- a/meta/classes/manpages.bbclass
+++ b/meta/classes/manpages.bbclass
@@ -2,7 +2,7 @@
# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
# tends to pull in the entire XML stack and other tools, so it's not enabled
# by default.
-PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
+PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
inherit qemu
@@ -10,15 +10,16 @@ inherit qemu
MAN_PKG ?= "${PN}-doc"
# only add man-db to RDEPENDS when manual files are built and installed
-RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
+RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
-pkg_postinst_append_${MAN_PKG} () {
+pkg_postinst:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
if test -n "$D"; then
- if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
+ if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
chown -R root:root $D${mandir}
+
mkdir -p $D${localstatedir}/cache/man
cd $D${mandir}
find . -name index.db | while read index; do
@@ -36,7 +37,7 @@ pkg_postinst_append_${MAN_PKG} () {
fi
}
-pkg_postrm_append_${MAN_PKG} () {
+pkg_postrm:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
mandb -q
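The manpages PACKAGECONFIG is only added for class-target when the distro opts in, and the on-target index rebuild in the postinst additionally needs usermode QEMU. A sketch of enabling both (distro/machine configuration):

    DISTRO_FEATURES:append = " api-documentation"
    MACHINE_FEATURES:append = " qemu-usermode"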
diff --git a/meta/classes/meson-routines.bbclass b/meta/classes/meson-routines.bbclass
new file mode 100644
index 0000000000..be3aeedeba
--- /dev/null
+++ b/meta/classes/meson-routines.bbclass
@@ -0,0 +1,51 @@
+inherit siteinfo
+
+def meson_array(var, d):
+ items = d.getVar(var).split()
+ return repr(items[0] if len(items) == 1 else items)
+
+# Map our ARCH values to what Meson expects:
+# http://mesonbuild.com/Reference-tables.html#cpu-families
+def meson_cpu_family(var, d):
+ import re
+ arch = d.getVar(var)
+ if arch == 'powerpc':
+ return 'ppc'
+ elif arch == 'powerpc64' or arch == 'powerpc64le':
+ return 'ppc64'
+ elif arch == 'armeb':
+ return 'arm'
+ elif arch == 'aarch64_be':
+ return 'aarch64'
+ elif arch == 'mipsel':
+ return 'mips'
+ elif arch == 'mips64el':
+ return 'mips64'
+ elif re.match(r"i[3-6]86", arch):
+ return "x86"
+ elif arch == "microblazeel":
+ return "microblaze"
+ else:
+ return arch
+
+# Map our OS values to what Meson expects:
+# https://mesonbuild.com/Reference-tables.html#operating-system-names
+def meson_operating_system(var, d):
+ os = d.getVar(var)
+ if "mingw" in os:
+ return "windows"
+    # avoid e.g. 'linux-gnueabi'
+ elif "linux" in os:
+ return "linux"
+ else:
+ return os
+
+def meson_endian(prefix, d):
+ arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
+ sitedata = siteinfo_data_for_machine(arch, os, d)
+ if "endian-little" in sitedata:
+ return "little"
+ elif "endian-big" in sitedata:
+ return "big"
+ else:
+        bb.fatal("Cannot determine endianness for %s-%s" % (arch, os))
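These helpers produce the values written into the Meson cross and native files by meson.bbclass (next diff). Illustrative results for a little-endian MIPS64 target, assuming TARGET_ARCH = "mips64el" and TARGET_OS = "linux-gnu":

    # meson_cpu_family('TARGET_ARCH', d)      -> 'mips64'
    # meson_operating_system('TARGET_OS', d)  -> 'linux'
    # meson_endian('TARGET', d)               -> 'little'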
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index ff52d20e56..0bfe945811 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -1,6 +1,11 @@
-inherit siteinfo python3native
+inherit python3native meson-routines qemu
-DEPENDS_append = " meson-native ninja-native"
+DEPENDS:append = " meson-native ninja-native"
+
+EXEWRAPPER_ENABLED:class-native = "False"
+EXEWRAPPER_ENABLED:class-nativesdk = "False"
+EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
# As Meson enforces out-of-tree builds we can just use cleandirs
B = "${WORKDIR}/build"
@@ -12,7 +17,8 @@ MESON_SOURCEPATH = "${S}"
def noprefix(var, d):
return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
-MESON_BUILDTYPE ?= "plain"
+MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
+MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
MESONOPTS = " --prefix ${prefix} \
--buildtype ${MESON_BUILDTYPE} \
--bindir ${@noprefix('bindir', d)} \
@@ -26,66 +32,27 @@ MESONOPTS = " --prefix ${prefix} \
--sysconfdir ${sysconfdir} \
--localstatedir ${localstatedir} \
--sharedstatedir ${sharedstatedir} \
- --wrap-mode nodownload"
+ --wrap-mode nodownload \
+ --native-file ${WORKDIR}/meson.native"
-EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
MESON_CROSS_FILE = ""
-MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
-MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
-def meson_array(var, d):
- items = d.getVar(var).split()
- return repr(items[0] if len(items) == 1 else items)
+# Needed to set up qemu wrapper below
+export STAGING_DIR_HOST
-# Map our ARCH values to what Meson expects:
-# http://mesonbuild.com/Reference-tables.html#cpu-families
-def meson_cpu_family(var, d):
- import re
- arch = d.getVar(var)
- if arch == 'powerpc':
- return 'ppc'
- elif arch == 'powerpc64' or arch == 'powerpc64le':
- return 'ppc64'
- elif arch == 'armeb':
- return 'arm'
- elif arch == 'aarch64_be':
- return 'aarch64'
- elif arch == 'mipsel':
- return 'mips'
- elif arch == 'mips64el':
- return 'mips64'
- elif re.match(r"i[3-6]86", arch):
- return "x86"
- elif arch == "microblazeel":
- return "microblaze"
- else:
- return arch
-
-# Map our OS values to what Meson expects:
-# https://mesonbuild.com/Reference-tables.html#operating-system-names
-def meson_operating_system(var, d):
- os = d.getVar(var)
- if "mingw" in os:
- return "windows"
- # avoid e.g 'linux-gnueabi'
- elif "linux" in os:
- return "linux"
- else:
- return os
-
-def meson_endian(prefix, d):
- arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
- sitedata = siteinfo_data_for_machine(arch, os, d)
- if "endian-little" in sitedata:
- return "little"
- elif "endian-big" in sitedata:
- return "big"
- else:
- bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
+def rust_tool(d, target_var):
+ rustc = d.getVar('RUSTC')
+ if not rustc:
+ return ""
+ cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
+ return "rust = %s" % repr(cmd)
addtask write_config before do_configure
-do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
+do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
do_write_config() {
	# This needs to be Python to split the args into single-element lists
cat >${WORKDIR}/meson.cross <<EOF
@@ -96,16 +63,23 @@ ar = ${@meson_array('AR', d)}
nm = ${@meson_array('NM', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
+objcopy = ${@meson_array('OBJCOPY', d)}
pkgconfig = 'pkg-config'
llvm-config = 'llvm-config${LLVMVERSION}'
+cups-config = 'cups-config'
+g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
+g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
+${@rust_tool(d, "HOST_SYS")}
+${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
-[properties]
-needs_exe_wrapper = true
+[built-in options]
c_args = ${@meson_array('CFLAGS', d)}
c_link_args = ${@meson_array('LDFLAGS', d)}
cpp_args = ${@meson_array('CXXFLAGS', d)}
cpp_link_args = ${@meson_array('LDFLAGS', d)}
-gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
+
+[properties]
+needs_exe_wrapper = true
[host_machine]
system = '${@meson_operating_system('HOST_OS', d)}'
@@ -119,8 +93,46 @@ cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
cpu = '${TARGET_ARCH}'
endian = '${@meson_endian('TARGET', d)}'
EOF
+
+ cat >${WORKDIR}/meson.native <<EOF
+[binaries]
+c = ${@meson_array('BUILD_CC', d)}
+cpp = ${@meson_array('BUILD_CXX', d)}
+ar = ${@meson_array('BUILD_AR', d)}
+nm = ${@meson_array('BUILD_NM', d)}
+strip = ${@meson_array('BUILD_STRIP', d)}
+readelf = ${@meson_array('BUILD_READELF', d)}
+objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
+pkgconfig = 'pkg-config-native'
+${@rust_tool(d, "BUILD_SYS")}
+
+[built-in options]
+c_args = ${@meson_array('BUILD_CFLAGS', d)}
+c_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
+cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)}
+cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
+EOF
+}
+
+do_write_config:append:class-target() {
+ # Write out a qemu wrapper that will be used as exe_wrapper so that meson
+	# can run target helper binaries through it.
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ cat > ${WORKDIR}/meson-qemuwrapper << EOF
+#!/bin/sh
+# Use a modules directory which doesn't exist so we don't load random things
+# which may then get deleted (or their dependencies) and potentially segfault
+export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+
+# meson sets this incorrectly (only to libs in the build dir); qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
+$qemu_binary "\$@"
+EOF
+ chmod +x ${WORKDIR}/meson-qemuwrapper
}
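After expansion the wrapper is a plain shell script; for a hypothetical aarch64 machine it would look roughly like the sketch below (the exact qemu invocation comes from qemu_wrapper_cmdline() and differs per target; paths are invented):

    #!/bin/sh
    export GIO_MODULE_DIR=/path/to/recipe-sysroot/usr/lib/gio/modules-dummy
    unset LD_LIBRARY_PATH
    qemu-aarch64 -L /path/to/recipe-sysroot "$@"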
+# Tell externalsrc that changes to this file require a reconfigure
CONFIGURE_FILES = "meson.build"
meson_do_configure() {
@@ -128,6 +140,16 @@ meson_do_configure() {
# https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
unset LD
+ # sstate.bbclass no longer removes empty directories to avoid a race (see
+ # commit 4f94d929 "sstate/staging: Handle directory creation race issue").
+ # Unfortunately Python apparently treats an empty egg-info directory as if
+ # the version it previously contained still exists and fails if a newer
+ # version is required, which Meson does. To avoid this, make sure there are
+ # no empty egg-info directories from previous versions left behind. Ignore
+ # all errors from rmdir since the egg-info may be a file rather than a
+ # directory.
+ rmdir ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/*.egg-info 2>/dev/null || :
+
# Work around "Meson fails if /tmp is mounted with noexec #2972"
mkdir -p "${B}/meson-private/tmp"
export TMPDIR="${B}/meson-private/tmp"
@@ -137,33 +159,6 @@ meson_do_configure() {
fi
}
-override_native_tools() {
- # Set these so that meson uses the native tools for its build sanity tests,
- # which require executables to be runnable. The cross file will still
- # override these for the target build.
- export CC="${BUILD_CC}"
- export CXX="${BUILD_CXX}"
- export LD="${BUILD_LD}"
- export AR="${BUILD_AR}"
- export STRIP="${BUILD_STRIP}"
- # These contain *target* flags but will be used as *native* flags. The
- # correct native flags will be passed via -Dc_args and so on, unset them so
- # they don't interfere with tools invoked by Meson (such as g-ir-scanner)
- unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
-}
-
-meson_do_configure_prepend_class-target() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-nativesdk() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-native() {
- export PKG_CONFIG="pkg-config-native"
-}
-
python meson_do_qa_configure() {
import re
warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
deleted file mode 100644
index 5e6890238b..0000000000
--- a/meta/classes/meta.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-
-PACKAGES = ""
-
-do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 58bb4c555a..47cb969b8d 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,6 +1,3 @@
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-
def base_detect_revision(d):
path = base_get_scmbasepath(d)
return base_get_metadata_git_revision(path, d)
@@ -40,3 +37,8 @@ def base_get_metadata_git_revision(path, d):
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
+
+METADATA_BRANCH := "${@base_detect_branch(d)}"
+METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
+METADATA_REVISION := "${@base_detect_revision(d)}"
+METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
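The switch from ?= to := is the point of this hunk: := expands the git lookup exactly once at parse time, and [vardepvalue] makes the expanded value, rather than the inline-Python text, the input to task signatures. A sketch of the difference (expensive_lookup is a hypothetical function):

    A ?= "${@expensive_lookup(d)}"   # deferred: re-expanded on every reference
    B := "${@expensive_lookup(d)}"   # immediate: expanded once, at parse time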
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
index 642a5b7595..271f48dd72 100644
--- a/meta/classes/mime-xdg.bbclass
+++ b/meta/classes/mime-xdg.bbclass
@@ -34,7 +34,7 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
desktop_base = d.getVar('DESKTOPDIR')
@@ -59,16 +59,16 @@ python populate_packages_append () {
break
if desktops_with_mime_found:
bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_xdg_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_xdg_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
bb.note("adding desktop-file-utils dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
}
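These mime-xdg changes follow the same mechanical conversion applied throughout this series; a few representative mappings from the old '_' override syntax to the new ':' syntax (examples only):

    pkg_postinst_${PN}   ->  pkg_postinst:${PN}
    RDEPENDS_${PN}       ->  RDEPENDS:${PN}
    do_install_append()  ->  do_install:append()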
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
index bb99bc35cb..8d176a884e 100644
--- a/meta/classes/mime.bbclass
+++ b/meta/classes/mime.bbclass
@@ -39,7 +39,7 @@ fi
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
mimedir = d.getVar('MIMEDIR')
@@ -54,17 +54,17 @@ python populate_packages_append () {
break
if mimes_types_found:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
if pkg != 'shared-mime-info-data':
bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 87bba41472..8e7b35d900 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -1,76 +1,76 @@
MIRRORS += "\
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
-${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
-${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
-${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
-ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
-ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
-ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
-http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
-${APACHE_MIRROR} http://www.us.apache.org/dist \n \
-${APACHE_MIRROR} http://archive.apache.org/dist \n \
-http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
-${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
-${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
-ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
-ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
-ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
-cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-cvs://.*/.* http://sources.openembedded.org/ \n \
-svn://.*/.* http://sources.openembedded.org/ \n \
-git://.*/.* http://sources.openembedded.org/ \n \
-hg://.*/.* http://sources.openembedded.org/ \n \
-bzr://.*/.* http://sources.openembedded.org/ \n \
-p4://.*/.* http://sources.openembedded.org/ \n \
-osc://.*/.* http://sources.openembedded.org/ \n \
-https?$://.*/.* http://sources.openembedded.org/ \n \
-ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/?.* http://sources.openembedded.org/ \n \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
+${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
+${GNU_MIRROR} https://mirrors.kernel.org/gnu \
+${KERNELORG_MIRROR} http://www.kernel.org/pub \
+${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
+ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
+ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
+ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
+http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
+http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
+${APACHE_MIRROR} http://www.us.apache.org/dist \
+${APACHE_MIRROR} http://archive.apache.org/dist \
+http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
+${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
+${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
+ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
+ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
+ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
+cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
+cvs://.*/.* http://sources.openembedded.org/ \
+svn://.*/.* http://sources.openembedded.org/ \
+git://.*/.* http://sources.openembedded.org/ \
+hg://.*/.* http://sources.openembedded.org/ \
+bzr://.*/.* http://sources.openembedded.org/ \
+p4://.*/.* http://sources.openembedded.org/ \
+osc://.*/.* http://sources.openembedded.org/ \
+https?://.*/.* http://sources.openembedded.org/ \
+ftp://.*/.* http://sources.openembedded.org/ \
+npm://.*/?.* http://sources.openembedded.org/ \
+${CPAN_MIRROR} http://cpan.metacpan.org/ \
+${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
+https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
+https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
"
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
# where git native protocol fetches may fail due to local firewall rules, etc.
MIRRORS += "\
-git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
-git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
-git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
-git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
-git://.*/.* git://HOST/PATH;protocol=https \n \
+git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
+git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
+git://.*/.* git://HOST/PATH;protocol=https \
+git://.*/.* git://HOST/git/PATH;protocol=https \
"
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index c0dfa35061..a09ec3ed1e 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -14,7 +14,7 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
-python do_devshell_prepend () {
+python do_devshell:prepend () {
os.environ['CFLAGS'] = ''
os.environ['CPPFLAGS'] = ''
os.environ['CXXFLAGS'] = ''
@@ -70,5 +70,5 @@ EXPORT_FUNCTIONS do_compile do_install
# add all split modules to PN RDEPENDS, PN can be empty now
KERNEL_MODULES_META_PACKAGE = "${PN}"
-FILES_${PN} = ""
-ALLOW_EMPTY_${PN} = "1"
+FILES:${PN} = ""
+ALLOW_EMPTY:${PN} = "1"
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 9f726e4537..5859ca8d21 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -35,7 +35,7 @@ python multilib_virtclass_handler () {
e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ target_vendor = e.data.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
return
@@ -65,24 +65,25 @@ python multilib_virtclass_handler () {
override = ":virtclass-multilib-" + variant
- blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
- if blacklist:
+ skip_msg = e.data.getVarFlag('SKIP_RECIPE', e.data.getVar('PN'))
+ if skip_msg:
pn_new = variant + "-" + e.data.getVar('PN')
- if not e.data.getVarFlag('PNBLACKLIST', pn_new):
- e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+ if not e.data.getVarFlag('SKIP_RECIPE', pn_new):
+ e.data.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- # Expand WHITELIST_GPL-3.0 with multilib prefix
- pkgs = e.data.getVar("WHITELIST_GPL-3.0")
- for pkg in pkgs.split():
- pkgs += " " + variant + "-" + pkg
- e.data.setVar("WHITELIST_GPL-3.0", pkgs)
+ # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
+ pkgs = e.data.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
+ if pkgs:
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
- newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
+ newtune = e.data.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
}
@@ -92,6 +93,10 @@ multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
python __anonymous () {
if bb.data.inherits_class('image', d):
+ # set rpm preferred file color for 32-bit multilib image
+ if d.getVar("SITEINFO_BITS") == "32":
+ d.setVar("RPM_PREFER_ELF_ARCH", "1")
+
variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
@@ -105,7 +110,6 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- bb.build.deltask('do_populate_sdk', d)
bb.build.deltask('do_populate_sdk_ext', d)
return
}
@@ -177,7 +181,7 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
# ALTERNATIVE_PRIORITY_pkg[tool] = priority
alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
# ALTERNATIVE_PRIORITY[tool] = priority
@@ -192,12 +196,12 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
-PACKAGEFUNCS_append = " do_package_qa_multilib"
+PACKAGEFUNCS:append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
+ values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
@@ -211,7 +215,7 @@ python do_package_qa_multilib() {
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
- package_qa_handle_error("multilib", msg, d)
+ oe.qa.handle_error("multilib", msg, d)
ml = d.getVar('MLPREFIX')
if not ml:
@@ -229,4 +233,5 @@ python do_package_qa_multilib() {
check_mlprefix(pkg, 'RSUGGESTS', ml)
check_mlprefix(pkg, 'RREPLACES', ml)
check_mlprefix(pkg, 'RCONFLICTS', ml)
+ oe.qa.exit_if_errors(d)
}
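A sketch of what do_package_qa_multilib reports through the new oe.qa.handle_error() call (recipe and values hypothetical): an unprefixed dependency in a multilib package is flagged as suspicious:

    RDEPENDS:lib32-foo = "bar"    # expected "lib32-bar"
    # -> "lib32-foo package lib32-foo - suspicious values 'bar' in RDEPENDS"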
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 98f65c8aae..e06307d057 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,6 +1,7 @@
def preferred_ml_updates(d):
- # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
- # we need to mirror these variables in the multilib case;
+ # If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
+ # or PREFERRED_VERSION are set, we need to mirror these variables in
+ # the multilib case;
multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
@@ -11,43 +12,54 @@ def preferred_ml_updates(d):
if len(eext) > 1 and eext[0] == 'multilib':
prefixes.append(eext[1])
- versions = []
+ required_versions = []
+ preferred_versions = []
providers = []
rproviders = []
for v in d.keys():
+ if v.startswith("REQUIRED_VERSION_"):
+ required_versions.append(v)
if v.startswith("PREFERRED_VERSION_"):
- versions.append(v)
+ preferred_versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
if v.startswith("PREFERRED_RPROVIDER_"):
rproviders.append(v)
- for v in versions:
- val = d.getVar(v, False)
- pkg = v.replace("PREFERRED_VERSION_", "")
- if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
- continue
- if '-cross-' in pkg and '${' in pkg:
+ def sort_versions(versions, keyword):
+ version_str = "_".join([keyword, "VERSION", ""])
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace(version_str, "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if '-cross-' in pkg and '${' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ if "-canadian-" in pkg:
+ newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
+ if newtune:
+ localdata.setVar("DEFAULTTUNE", newtune)
+ newname = localdata.expand(v)
+ else:
+ newname = localdata.expand(v).replace(version_str, version_str + p + '-')
+ if newname != v:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ vexp = d.expand(v)
+ if v != vexp and d.getVar(v, False):
+ d.renameVar(v, vexp)
+ continue
for p in prefixes:
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- if "-canadian-" in pkg:
- newname = localdata.expand(v)
- else:
- newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
- if newname != v:
- newval = localdata.expand(val)
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- vexp = d.expand(v)
- if v != vexp and d.getVar(v, False):
- d.renameVar(v, vexp)
- continue
- for p in prefixes:
- newname = "PREFERRED_VERSION_" + p + "-" + pkg
- if not d.getVar(newname, False):
- d.setVar(newname, val)
+ newname = version_str + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
+
+ sort_versions(required_versions, "REQUIRED")
+ sort_versions(preferred_versions, "PREFERRED")
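Illustrative effect of the refactored helper (hypothetical values): both keywords are now mirrored for every multilib prefix, so given

    MULTILIBS = "multilib:lib32"
    REQUIRED_VERSION_glibc = "2.35"
    PREFERRED_VERSION_bash = "5.1"

sort_versions() generates, unless already set, REQUIRED_VERSION_lib32-glibc = "2.35" and PREFERRED_VERSION_lib32-bash = "5.1".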
for prov in providers:
val = d.getVar(prov, False)
@@ -128,14 +140,14 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
for pref in prefixes:
extramp.append(translate_provide(pref, p))
- d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+ d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
@@ -155,8 +167,8 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS").split():
- if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
- e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ if e.data.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
+ e.data.setVar("TARGET_VENDOR:virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
}
addhandler multilib_virtclass_handler_vendor
@@ -198,13 +210,13 @@ python multilib_virtclass_handler_global () {
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
+ # Process RPROVIDES:${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
- origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
for clsextend in clsextends:
- rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
- e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+ e.data.setVar("RPROVIDES:%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
index e03f5b13b2..efbc24f59b 100644
--- a/meta/classes/multilib_header.bbclass
+++ b/meta/classes/multilib_header.bbclass
@@ -42,11 +42,11 @@ oe_multilib_header() {
# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
# We don't need multilib headers for native builds so brute force things.
-oe_multilib_header_class-native () {
+oe_multilib_header:class-native () {
return
}
# Nor do we need multilib headers for nativesdk builds.
-oe_multilib_header_class-nativesdk () {
+oe_multilib_header:class-nativesdk () {
return
}
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
index b11efc1ec5..41597341cd 100644
--- a/meta/classes/multilib_script.bbclass
+++ b/meta/classes/multilib_script.bbclass
@@ -26,9 +26,9 @@ python () {
pkg, script = entry.split(":")
epkg = d.expand(pkg)
scriptname = os.path.basename(script)
- d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
+ d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
}
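For context, this loop consumes entries of the form package:script (taken from the MULTILIB_SCRIPTS variable, as assumed here); a minimal hypothetical recipe usage would be:

    MULTILIB_SCRIPTS = "${PN}:${bindir}/foo-config"
    # foo-config becomes an alternative, installed as foo-config-${MULTILIB_SUFFIX}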
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 08106e345c..fc7422c5d7 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -5,19 +5,11 @@ inherit relocatable
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
-PACKAGES = ""
-PACKAGES_class-native = ""
-PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
PACKAGE_ARCH = "${BUILD_ARCH}"
# used by cmake class
OECMAKE_RPATH = "${libdir}"
-OECMAKE_RPATH_class-native = "${libdir}"
-
-# When this class has packaging enabled, setting
-# RPROVIDES becomes unnecessary.
-RPROVIDES = "${PN}"
+OECMAKE_RPATH:class-native = "${libdir}"
TARGET_ARCH = "${BUILD_ARCH}"
TARGET_OS = "${BUILD_OS}"
@@ -114,7 +106,7 @@ CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
MACHINE_FEATURES = ""
-PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+PATH:prepend = "${COREBASE}/scripts/native-intercept:"
# This class encodes staging paths into its scripts data so can only be
# reused if we manipulate the paths.
@@ -127,6 +119,7 @@ python native_virtclass_handler () {
pn = e.data.getVar("PN")
if not pn.endswith("-native"):
return
+ bpn = e.data.getVar("BPN")
# Set features here to prevent appends and distro features backfill
# from modifying native distro features
@@ -138,9 +131,9 @@ python native_virtclass_handler () {
if "native" not in classextend:
return
- def map_dependencies(varname, d, suffix = ""):
+ def map_dependencies(varname, d, suffix = "", selfref=True):
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
deps = d.getVar(varname)
if not deps:
return
@@ -148,22 +141,28 @@ python native_virtclass_handler () {
newdeps = []
for dep in deps:
if dep == pn:
- continue
+ if not selfref:
+ continue
+ newdeps.append(dep)
elif "-cross-" in dep:
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
- newdeps.append(dep + "-native")
+ # Replace ${PN} with ${BPN} in the dependency to make sure
+ # dependencies on, e.g., ${PN}-foo become ${BPN}-foo-native
+ # rather than ${BPN}-native-foo-native.
+ newdeps.append(dep.replace(pn, bpn) + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps))
+ d.setVar(varname, " ".join(newdeps), parsing=True)
- map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN"), "", "${PN}"]:
+ map_dependencies("DEPENDS", e.data, selfref=False)
+ for pkg in e.data.getVar("PACKAGES", False).split():
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
+ map_dependencies("PACKAGES", e.data)
provides = e.data.getVar("PROVIDES")
nprovides = []
@@ -171,7 +170,7 @@ python native_virtclass_handler () {
if prov.find(pn) != -1:
nprovides.append(prov)
elif not prov.endswith("-native"):
- nprovides.append(prov.replace(prov, prov + "-native"))
+ nprovides.append(prov + "-native")
else:
nprovides.append(prov)
e.data.setVar("PROVIDES", ' '.join(nprovides))
@@ -196,3 +195,34 @@ USE_NLS = "no"
RECIPERDEPTASK = "do_populate_sysroot"
do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
+
+#
+# Native task outputs are directly run on the target (host) system after being
+# built. Even if the output of this recipe doesn't change, a change in one of
+# its dependencies may cause a change in the output it generates (e.g. rpm
+# output depends on the output of its dependent zstd library).
+#
+# This can cause poor interactions with hash equivalence, since this recipe's
+# output-changing dependency is "hidden" and downstream tasks only see that this
+# recipe has the same outhash and is therefore equivalent. This can result in
+# different output in different cases.
+#
+# To resolve this, unhide the output-changing dependency by adding its unihash
+# to this task's outhash calculation. Unfortunately, we don't know specifically
+# which dependencies are output-changing, so we have to add all of them.
+#
+python native_add_do_populate_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task != "do_populate_sysroot":
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN")
+ deps = {
+ dep[0]:dep[6] for dep in taskdepdata.values() if
+ dep[1] == current_task and dep[0] != pn
+ }
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps"
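A sketch of the data this relies on (record layout as assumed from the indexing above, hashes invented): BB_TASKDEPDATA maps task ids to records where index 0 is the recipe name, 1 the task name and 6 the unihash, so HASHEQUIV_EXTRA_SIGDATA could expand to:

    libtool-native: 4f1c9e2da7b0
    zstd-native: 9c02f3ab5e11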
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7b75710726..f8e9607513 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -9,6 +9,7 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
MULTILIBS = ""
@@ -27,10 +28,10 @@ PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -57,7 +58,7 @@ EXTRA_OECONF_GCC_FLOAT = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
# Change to place files in SDKPATH
@@ -112,3 +113,5 @@ do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/nopackages.bbclass b/meta/classes/nopackages.bbclass
index 559f5078bd..7a4f632d71 100644
--- a/meta/classes/nopackages.bbclass
+++ b/meta/classes/nopackages.bbclass
@@ -2,6 +2,7 @@ deltask do_package
deltask do_package_write_rpm
deltask do_package_write_ipk
deltask do_package_write_deb
+deltask do_package_write_tar
deltask do_package_qa
deltask do_packagedata
deltask do_package_setscene
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index 068032a1e5..ba50fcac20 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -17,11 +17,17 @@
# NPM_INSTALL_DEV:
# Set to 1 to also install devDependencies.
-DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_prepend = "nodejs "
+inherit python3native
+
+DEPENDS:prepend = "nodejs-native "
+RDEPENDS:${PN}:append:class-target = " nodejs"
+
+EXTRA_OENPM = ""
NPM_INSTALL_DEV ?= "0"
+NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
+
def npm_target_arch_map(target_arch):
"""Maps arch names to npm arch names"""
import re
@@ -55,8 +61,8 @@ def npm_pack(env, srcdir, workdir):
"""Run 'npm pack' on a specified directory"""
import shlex
cmd = "npm pack %s" % shlex.quote(srcdir)
- configs = [("ignore-scripts", "true")]
- tarball = env.run(cmd, configs=configs, workdir=workdir).strip("\n")
+ args = [("ignore-scripts", "true")]
+ tarball = env.run(cmd, args=args, workdir=workdir).strip("\n")
return os.path.join(workdir, tarball)
python npm_do_configure() {
@@ -130,11 +136,17 @@ python npm_do_configure() {
cached_manifest.pop("dependencies", None)
cached_manifest.pop("devDependencies", None)
- with open(orig_shrinkwrap_file, "r") as f:
- orig_shrinkwrap = json.load(f)
+ has_shrinkwrap_file = True
- cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
- cached_shrinkwrap.pop("dependencies", None)
+ try:
+ with open(orig_shrinkwrap_file, "r") as f:
+ orig_shrinkwrap = json.load(f)
+ except IOError:
+ has_shrinkwrap_file = False
+
+ if has_shrinkwrap_file:
+ cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
+ cached_shrinkwrap.pop("dependencies", None)
# Manage the dependencies
progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
@@ -165,8 +177,10 @@ python npm_do_configure() {
progress.write("%d/%d" % (progress_done, progress_total))
dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
- foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
- foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
+
+ if has_shrinkwrap_file:
+ foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
+ foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
# Configure the main package
with tempfile.TemporaryDirectory() as tmpdir:
@@ -181,16 +195,19 @@ python npm_do_configure() {
cached_manifest[depkey] = {}
cached_manifest[depkey][name] = version
- _update_manifest("dependencies")
+ if has_shrinkwrap_file:
+ _update_manifest("dependencies")
if dev:
- _update_manifest("devDependencies")
+ if has_shrinkwrap_file:
+ _update_manifest("devDependencies")
with open(cached_manifest_file, "w") as f:
json.dump(cached_manifest, f, indent=2)
- with open(cached_shrinkwrap_file, "w") as f:
- json.dump(cached_shrinkwrap, f, indent=2)
+ if has_shrinkwrap_file:
+ with open(cached_shrinkwrap_file, "w") as f:
+ json.dump(cached_shrinkwrap, f, indent=2)
}
python npm_do_compile() {
@@ -211,15 +228,11 @@ python npm_do_compile() {
bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
- env = NpmEnvironment(d, configs=npm_global_configs(d))
-
- dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
-
with tempfile.TemporaryDirectory() as tmpdir:
args = []
- configs = []
+ configs = npm_global_configs(d)
- if dev:
+ if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False):
configs.append(("also", "development"))
else:
configs.append(("only", "production"))
@@ -234,12 +247,10 @@ python npm_do_compile() {
# Add node-gyp configuration
configs.append(("arch", d.getVar("NPM_ARCH")))
configs.append(("release", "true"))
- sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
- nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
- configs.append(("nodedir", nodedir))
- bindir = os.path.join(sysroot, d.getVar("bindir_native").strip("/"))
- pythondir = os.path.join(bindir, "python-native", "python")
- configs.append(("python", pythondir))
+ configs.append(("nodedir", d.getVar("NPM_NODEDIR")))
+ configs.append(("python", d.getVar("PYTHON")))
+
+ env = NpmEnvironment(d, configs)
# Add node-pre-gyp configuration
args.append(("target_arch", d.getVar("NPM_ARCH")))
@@ -247,7 +258,8 @@ python npm_do_compile() {
# Pack and install the main package
tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
- env.run("npm install %s" % shlex.quote(tarball), args=args, configs=configs)
+ cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
+ env.run(cmd, args=args)
}
npm_do_install() {
@@ -299,7 +311,7 @@ npm_do_install() {
ln -fs node_modules ${D}/${nonarch_libdir}/node
}
-FILES_${PN} += " \
+FILES:${PN} += " \
${bindir} \
${nonarch_libdir} \
"
diff --git a/meta/classes/overlayfs-etc.bbclass b/meta/classes/overlayfs-etc.bbclass
new file mode 100644
index 0000000000..91afee695c
--- /dev/null
+++ b/meta/classes/overlayfs-etc.bbclass
@@ -0,0 +1,76 @@
+# Class for setting up /etc in overlayfs
+#
+# In order to have the /etc directory in overlayfs, special handling at an early boot stage is required.
+# The idea is to supply a custom init script that mounts /etc before launching the actual init program,
+# because the latter already requires /etc to be mounted
+#
+# The configuration must be machine specific. You should at least set these three variables:
+# OVERLAYFS_ETC_MOUNT_POINT ?= "/data"
+# OVERLAYFS_ETC_FSTYPE ?= "ext4"
+# OVERLAYFS_ETC_DEVICE ?= "/dev/mmcblk0p2"
+#
+# To control the mount options further, you can set:
+# OVERLAYFS_ETC_MOUNT_OPTIONS ?= "defaults"
+#
+# The class provides two options for /sbin/init generation:
+# 1. The default option is to rename the original /sbin/init to /sbin/init.orig and place the
+#    generated init under the original name, i.e. /sbin/init. The advantage is that you won't need
+#    to change any kernel parameters to make it work, but it poses the restriction that
+#    package-management can't be used, because updating the init manager would remove the generated script
+# 2. If you would like to keep the original init as-is, you can set
+#    OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "0"
+#    The generated init will then be named /sbin/preinit and you will need to extend your kernel
+#    parameters manually in your bootloader configuration.
+#
+# Regardless of which mode you choose, the update and migration strategy for configuration files
+# under the /etc overlay is out of the scope of this class
+
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "overlayfs-etc", "create_overlayfs_etc_preinit;", "", d)}'
+IMAGE_FEATURES_CONFLICTS_overlayfs-etc = "${@ 'package-management' if bb.utils.to_boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'), True) else ''}"
+
+OVERLAYFS_ETC_MOUNT_POINT ??= ""
+OVERLAYFS_ETC_FSTYPE ??= ""
+OVERLAYFS_ETC_DEVICE ??= ""
+OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
+OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
+OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+
+python create_overlayfs_etc_preinit() {
+ overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
+ overlayEtcFsType = d.getVar("OVERLAYFS_ETC_FSTYPE")
+ overlayEtcDevice = d.getVar("OVERLAYFS_ETC_DEVICE")
+
+ if not overlayEtcMountPoint:
+ bb.fatal("OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration")
+ if not overlayEtcDevice:
+ bb.fatal("OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration")
+ if not overlayEtcFsType:
+ bb.fatal("OVERLAYFS_ETC_FSTYPE should contain a valid file system type on {0}".format(overlayEtcDevice))
+
+ with open(d.getVar("OVERLAYFS_ETC_INIT_TEMPLATE"), "r") as f:
+ PreinitTemplate = f.read()
+
+ useOrigInit = oe.types.boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'))
+ preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
+ initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
+ origInitNameSuffix = ".orig"
+
+ args = {
+ 'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
+ 'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
+ 'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
+ 'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName
+ }
+
+ if useOrigInit:
+ # rename original /sbin/init
+ origInit = oe.path.join(d.getVar("IMAGE_ROOTFS"), initBaseName)
+ bb.debug(1, "rootfs path %s, init path %s, test %s" % (d.getVar('IMAGE_ROOTFS'), origInit, d.getVar("IMAGE_ROOTFS")))
+ bb.utils.rename(origInit, origInit + origInitNameSuffix)
+ preinitPath = origInit
+
+ with open(preinitPath, 'w') as f:
+ f.write(PreinitTemplate.format(**args))
+ os.chmod(preinitPath, 0o755)
+}
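Pulling the header comments together, a minimal hypothetical machine/image configuration enabling this class would be:

    OVERLAYFS_ETC_MOUNT_POINT = "/data"
    OVERLAYFS_ETC_FSTYPE = "ext4"
    OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p2"
    IMAGE_FEATURES += "overlayfs-etc"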
diff --git a/meta/classes/overlayfs.bbclass b/meta/classes/overlayfs.bbclass
new file mode 100644
index 0000000000..29fced2ca7
--- /dev/null
+++ b/meta/classes/overlayfs.bbclass
@@ -0,0 +1,119 @@
+# Class for generation of overlayfs mount units
+#
+# It's often desirable in embedded system design to have a read-only rootfs,
+# while a lot of different applications still need read-write access to
+# some parts of the filesystem. This is especially useful when your update mechanism
+# overwrites the whole rootfs, but you want your application data to be preserved
+# between updates. This class provides a way to achieve that by means
+# of overlayfs while keeping the base rootfs read-only.
+#
+# Usage example.
+#
+# Set a mount point for the partition overlayfs is going to use as its upper layer.
+# The underlying file system can be anything that is supported by overlayfs.
+# This has to be done in your machine configuration; the QA check fails to catch
+# file existence if you redefine this variable in your recipe!
+#
+# OVERLAYFS_MOUNT_POINT[data] ?= "/data"
+#
+# The class assumes you have a data.mount systemd unit defined in your
+# systemd-machine-units recipe and installed to the image.
+#
+# Then you can specify writable directories on a per-recipe basis
+#
+# OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"
+#
+# To support several mount points you can use a different variable flag. Assume we
+# want to have a writable location on the file system, but are not interested in whether
+# the data survives a reboot. Then we could have a mnt-overlay.mount unit for a tmpfs file system:
+#
+# OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
+# OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application"
+#
+# Note: the class does not support the /etc directory itself, because systemd depends on it.
+# For the /etc directory, use the overlayfs-etc class.
+
+REQUIRED_DISTRO_FEATURES += "systemd overlayfs"
+
+inherit systemd features_check
+
+OVERLAYFS_CREATE_DIRS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-create-dirs.service.in"
+OVERLAYFS_MOUNT_UNIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-unit.mount.in"
+OVERLAYFS_ALL_OVERLAYS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-all-overlays.service.in"
+
+python do_create_overlayfs_units() {
+ from oe.overlayfs import mountUnitName
+
+ with open(d.getVar("OVERLAYFS_CREATE_DIRS_TEMPLATE"), "r") as f:
+ CreateDirsUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_MOUNT_UNIT_TEMPLATE"), "r") as f:
+ MountUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_ALL_OVERLAYS_TEMPLATE"), "r") as f:
+ AllOverlaysTemplate = f.read()
+
+ def prepareUnits(data, lower):
+ from oe.overlayfs import helperUnitName
+
+ args = {
+ 'DATA_MOUNT_POINT': data,
+ 'DATA_MOUNT_UNIT': mountUnitName(data),
+ 'CREATE_DIRS_SERVICE': helperUnitName(lower),
+ 'LOWERDIR': lower,
+ }
+
+ bb.debug(1, "Generate systemd unit %s" % mountUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), mountUnitName(lower)), 'w') as f:
+ f.write(MountUnitTemplate.format(**args))
+
+ bb.debug(1, "Generate helper systemd unit %s" % helperUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), helperUnitName(lower)), 'w') as f:
+ f.write(CreateDirsUnitTemplate.format(**args))
+
+ def prepareGlobalUnit(dependentUnits):
+ from oe.overlayfs import allOverlaysUnitName
+ args = {
+ 'ALL_OVERLAYFS_UNITS': " ".join(dependentUnits),
+ 'PN': d.getVar('PN')
+ }
+
+ bb.debug(1, "Generate systemd unit with all overlays %s" % allOverlaysUnitName(d))
+ with open(os.path.join(d.getVar('WORKDIR'), allOverlaysUnitName(d)), 'w') as f:
+ f.write(AllOverlaysTemplate.format(**args))
+
+ mountUnitList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+ for mountPoint in overlayMountPoints:
+ bb.debug(1, "Process variable flag %s" % mountPoint)
+ for lower in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
+ (lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
+ prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
+ mountUnitList.append(mountUnitName(lower))
+
+ # set up one unit, which depends on all mount units, so users can set
+ # only one dependency in their units to make sure software starts
+ # when all overlays are mounted
+ prepareGlobalUnit(mountUnitList)
+}
+
+# we need to generate the file names early, during the parsing stage
+python () {
+ from oe.overlayfs import strForBash, unitFileList
+
+ unitList = unitFileList(d)
+ for unit in unitList:
+ d.appendVar('SYSTEMD_SERVICE:' + d.getVar('PN'), ' ' + unit)
+ d.appendVar('FILES:' + d.getVar('PN'), ' ' +
+ d.getVar('systemd_system_unitdir') + '/' + strForBash(unit))
+
+ d.setVar('OVERLAYFS_UNIT_LIST', ' '.join([strForBash(s) for s in unitList]))
+}
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ for unit in ${OVERLAYFS_UNIT_LIST}; do
+ install -m 0444 ${WORKDIR}/${unit} ${D}${systemd_system_unitdir}
+ done
+}
+
+addtask create_overlayfs_units before do_install
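A condensed hypothetical usage, combining the machine and recipe sides described in the header comments:

    # machine configuration:
    OVERLAYFS_MOUNT_POINT[data] = "/data"
    # recipe:
    inherit overlayfs
    OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"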
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index a777835138..ef972740ce 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,14 @@
-PREMIRRORS_prepend = " \
-cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
-svn://.*/.* ${SOURCE_MIRROR_URL} \n \
-git://.*/.* ${SOURCE_MIRROR_URL} \n \
-gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
-hg://.*/.* ${SOURCE_MIRROR_URL} \n \
-bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
-p4://.*/.* ${SOURCE_MIRROR_URL} \n \
-osc://.*/.* ${SOURCE_MIRROR_URL} \n \
-https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
-ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
-npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
+PREMIRRORS:prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \
+svn://.*/.* ${SOURCE_MIRROR_URL} \
+git://.*/.* ${SOURCE_MIRROR_URL} \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \
+hg://.*/.* ${SOURCE_MIRROR_URL} \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \
+p4://.*/.* ${SOURCE_MIRROR_URL} \
+osc://.*/.* ${SOURCE_MIRROR_URL} \
+https?://.*/.* ${SOURCE_MIRROR_URL} \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \
+s3://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 0af5f66733..e71daafe94 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -7,7 +7,7 @@
#
# There are the following default steps but PACKAGEFUNCS can be extended:
#
-# a) package_get_auto_pr - get PRAUTO from remote PR service
+# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
#
# b) perform_packagecopy - Copy D into PKGD
#
@@ -41,8 +41,6 @@
inherit packagedata
inherit chrpath
inherit package_pkgdata
-
-# Need the package_qa_handle_error() in insane.bbclass
inherit insane
PKGD = "${WORKDIR}/package"
@@ -199,7 +197,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg)
+ oldfiles = d.getVar('FILES:' + pkg)
newfile = os.path.join(root, o)
# These names will be passed through glob() so if the filename actually
# contains * or ? (rare, but possible) we need to handle that specially
@@ -219,19 +217,19 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
the_files.append(fp % m.group(1))
else:
the_files.append(aux_files_pattern_verbatim % m.group(1))
- d.setVar('FILES_' + pkg, " ".join(the_files))
+ d.setVar('FILES:' + pkg, " ".join(the_files))
else:
- d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
+ d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
if extra_depends != '':
- d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg):
- d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg):
- d.setVar('SUMMARY_' + pkg, summary % on)
+ d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
+ if not d.getVar('DESCRIPTION:' + pkg):
+ d.setVar('DESCRIPTION:' + pkg, description % on)
+ if not d.getVar('SUMMARY:' + pkg):
+ d.setVar('SUMMARY:' + pkg, summary % on)
if postinst:
- d.setVar('pkg_postinst_' + pkg, postinst)
+ d.setVar('pkg_postinst:' + pkg, postinst)
if postrm:
- d.setVar('pkg_postrm_' + pkg, postrm)
+ d.setVar('pkg_postrm:' + pkg, postrm)
if callable(hook):
hook(f, pkg, file_regex, output_pattern, m.group(1))
@@ -303,7 +301,7 @@ def get_conffiles(pkg, d):
cwd = os.getcwd()
os.chdir(root)
- conffiles = d.getVar('CONFFILES_%s' % pkg);
+ conffiles = d.getVar('CONFFILES:%s' % pkg);
if conffiles == None:
conffiles = d.getVar('CONFFILES')
if conffiles == None:
@@ -369,7 +367,7 @@ def source_info(file, d, fatal=True):
return list(debugsources)
-def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
+def splitdebuginfo(file, dvar, dv, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
# two files are linked to reference each other.
@@ -380,7 +378,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
import subprocess
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
debugfile = dvar + dest
sources = []
@@ -392,10 +390,6 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
dvar = d.getVar('PKGD')
objcopy = d.getVar("OBJCOPY")
- # We ignore kernel modules, we don't generate debug info files.
- if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return (file, sources)
-
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
origmode = os.stat(file)[stat.ST_MODE]
@@ -403,7 +397,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
os.chmod(file, newmode)
# We need to extract the debug src information here...
- if debugsrcdir:
+ if dv["srcdir"]:
sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -418,7 +412,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
return (file, sources)
-def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d):
+def splitstaticdebuginfo(file, dvar, dv, d):
# Unlike the function above, there is no way to split a static library
# into two components. So to get similar results we will copy the unmodified
# static library (containing the debug symbols) into a new directory.
@@ -431,7 +425,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
import shutil
src = file[len(dvar):]
- dest = debugstaticlibdir + os.path.dirname(src) + debugstaticdir + "/" + os.path.basename(src) + debugstaticappend
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
debugfile = dvar + dest
sources = []
@@ -448,7 +442,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
os.chmod(file, newmode)
# We need to extract the debug src information here...
- if debugsrcdir:
+ if dv["srcdir"]:
sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -461,7 +455,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
return (file, sources)
-def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
+def inject_minidebuginfo(file, dvar, dv, d):
# Extract just the symbols from debuginfo into minidebuginfo,
# compress it with xz and inject it back into the binary in a .gnu_debugdata section.
# https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
@@ -475,7 +469,7 @@ def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsr
minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
debugfile = dvar + dest
minidebugfile = minidebuginfodir + src + '.minidebug'
bb.utils.mkdirhier(os.path.dirname(minidebugfile))
@@ -618,16 +612,18 @@ def get_package_mapping (pkg, basepkg, d, depversions=None):
import oe.packagedata
data = oe.packagedata.read_subpkgdata(pkg, d)
- key = "PKG_%s" % pkg
+ key = "PKG:%s" % pkg
if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
# Have to avoid undoing the write_extra_pkgs(global_variants...)
if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
and data[key] == basepkg:
return pkg
if depversions == []:
# Avoid returning a mapping if the renamed package rprovides its original name
- rprovkey = "RPROVIDES_%s" % pkg
+ rprovkey = "RPROVIDES:%s" % pkg
if rprovkey in data:
if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
@@ -664,12 +660,20 @@ def runtime_mapping_rename (varname, pkg, d):
#bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
-# Package functions suitable for inclusion in PACKAGEFUNCS
+# Used by do_packagedata (and possibly other routines post do_package)
#
+package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
python package_get_auto_pr() {
import oe.prservice
- import re
+
+ def get_do_package_hash(pn):
+ if d.getVar("BB_RUNTASK") != "do_package":
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ for dep in taskdepdata:
+ if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
+ return taskdepdata[dep][6]
+ return None
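get_do_package_hash() depends on the positional layout of BB_TASKDEPDATA entries; roughly, and treating the exact tuple shape as illustrative, index 0 is the recipe name, index 1 the task name, and index 6 the unihash it returns:

    # Hypothetical BB_TASKDEPDATA entry, shown only to explain the indices used above.
    taskdepdata = {
        "/path/foo.bb:do_package": (
            "foo",            # [0] PN
            "do_package",     # [1] task name
            "/path/foo.bb",   # [2] recipe file
            [],               # [3] dependencies
            [],               # [4] provides
            "taskhash",       # [5] task hash
            "unihash"),       # [6] unified hash returned above
    }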
# Support per recipe PRSERV_HOST
pn = d.getVar('PN')
@@ -681,15 +685,22 @@ python package_get_auto_pr() {
# PR Server not active, handle AUTOINC
if not d.getVar('PRSERV_HOST'):
- if 'AUTOINC' in pkgv:
- d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ d.setVar("PRSERV_PV_AUTOINC", "0")
return
auto_pr = None
pv = d.getVar("PV")
version = d.getVar("PRAUTOINX")
pkgarch = d.getVar("PACKAGE_ARCH")
- checksum = d.getVar("BB_TASKHASH")
+ checksum = get_do_package_hash(pn)
+
+ # If do_package isn't in the dependencies, we can't get the checksum...
+ if not checksum:
+ bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
+ #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #for dep in taskdepdata:
+ # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
+ return
if d.getVar('PRSERV_LOCKDOWN'):
auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
@@ -699,17 +710,16 @@ python package_get_auto_pr() {
return
try:
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = oe.prservice.prserv_make_conn(d)
+ conn = oe.prservice.prserv_make_conn(d)
if conn is not None:
if "AUTOINC" in pkgv:
srcpv = bb.fetch2.get_srcrev(d)
base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+ d.setVar("PRSERV_PV_AUTOINC", str(value))
auto_pr = conn.getPR(version, pkgarch, checksum)
+ conn.close()
except Exception as e:
bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
if auto_pr is None:
@@ -717,6 +727,22 @@ python package_get_auto_pr() {
d.setVar('PRAUTO',str(auto_pr))
}
+#
+# Package functions suitable for inclusion in PACKAGEFUNCS
+#
+
+python package_convert_pr_autoinc() {
+ pkgv = d.getVar("PKGV")
+
+ # Adjust pkgv as necessary...
+ if 'AUTOINC' in pkgv:
+ d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
+
+ # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
+ d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
+ d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
+}
+
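The intent of the split above is that do_package output no longer embeds live PR-server values, only stable markers, so its output hash does not change every time the counter increments. Illustratively (values invented):

    # A pkgdata line as written by do_package after this change:
    line = "PKGV: 1.0+git0+@PRSERV_PV_AUTOINC@"
    # do_packagedata later resolves the marker to the real counter value,
    # via the packagedata_translate_pr_autoinc() sed pass added later in this patch:
    line = line.replace("@PRSERV_PV_AUTOINC@", "5")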
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
@@ -753,13 +779,13 @@ python package_do_split_locales() {
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
packages.append(pkg)
- d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
- d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
- d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
- d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
- d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ d.setVar('FILES:' + pkg, os.path.join(datadir, 'locale', l))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
if locale_section:
- d.setVar('SECTION_' + pkg, locale_section)
+ d.setVar('SECTION:' + pkg, locale_section)
d.setVar('PACKAGES', ' '.join(packages))
@@ -769,17 +795,23 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
#rdep.append('%s-locale*' % pn)
- #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
}
python perform_packagecopy () {
import subprocess
+ import shutil
dest = d.getVar('D')
dvar = d.getVar('PKGD')
+ # Remove ${D}/sysroot-only if present
+ sysroot_only = os.path.join(dest, 'sysroot-only')
+ if cpath.exists(sysroot_only) and cpath.isdir(sysroot_only):
+ shutil.rmtree(sysroot_only)
+
    # Start package population by taking a copy of the installed
# files to operate on
# Preserve sparse files and hard links
@@ -829,7 +861,7 @@ python fixup_perms () {
self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
else:
msg = "Fixup Perms: invalid config line %s" % line
- package_qa_handle_error("perm-config", msg, d)
+ oe.qa.handle_error("perm-config", msg, d)
self.path = None
self.link = None
@@ -969,7 +1001,7 @@ python fixup_perms () {
continue
if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
msg = "Fixup perms: %s invalid line: %s" % (conf, line)
- package_qa_handle_error("perm-line", msg, d)
+ oe.qa.handle_error("perm-line", msg, d)
continue
entry = fs_perms_entry(d.expand(line))
if entry and entry.path:
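For reference, the parser above accepts two line shapes: an eight-field permissions entry and a three-field link entry. A sketch of that contract (the paths are illustrative):

    # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>   (8 fields)
    # <path> link <target>                                     (3 fields)
    for line in ["/usr/src 0755 root root false - - -",
                 "/var/run link /run"]:
        lsplit = line.split()
        assert len(lsplit) == 8 or (len(lsplit) == 3 and lsplit[1].lower() == "link")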
@@ -1006,13 +1038,13 @@ python fixup_perms () {
ptarget = os.path.join(os.path.dirname(dir), link)
if os.path.exists(target):
msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
- package_qa_handle_error("perm-link", msg, d)
+ oe.qa.handle_error("perm-link", msg, d)
continue
# Create path to move directory to, move it, and then setup the symlink
bb.utils.mkdirhier(os.path.dirname(target))
#bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
- os.rename(origin, target)
+ bb.utils.rename(origin, target)
#bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
os.symlink(link, origin)
@@ -1033,60 +1065,72 @@ python fixup_perms () {
fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
}
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
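To make the dv table concrete, here is where each style places the separated debug info for /usr/bin/foo, following the dest expression used in splitdebuginfo():

    import os

    src = "/usr/bin/foo"
    styles = {
        "default ('.debug')":   {"libdir": "",               "dir": "/.debug", "append": ""},
        "debug-file-directory": {"libdir": "/usr/lib/debug", "dir": "",        "append": ".debug"},
    }
    for name, dv in styles.items():
        dest = (dv["libdir"] + os.path.dirname(src) + dv["dir"]
                + "/" + os.path.basename(src) + dv["append"])
        print(name, "->", dest)
    # default ('.debug')   -> /usr/bin/.debug/foo
    # debug-file-directory -> /usr/lib/debug/usr/bin/foo.debug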
python split_and_strip_files () {
import stat, errno
import subprocess
dvar = d.getVar('PKGD')
pn = d.getVar('PN')
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
- # We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
- # Single debug-file-directory style debug info
- debugappend = ".debug"
- debugstaticappend = ""
- debugdir = ""
- debugstaticdir = ""
- debuglibdir = "/usr/lib/debug"
- debugstaticlibdir = "/usr/lib/debug-static"
- debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
- # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = ""
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
- else:
- # Original OE-core, a.k.a. ".debug", style debug info
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
+ dv = package_debug_vars(d)
#
    # First let's figure out all of the files we may have to process ... do this only once!
#
elffiles = {}
symlinks = {}
- kernmods = []
staticlibs = []
inodes = {}
libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
@@ -1101,17 +1145,14 @@ python split_and_strip_files () {
file = os.path.join(root, f)
# Skip debug files
- if debugappend and file.endswith(debugappend):
+ if dv["append"] and file.endswith(dv["append"]):
continue
- if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
continue
if file in skipfiles:
continue
- if file.endswith(".ko") and file.find("/lib/modules/") != -1:
- kernmods.append(file)
- continue
if oe.package.is_static_lib(file):
staticlibs.append(file)
continue
@@ -1128,8 +1169,11 @@ python split_and_strip_files () {
if not s:
continue
        # Check it's an executable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
if cpath.islink(file):
checkelflinks[file] = ltarget
@@ -1162,11 +1206,11 @@ python split_and_strip_files () {
# ...but is it ELF, and is it already stripped?
if elf_file & 1:
if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
else:
msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
- package_qa_handle_error("already-stripped", msg, d)
+ oe.qa.handle_error("already-stripped", msg, d)
continue
# At this point we have an unstripped elf file. We need to:
@@ -1188,19 +1232,29 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
    # First let's process debug splitting
#
if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
- if debugsrcdir and not targetos.startswith("mingw"):
+ if dv["srcdir"] and not hostos.startswith("mingw"):
if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
- results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
else:
for file in staticlibs:
results.append( (file,source_info(file, d)) )
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
sources = set()
for r in results:
sources.update(r[1])
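PKGDEBUGSOURCES, introduced here, maps each PKGD-relative file to the sorted list of debug sources it references, which emit_pkgdata later folds into the extended pkgdata. Its shape, with invented paths:

    # Hypothetical contents of the PKGDEBUGSOURCES mapping built above.
    pkgdebugsources = {
        "/usr/bin/foo":         ["/usr/src/debug/foo/1.0-r0/foo.c"],
        "/usr/lib/libbar.so.1": ["/usr/src/debug/bar/1.0-r0/bar.c"],
    }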
@@ -1213,9 +1267,9 @@ python split_and_strip_files () {
target = inodes[ref][0][len(dvar):]
for file in inodes[ref][1:]:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
fpath = dvar + dest
- ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
bb.utils.mkdirhier(os.path.dirname(fpath))
        # Only one hardlink of the separated debug info file in each directory
if not os.access(fpath, os.R_OK):
@@ -1225,7 +1279,7 @@ python split_and_strip_files () {
# Create symlinks for all cases we were able to split symbols
for file in symlinks:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
fpath = dvar + dest
# Skip it if the target doesn't exist
try:
@@ -1241,17 +1295,17 @@ python split_and_strip_files () {
lbase = os.path.basename(ltarget)
ftarget = ""
if lpath and lpath != ".":
- ftarget += lpath + debugdir + "/"
- ftarget += lbase + debugappend
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
if lpath.startswith(".."):
ftarget = os.path.join("..", ftarget)
bb.utils.mkdirhier(os.path.dirname(fpath))
#bb.note("Symlink %s -> %s" % (fpath, ftarget))
os.symlink(ftarget, fpath)
- # Process the debugsrcdir if requested...
+ # Process the dv["srcdir"] if requested...
# This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, sources, d)
+ copydebugsources(dv["srcdir"], sources, d)
#
# End of debug splitting
#
@@ -1266,8 +1320,6 @@ python split_and_strip_files () {
elf_file = int(elffiles[file])
#bb.note("Strip %s" % file)
sfiles.append((file, elf_file, strip))
- for f in kernmods:
- sfiles.append((f, 16, strip))
if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
for f in staticlibs:
sfiles.append((f, 16, strip))
@@ -1277,7 +1329,7 @@ python split_and_strip_files () {
# Build "minidebuginfo" and reinject it back into the stripped binaries
if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
- extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
+ extraargs=(dvar, dv, d))
#
# End of strip
@@ -1307,7 +1359,7 @@ python populate_packages () {
src_package_name = ('%s-src' % d.getVar('PN'))
if not src_package_name in packages:
packages.append(src_package_name)
- d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
@@ -1316,7 +1368,7 @@ python populate_packages () {
for i, pkg in enumerate(packages):
if pkg in package_dict:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
- package_qa_handle_error("packages-list", msg, d)
+ oe.qa.handle_error("packages-list", msg, d)
# Ensure the source package gets the chance to pick up the source files
# before the debug package by ordering it first in PACKAGES. Whether it
# actually picks up any source files is controlled by
@@ -1350,10 +1402,10 @@ python populate_packages () {
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
- filesvar = d.getVar('FILES_%s' % pkg) or ""
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
if "//" in filesvar:
msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
- package_qa_handle_error("files-invalid", msg, d)
+ oe.qa.handle_error("files-invalid", msg, d)
filesvar.replace("//", "/")
origfiles = filesvar.split()
@@ -1416,13 +1468,13 @@ python populate_packages () {
os.umask(oldumask)
os.chdir(workdir)
- # Handle LICENSE_EXCLUSION
+ # Handle excluding packages with incompatible licenses
package_list = []
for pkg in packages:
- licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
if licenses:
msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
- package_qa_handle_error("incompatible-license", msg, d)
+ oe.qa.handle_error("incompatible-license", msg, d)
else:
package_list.append(pkg)
d.setVar('PACKAGES', ' '.join(package_list))
@@ -1439,14 +1491,14 @@ python populate_packages () {
if unshipped != []:
msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
else:
for f in unshipped:
msg = msg + "\n " + f
msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
- package_qa_handle_error("installed-vs-shipped", msg, d)
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
}
populate_packages[dirs] = "${D}"
@@ -1487,11 +1539,11 @@ python package_fixsymlinks () {
bb.note("%s contains dangling symlink to %s" % (pkg, l))
for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for p in newrdepends[pkg]:
if p not in rdepends:
rdepends[p] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
@@ -1513,9 +1565,10 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS
python emit_pkgdata() {
from glob import glob
import json
+ import bb.compress.zstd
def process_postinst_on_target(pkg, mlprefix):
- pkgval = d.getVar('PKG_%s' % pkg)
+ pkgval = d.getVar('PKG:%s' % pkg)
if pkgval is None:
pkgval = pkg
@@ -1526,8 +1579,8 @@ if [ -n "$D" ]; then
fi
""" % (pkgval, mlprefix)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
if postinst_ontarget:
bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
@@ -1535,18 +1588,18 @@ fi
postinst = '#!/bin/sh\n'
postinst += defer_fragment
postinst += postinst_ontarget
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
def add_set_e_to_scriptlets(pkg):
for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
- scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
if scriptlet:
scriptlet_split = scriptlet.split('\n')
if scriptlet_split[0].startswith("#!"):
scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
else:
scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
- d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
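A worked example of the splice performed by add_set_e_to_scriptlets() (the scriptlet body is invented):

    scriptlet = "#!/bin/sh\nldconfig\n"
    split = scriptlet.split('\n')
    # The shebang stays first; 'set -e' is inserted immediately after it.
    result = split[0] + "\nset -e\n" + "\n".join(split[1:])
    # result == "#!/bin/sh\nset -e\nldconfig\n"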
def write_if_exists(f, pkg, var):
def encode(str):
@@ -1554,9 +1607,9 @@ fi
c = codecs.getencoder("unicode_escape")
return c(str)[0].decode("latin1")
- val = d.getVar('%s_%s' % (var, pkg))
+ val = d.getVar('%s:%s' % (var, pkg))
if val:
- f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
return val
val = d.getVar('%s' % (var))
if val:
@@ -1575,7 +1628,7 @@ fi
ml_pkg = "%s-%s" % (variant, pkg)
subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
with open(subdata_file, 'w') as fd:
- fd.write("PKG_%s: %s" % (ml_pkg, pkg))
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
packages = d.getVar('PACKAGES')
pkgdest = d.getVar('PKGDEST')
@@ -1585,6 +1638,8 @@ fi
with open(data_file, 'w') as fd:
fd.write("PACKAGES: %s\n" % packages)
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
@@ -1599,23 +1654,38 @@ fi
workdir = d.getVar('WORKDIR')
for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg)
+ pkgval = d.getVar('PKG:%s' % pkg)
if pkgval is None:
pkgval = pkg
- d.setVar('PKG_%s' % pkg, pkg)
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+ d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
add_set_e_to_scriptlets(pkg)
@@ -1626,24 +1696,29 @@ fi
val = write_if_exists(sf, pkg, var)
write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
+
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
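The extended record is plain JSON behind zstd, so it can be read back with the same helper; a sketch with an illustrative path (read mode should not need num_threads):

    import json
    import bb.compress.zstd

    with bb.compress.zstd.open("pkgdata/extended/foo.json.zstd", "rt",
                               encoding="utf-8") as f:
        extended_data = json.load(f)
    # e.g. the per-file size recorded above:
    size = extended_data["files_info"]["/usr/bin/foo"]["size"]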
# Symlinks needed for rprovides lookup
- rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
- for p in rprov.strip().split():
+ for p in bb.utils.explode_deps(rprov):
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
if not allow_empty:
allow_empty = d.getVar('ALLOW_EMPTY')
root = "%s/%s" % (pkgdest, pkg)
@@ -1665,7 +1740,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1673,15 +1749,15 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
# Collect perfile run-time dependency metadata
# Output:
-# FILERPROVIDESFLIST_pkg - list of all files w/ deps
-# FILERPROVIDES_filepath_pkg - per file dep
+# FILERPROVIDESFLIST:pkg - list of all files w/ deps
+# FILERPROVIDES:filepath:pkg - per file dep
#
-# FILERDEPENDSFLIST_pkg - list of all files w/ deps
-# FILERDEPENDS_filepath_pkg - per file dep
+# FILERDEPENDSFLIST:pkg - list of all files w/ deps
+# FILERDEPENDS:filepath:pkg - per file dep
python package_do_filedeps() {
if d.getVar('SKIP_FILEDEPS') == '1':
@@ -1696,7 +1772,7 @@ python package_do_filedeps() {
pkglist = []
for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
continue
if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
continue
@@ -1718,18 +1794,18 @@ python package_do_filedeps() {
for file in sorted(provides):
provides_files[pkg].append(file)
- key = "FILERPROVIDES_" + file + "_" + pkg
+ key = "FILERPROVIDES:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(provides[file]))
for file in sorted(requires):
requires_files[pkg].append(file)
- key = "FILERDEPENDS_" + file + "_" + pkg
+ key = "FILERDEPENDS:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(requires[file]))
for pkg in requires_files:
- d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
for pkg in provides_files:
- d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
}
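The per-file keys compose as VARNAME:<filepath>:<pkg>, so reading them back mirrors the writes above. A sketch (package name invented):

    pkg = "foo"
    for dfile in (d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split():
        deps = d.getVar('FILERDEPENDS:' + dfile + ':' + pkg) or ""
        print(dfile, "->", deps)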
SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
@@ -1761,14 +1837,14 @@ python package_do_shlibs() {
else:
shlib_pkgs = packages.split()
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
workdir = d.getVar('WORKDIR')
ver = d.getVar('PKGV')
if not ver:
msg = "PKGV not defined"
- package_qa_handle_error("pkgv-undefined", msg, d)
+ oe.qa.handle_error("pkgv-undefined", msg, d)
return
pkgdest = d.getVar('PKGDEST')
@@ -1808,7 +1884,7 @@ python package_do_shlibs() {
sonames.add(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
- if snap_symlinks and (os.path.basename(file) != this_soname):
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
return (needs_ldconfig, needed, sonames, renames)
@@ -1893,12 +1969,12 @@ python package_do_shlibs() {
shlib_provider = oe.package.read_shlib_providers(d)
for pkg in shlib_pkgs:
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
- pkgver = d.getVar('PKGV_' + pkg)
+ pkgver = d.getVar('PKGV:' + pkg)
if not pkgver:
pkgver = d.getVar('PV_' + pkg)
if not pkgver:
@@ -1912,9 +1988,9 @@ python package_do_shlibs() {
soname = None
if cpath.islink(file):
continue
- if targetos == "darwin" or targetos == "darwin8":
+ if hostos == "darwin" or hostos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
- elif targetos.startswith("mingw"):
+ elif hostos.startswith("mingw"):
mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
linuxlist.append(file)
@@ -1930,13 +2006,13 @@ python package_do_shlibs() {
for (old, new) in renames:
bb.note("Renaming %s to %s" % (old, new))
- os.rename(old, new)
+ bb.utils.rename(old, new)
pkgfiles[pkg].remove(old)
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
with open(shlibs_file, 'w') as fd:
- for s in sonames:
+ for s in sorted(sonames):
if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
(old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
if old_pkg != pkg:
@@ -1948,11 +2024,11 @@ python package_do_shlibs() {
shlib_provider[s[0]][s[1]] = (pkg, pkgver)
if needs_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('ldconfig_postinst_fragment')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
assumed_libs = d.getVar('ASSUME_SHLIBS')
@@ -1974,7 +2050,7 @@ python package_do_shlibs() {
for pkg in shlib_pkgs:
bb.debug(2, "calculating shlib requirements for %s" % pkg)
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
deps = list()
@@ -2042,12 +2118,12 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgconfig_provided[pkg] = []
pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
+ for file in sorted(pkgfiles[pkg]):
m = pc_re.match(file)
if m:
pd = bb.data.init()
name = m.group(1)
- pkgconfig_provided[pkg].append(name)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
if not os.access(file, os.R_OK):
continue
with open(file, 'r') as f:
@@ -2070,7 +2146,7 @@ python package_do_pkgconfig () {
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
with open(pkgs_file, 'w') as f:
- for p in pkgconfig_provided[pkg]:
+ for p in sorted(pkgconfig_provided[pkg]):
f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
@@ -2128,7 +2204,7 @@ python read_shlibdeps () {
packages = d.getVar('PACKAGES').split()
for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for dep in sorted(pkglibdeps[pkg]):
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
@@ -2136,7 +2212,7 @@ python read_shlibdeps () {
for v in pkglibdeps[pkg][dep]:
if v not in rdepends[dep]:
rdepends[dep].append(v)
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
python package_depchains() {
@@ -2160,7 +2236,7 @@ python package_depchains() {
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(depends):
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
@@ -2175,13 +2251,13 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(rdepends):
if depend.find('virtual-locale-') != -1:
@@ -2196,8 +2272,8 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def add_dep(list, dep):
if dep not in list:
@@ -2209,7 +2285,7 @@ python package_depchains() {
rdepends = []
for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -2243,7 +2319,7 @@ python package_depchains() {
for suffix in pkgs:
for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
@@ -2256,7 +2332,7 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
@@ -2273,11 +2349,11 @@ def gen_packagevar(d, pkgvars="PACKAGEVARS"):
ret.append(v)
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
# Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
# affected recipes.
- ret.append('LICENSE_EXCLUSION-%s' % p)
+ ret.append('_exclude_incompatible-%s' % p)
return " ".join(ret)
PACKAGE_PREPROCESS_FUNCS ?= ""
@@ -2309,7 +2385,7 @@ python do_package () {
# cache. This is useful if an item this class depends on changes in a
# way that the output of this class changes. rpmdeps is a good example
# as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "2"
+ # PACKAGE_BBCLASS_VERSION = "4"
# Init cachedpath
global cpath
@@ -2332,10 +2408,10 @@ python do_package () {
if not workdir or not outdir or not dest or not dvar or not pn:
msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
- package_qa_handle_error("var-undefined", msg, d)
+ oe.qa.handle_error("var-undefined", msg, d)
return
- bb.build.exec_func("package_get_auto_pr", d)
+ bb.build.exec_func("package_convert_pr_autoinc", d)
###########################################################################
# Optimisations
@@ -2385,12 +2461,10 @@ python do_package () {
for f in (d.getVar('PACKAGEFUNCS') or '').split():
bb.build.exec_func(f, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
-do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
+do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install
@@ -2407,9 +2481,21 @@ addtask do_package_setscene
# Copy from PKGDESTWORK to a temporary directory, as that directory can be cleaned by both
# do_package_setscene and do_packagedata_setscene, leading to races
python do_packagedata () {
+ bb.build.exec_func("package_get_auto_pr", d)
+
src = d.expand("${PKGDESTWORK}")
dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
oe.path.copyhardlinktree(src, dest)
+
+ bb.build.exec_func("packagedata_translate_pr_autoinc", d)
+}
+do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
+
+# Translate the EXTENDPRAUTO and AUTOINC to the final values
+packagedata_translate_pr_autoinc() {
+ find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
+ sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
+ -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}
addtask packagedata before do_build after do_package
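The Python equivalent of that sed pass, on an invented pkgdata fragment:

    text = "PKGV: 1.0+git0+@PRSERV_PV_AUTOINC@\nPKGR: r0.@EXTENDPRAUTO@\n"
    text = (text.replace("@PRSERV_PV_AUTOINC@", "5")
                .replace("@EXTENDPRAUTO@", "2"))
    # -> "PKGV: 1.0+git0+5\nPKGR: r0.2\n"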
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index cb723fc1d6..2e75e222bc 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -81,7 +81,7 @@ def deb_write_pkg(pkg, d):
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -314,12 +314,9 @@ python do_package_write_deb () {
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[umask] = "022"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
+addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_deb"
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 79cb36c513..f67cb0e5c9 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -4,6 +4,7 @@ IMAGE_PKGTYPE ?= "ipk"
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
+IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
@@ -64,7 +65,7 @@ def ipk_write_pkg(pkg, d):
try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -229,8 +230,8 @@ def ipk_write_pkg(pkg, d):
shell=True)
if d.getVar('IPK_SIGN_PACKAGES') == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
+ ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
finally:
@@ -272,11 +273,9 @@ python do_package_write_ipk () {
}
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[umask] = "022"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
+addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_ipk"
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_ipk"
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
index 18b7ed62e0..a1ea8fc041 100644
--- a/meta/classes/package_pkgdata.bbclass
+++ b/meta/classes/package_pkgdata.bbclass
@@ -162,6 +162,6 @@ python package_prepare_pkgdata() {
}
package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
-package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 519c22be47..e9ff1f7e65 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -40,10 +40,10 @@ def write_rpm_perfiledata(srcname, d):
outfile.write("# Dependency table\n")
outfile.write('deps = {\n')
for pkg in packages.split():
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
depends_dict = bb.utils.explode_dep_versions(deps)
file = dfile.replace("@underscore@", "_")
@@ -249,10 +249,10 @@ python write_specfile () {
def get_perfile(varname, pkg, d):
deps = []
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
depends = d.getVar(key)
if depends:
deps.append(depends)
@@ -300,13 +300,13 @@ python write_specfile () {
srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
srcdepends = d.getVar('DEPENDS')
- srcrdepends = []
- srcrrecommends = []
- srcrsuggests = []
- srcrprovides = []
- srcrreplaces = []
- srcrconflicts = []
- srcrobsoletes = []
+ srcrdepends = ""
+ srcrrecommends = ""
+ srcrsuggests = ""
+ srcrprovides = ""
+ srcrreplaces = ""
+ srcrconflicts = ""
+ srcrobsoletes = ""
srcrpreinst = []
srcrpostinst = []
@@ -332,7 +332,7 @@ python write_specfile () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -365,13 +365,13 @@ python write_specfile () {
# Map the dependencies into their final form
mapping_rename_hook(localdata)
- splitrdepends = localdata.getVar('RDEPENDS')
- splitrrecommends = localdata.getVar('RRECOMMENDS')
- splitrsuggests = localdata.getVar('RSUGGESTS')
- splitrprovides = localdata.getVar('RPROVIDES')
- splitrreplaces = localdata.getVar('RREPLACES')
- splitrconflicts = localdata.getVar('RCONFLICTS')
- splitrobsoletes = []
+ splitrdepends = localdata.getVar('RDEPENDS') or ""
+ splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
+ splitrsuggests = localdata.getVar('RSUGGESTS') or ""
+ splitrprovides = localdata.getVar('RPROVIDES') or ""
+ splitrreplaces = localdata.getVar('RREPLACES') or ""
+ splitrconflicts = localdata.getVar('RCONFLICTS') or ""
+ splitrobsoletes = ""
splitrpreinst = localdata.getVar('pkg_preinst')
splitrpostinst = localdata.getVar('pkg_postinst')
@@ -439,9 +439,9 @@ python write_specfile () {
spec_preamble_bottom.append(splitcustomtagschunk)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(splitrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -533,9 +533,9 @@ python write_specfile () {
tail_source(d)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(srcrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -557,7 +557,7 @@ python write_specfile () {
print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
- print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
+ print_deps(srcrprovides, "Provides", spec_preamble_top, d)
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
@@ -684,10 +684,12 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w6T.xzdio'"
- cmd = cmd + " --define '_source_payload w6T.xzdio'"
+ cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
+ cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
+ cmd = cmd + " --define '__font_provides %{nil}'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
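The payload macro format is w<level>T<threads>.<io>, so w19T%d.zstdio selects zstd level 19 with BitBake's thread count, replacing the previous xz setting (w6T.xzdio). One way to verify that a built package picked it up, a sketch using rpm's standard PAYLOADCOMPRESSOR query tag:

    import subprocess

    out = subprocess.check_output(
        ["rpm", "-qp", "--qf", "%{PAYLOADCOMPRESSOR}\n", "foo-1.0-r0.rpm"])
    print(out.decode().strip())   # expected: "zstd" after this change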
@@ -745,11 +747,9 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[umask] = "022"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
+addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_rpm"
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_rpm"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
index ce3ab4c8e2..d6c1b306fc 100644
--- a/meta/classes/package_tar.bbclass
+++ b/meta/classes/package_tar.bbclass
@@ -57,10 +57,8 @@ python do_package_tar () {
python () {
if d.getVar('PACKAGES') != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
- deps.append('tar-native:do_populate_sysroot')
- deps.append('virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
+ deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_tar', 'depends', deps)
d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
}
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
index a903e5cfd2..c2760e2bf0 100644
--- a/meta/classes/packagedata.bbclass
+++ b/meta/classes/packagedata.bbclass
@@ -24,10 +24,10 @@ python read_subpackage_metadata () {
continue
#
# If we set unsuffixed variables here there is a chance they could clobber override versions
- # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>
+ # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
# We therefore don't clobber for the unsuffixed variable versions
#
- if key.endswith("_" + pkg):
+ if key.endswith(":" + pkg):
d.setVar(key, sdata[key])
else:
d.setVar(key, sdata[key], parsing=True)
diff --git a/meta/classes/packagefeed-stability.bbclass b/meta/classes/packagefeed-stability.bbclass
deleted file mode 100644
index 5648602564..0000000000
--- a/meta/classes/packagefeed-stability.bbclass
+++ /dev/null
@@ -1,252 +0,0 @@
-# Class to avoid copying packages into the feed if they haven't materially changed
-#
-# Copyright (C) 2015 Intel Corporation
-# Released under the MIT license (see COPYING.MIT for details)
-#
-# This class effectively intercepts packages as they are written out by
-# do_package_write_*, causing them to be written into a different
-# directory where we can compare them to whatever older packages might
-# be in the "real" package feed directory, and avoid copying the new
-# package to the feed if it has not materially changed. The idea is to
-# avoid unnecessary churn in the packages when dependencies trigger task
-# reexecution (and thus repackaging). Enabling the class is simple:
-#
-# INHERIT += "packagefeed-stability"
-#
-# Caveats:
-# 1) Latest PR values in the build system may not match those in packages
-# seen on the target (naturally)
-# 2) If you rebuild from sstate without the existing package feed present,
-# you will lose the "state" of the package feed i.e. the preserved old
-# package versions. Not the end of the world, but would negate the
-# entire purpose of this class.
-#
-# Note that running -c cleanall on a recipe will purposely delete the old
-# package files so they will definitely be copied the next time.
-
-python() {
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
- # Package backend agnostic intercept
- # This assumes that the package_write task is called package_write_<pkgtype>
- # and that the directory in which packages should be written is
- # pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- pkgwritefunc = 'do_package_write_%s' % pkgtype
- sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False)
- deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper()
- deploydirvarref = '${' + deploydirvar + '}'
- pkgcomparefunc = 'do_package_compare_%s' % pkgtype
-
- if bb.data.inherits_class('image', d):
- d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_base', d):
- d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_ext', d):
- d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc)
-
- d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
- # Packaging is disabled for this recipe, we shouldn't do anything
- continue
-
- if deploydirvarref in sstate_outputdirs:
- deplor_dir_pkgtype = d.expand(deploydirvarref + '-prediff')
- # Set intermediate output directory
- d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deplor_dir_pkgtype))
- # Update SSTATE_DUPWHITELIST to avoid shared location conflicted error
- d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deplor_dir_pkgtype)
-
- d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False))
- d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False))
- d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot')
- bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d)
-}
-
-# This isn't the real task function - it's a template that we use in the
-# anonymous python code above
-fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK')
- pkgtype = currenttask.rsplit('_', 1)[1]
- package_compare_impl(pkgtype, d)
-}
-
-def package_compare_impl(pkgtype, d):
- import errno
- import fnmatch
- import glob
- import subprocess
- import oe.sstatesig
-
- pn = d.getVar('PN')
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff/'
-
- # Find out PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR')
- packages = []
- try:
- with open(os.path.join(pkgdatadir, pn), 'r') as f:
- for line in f:
- if line.startswith('PACKAGES:'):
- packages = line.split(':', 1)[1].split()
- break
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- if not packages:
- bb.debug(2, '%s: no packages, nothing to do' % pn)
- return
-
- pkgrvalues = {}
- rpkgnames = {}
- rdepends = {}
- pkgvvalues = {}
- for pkg in packages:
- with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f:
- for line in f:
- if line.startswith('PKGR:'):
- pkgrvalues[pkg] = line.split(':', 1)[1].strip()
- if line.startswith('PKGV:'):
- pkgvvalues[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('PKG_%s:' % pkg):
- rpkgnames[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('RDEPENDS_%s:' % pkg):
- rdepends[pkg] = line.split(':', 1)[1].strip()
-
- # Prepare a list of the runtime package names for packages that were
- # actually produced
- rpkglist = []
- for pkg, rpkg in rpkgnames.items():
- if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')):
- rpkglist.append((rpkg, pkg))
- rpkglist.sort(key=lambda x: len(x[0]), reverse=True)
-
- pvu = d.getVar('PV', False)
- if '$' + '{SRCPV}' in pvu:
- pvprefix = pvu.split('$' + '{SRCPV}', 1)[0]
- else:
- pvprefix = None
-
- pkgwritetask = 'package_write_%s' % pkgtype
- files = []
- docopy = False
- manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX')
- # Copy recipe's all packages if one of the packages are different to make
- # they have the same PR.
- with open(manifest, 'r') as f:
- for line in f:
- if line.startswith(prepath):
- srcpath = line.rstrip()
- if os.path.isfile(srcpath):
- destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath))
-
- # This is crude but should work assuming the output
- # package file name starts with the package name
- # and rpkglist is sorted by length (descending)
- pkgbasename = os.path.basename(destpath)
- pkgname = None
- for rpkg, pkg in rpkglist:
- if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix):
- rpkg = rpkg[len(mlprefix):]
- if pkgbasename.startswith(rpkg):
- pkgr = pkgrvalues[pkg]
- destpathspec = destpath.replace(pkgr, '*')
- if pvprefix:
- pkgv = pkgvvalues[pkg]
- if pkgv.startswith(pvprefix):
- pkgvsuffix = pkgv[len(pvprefix):]
- if '+' in pkgvsuffix:
- newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1]
- destpathspec = destpathspec.replace(pkgv, newpkgv)
- pkgname = pkg
- break
- else:
- bb.warn('Unable to map %s back to package' % pkgbasename)
- destpathspec = destpath
-
- oldfile = None
- if not docopy:
- oldfiles = glob.glob(destpathspec)
- if oldfiles:
- oldfile = oldfiles[-1]
- result = subprocess.call(['pkg-diff.sh', oldfile, srcpath])
- if result != 0:
- docopy = True
- bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath))
- else:
- docopy = True
- bb.note("No old packages found for %s, will copy packages" % pkgname)
-
- files.append((pkgname, pkgbasename, srcpath, destpath))
-
- # Remove all the old files and copy again if docopy
- if docopy:
- bb.note('Copying packages for recipe %s' % pn)
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- bb.note('Removed old package %s' % fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- # Create new manifest
- with open(pcmanifest, 'w') as f:
- for pkgname, pkgbasename, srcpath, destpath in files:
- destdir = os.path.dirname(destpath)
- bb.utils.mkdirhier(destdir)
- # Remove allarch rpm pkg if it is already existed (for
- # multilib), they're identical in theory, but sstate.bbclass
- # copies it again, so keep align with that.
- if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH') == 'all':
- os.unlink(destpath)
- if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
- # Use a hard link to save space
- os.link(srcpath, destpath)
- else:
- shutil.copyfile(srcpath, destpath)
- f.write('%s\n' % destpath)
- else:
- bb.note('Not copying packages for recipe %s' % pn)
-
-do_cleansstate[postfuncs] += "pfs_cleanpkgs"
-python pfs_cleanpkgs () {
- import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff'
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- os.remove(pcmanifest)
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-}
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 1541c8fbff..557b1b6382 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -32,7 +32,7 @@ python () {
for suffix in types]
d.setVar('PACKAGES', ' '.join(packages))
for pkg in packages:
- d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkg, '1')
}
# We don't want to look at shared library dependencies for the
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 25ec089ae1..8de7025491 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -10,7 +10,7 @@ PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
# This leaks into debug sources in particular. Add the dependency
# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
-PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
+PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
@@ -131,6 +131,9 @@ python patch_do_patch() {
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
+ if not os.path.isdir(patchdir):
+ bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
+ (patchdir, parm["patchdir"], parm['patchname']))
else:
patchdir = s
@@ -147,12 +150,12 @@ python patch_do_patch() {
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
+ bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
+ bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
bb.utils.remove(process_tmpdir, True)
del os.environ['TMPDIR']
@@ -160,7 +163,6 @@ python patch_do_patch() {
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
addtask patch after do_unpack
-do_patch[umask] = "022"
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index b07f51ed56..886bf195b3 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS_append_class-target = " qemu-native"
+DEPENDS:append:class-target = " qemu-native"
inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
@@ -29,30 +29,30 @@ else
fi
}
-python populate_packages_append() {
+python populate_packages:append() {
pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
for pkg in pixbuf_pkgs:
bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('pixbufcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('pixbufcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
gdkpixbuf_complete() {
GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
-DEPENDS_append_class-native = " gdk-pixbuf-native"
-SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
+DEPENDS:append:class-native = " gdk-pixbuf-native"
+SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst"
pixbufcache_sstate_postinst() {
mkdir -p ${SYSROOT_DESTDIR}${bindir}
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
index ad1f84f506..fa94527ce9 100644
--- a/meta/classes/pkgconfig.bbclass
+++ b/meta/classes/pkgconfig.bbclass
@@ -1,2 +1,2 @@
-DEPENDS_prepend = "pkgconfig-native "
+DEPENDS:prepend = "pkgconfig-native "
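The two-line pkgconfig change above is representative of the mechanical conversion running through this whole series: the legacy underscore override syntax is replaced by the colon-based syntax required by newer BitBake. A minimal before/after sketch (the variable values are illustrative, not taken from this patch):

    # Old underscore override syntax:
    DEPENDS_prepend = "foo-native "
    FILES_${PN}_append_class-target = " ${bindir}/foo"
    # Equivalent new colon-based syntax:
    DEPENDS:prepend = "foo-native "
    FILES:${PN}:append:class-target = " ${bindir}/foo"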
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 990505e89b..16f929bf59 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -1,4 +1,6 @@
-inherit meta image-postinst-intercepts
+PACKAGES = ""
+
+inherit image-postinst-intercepts image-artifact-names
# Wildcards specifying complementary packages to install for every package that has been explicitly
# installed into the rootfs
@@ -23,7 +25,7 @@ SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_F
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
-PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
+PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target"
SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
# List of locales to install, or "all" for all of them, or unset for none.
@@ -37,7 +39,7 @@ SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
-B_task-populate-sdk = "${SDK_DIR}"
+B:task-populate-sdk = "${SDK_DIR}"
SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
@@ -66,7 +68,7 @@ python () {
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -90,6 +92,8 @@ SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+SDK_PRUNE_SYSROOT_DIRS ?= "/dev"
+
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
@@ -101,6 +105,12 @@ python write_target_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
+sdk_prune_dirs () {
+ for d in ${SDK_PRUNE_SYSROOT_DIRS}; do
+ rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d
+ done
+}
+
python write_sdk_test_data() {
from oe.data import export2json
testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
@@ -119,9 +129,10 @@ python write_host_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
-POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
-POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
+
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
@@ -172,11 +183,17 @@ fakeroot python do_populate_sdk() {
populate_sdk_common(d)
}
SSTATETASKS += "do_populate_sdk"
-SSTATE_SKIP_CREATION_task-populate-sdk = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk = '1'
do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
+python do_populate_sdk_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_sdk_setscene
+
+PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -261,7 +278,7 @@ fakeroot create_shar() {
rm -f ${T}/pre_install_command ${T}/post_install_command
- if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
fi
cat << "EOF" >> ${T}/pre_install_command
@@ -278,6 +295,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
@@ -327,6 +345,13 @@ def sdk_variables(d):
do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
+python () {
+ variables = sdk_command_variables(d)
+ for var in variables:
+ if d.getVar(var, False):
+ d.setVarFlag(var, 'func', '1')
+}
+
do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
${TOOLCHAIN_SHAR_EXT_TMPL}:True"
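The new SDK_PRUNE_SYSROOT_DIRS variable and sdk_prune_dirs function above remove unwanted directories from the SDK target sysroot after the manifests have been written; only /dev is pruned by default. A sketch of how a distro configuration might extend the list (the extra paths are hypothetical examples, not defaults):

    # Prune additional directories from the SDK target sysroot
    SDK_PRUNE_SYSROOT_DIRS:append = " /var/cache /run"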
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index fd0da16e7e..e2019f9bbf 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -2,19 +2,15 @@
inherit populate_sdk_base
-# NOTE: normally you cannot use task overrides for this kind of thing - this
-# only works because of get_sdk_ext_rdepends()
-
-TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+# Used to override TOOLCHAIN_HOST_TASK in the eSDK case
+TOOLCHAIN_HOST_TASK_ESDK = " \
meta-environment-extsdk-${MACHINE} \
"
-TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-
-SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0"
SDK_EXT = ""
-SDK_EXT_task-populate-sdk-ext = "-ext"
+SDK_EXT:task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
@@ -24,9 +20,10 @@ SDK_INCLUDE_NATIVESDK ?= "0"
SDK_INCLUDE_BUILDTOOLS ?= '1'
SDK_RECRDEP_TASKS ?= ""
+SDK_CUSTOM_TEMPLATECONF ?= "0"
-SDK_LOCAL_CONF_WHITELIST ?= ""
-SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
+ESDK_LOCALCONF_ALLOW ?= ""
+ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
BB_NUMBER_THREADS \
BB_NUMBER_PARSE_THREADS \
PARALLEL_MAKE \
@@ -37,7 +34,7 @@ SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
TMPDIR \
BB_SERVER_TIMEOUT \
"
-SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
+ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
SDK_TARGETS ?= "${PN}"
@@ -77,10 +74,10 @@ COREBASE_FILES ?= " \
.templateconf \
"
-SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
-B_task-populate-sdk-ext = "${SDK_DIR}"
+SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B:task-populate-sdk-ext = "${SDK_DIR}"
TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
-TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
+TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
@@ -117,9 +114,9 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
+SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
@@ -146,15 +143,15 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
try:
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
- f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
+ f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
+ f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
- f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
- f.write('TCLIBCAPPEND_forcevariable = ""\n')
+ f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND:forcevariable = ""\n')
# Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
# be different and we won't be able to find our native sstate)
if not bb.data.inherits_class('uninative', d):
- f.write('INHERIT_remove = "uninative"\n')
+ f.write('INHERIT:remove = "uninative"\n')
# Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
# will not allow in its COREBASE path, so we need to rename the directory temporarily
@@ -164,7 +161,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
shutil.rmtree(temp_sdkbasepath)
except FileNotFoundError:
pass
- os.rename(sdkbasepath, temp_sdkbasepath)
+ bb.utils.rename(sdkbasepath, temp_sdkbasepath)
cmdprefix = '. %s .; ' % conf_initpath
logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
try:
@@ -174,7 +171,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
bb.fatal(msg)
- os.rename(temp_sdkbasepath, sdkbasepath)
+ bb.utils.rename(temp_sdkbasepath, sdkbasepath)
# Clean out residue of running bitbake, which check_sstate_task_list()
# will effectively do
clean_esdk_builddir(d, sdkbasepath)
@@ -199,6 +196,9 @@ python copy_buildsystem () {
buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
+    # Check if a custom templateconf path is set

+ use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
+
# Determine if we're building a derivative extensible SDK (from devtool build-sdk)
derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
if derivative:
@@ -247,7 +247,9 @@ python copy_buildsystem () {
# Create a layer for new recipes / appends
bbpath = d.getVar('BBPATH')
- bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
+ env = os.environ.copy()
+ env['PYTHONDONTWRITEBYTECODE'] = '1'
+ bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
# Create bblayers.conf
bb.utils.mkdirhier(baseoutpath + '/conf')
@@ -280,8 +282,8 @@ python copy_buildsystem () {
bb.utils.mkdirhier(uninative_outdir)
shutil.copy(uninative_file, uninative_outdir)
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
- env_whitelist_values = {}
+ env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
+ env_passthrough_values = {}
# Create local.conf
builddir = d.getVar('TOPDIR')
@@ -292,15 +294,15 @@ python copy_buildsystem () {
if derivative:
shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
+ local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split()
+ local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split()
def handle_var(varname, origvalue, op, newlines):
- if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
+ if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed):
newlines.append('# Removed original setting of %s\n' % varname)
return None, op, 0, True
else:
- if varname in env_whitelist:
- env_whitelist_values[varname] = origvalue
+ if varname in env_passthrough:
+ env_passthrough_values[varname] = origvalue
return origvalue, op, 0, True
varlist = ['[^#=+ ]*']
oldlines = []
@@ -310,8 +312,9 @@ python copy_buildsystem () {
if os.path.exists(builddir + '/conf/auto.conf'):
with open(builddir + '/conf/auto.conf', 'r') as f:
oldlines += f.readlines()
- with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines += f.readlines()
+ if os.path.exists(builddir + '/conf/local.conf'):
+ with open(builddir + '/conf/local.conf', 'r') as f:
+ oldlines += f.readlines()
(updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
with open(baseoutpath + '/conf/local.conf', 'w') as f:
@@ -335,7 +338,7 @@ python copy_buildsystem () {
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
- f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
+ f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False))
# Bypass the default connectivity check if any
f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
@@ -351,20 +354,27 @@ python copy_buildsystem () {
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
# We want to be able to set this without a full reparse
- f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
+ f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
- # Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
+ # Set up which tasks are ignored for run on install
+ f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
+ # Write METADATA_REVISION
+ f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
# Map gcc-dependent uninative sstate cache for installer usage
f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
+ if d.getVar("PRSERV_HOST"):
+        # Override this, we now include PR data, so it should only point to the local database
+ f.write('PRSERV_HOST = "localhost:0"\n\n')
+
# Allow additional config through sdk-extra.conf
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
if fn:
@@ -383,13 +393,42 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
+    # Copy multiple configurations if they exist in the user's config directory
+ if d.getVar('BBMULTICONFIG') is not None:
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf', 'multiconfig'))
+ for mc in d.getVar('BBMULTICONFIG').split():
+ dest_stub = "/conf/multiconfig/%s.conf" % (mc,)
+ if os.path.exists(builddir + dest_stub):
+ shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub)
+
if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
bb.parse.siggen.save_unitaskhashes()
bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+ # If PR Service is in use, we need to export this as well
+    bb.note('Do we have a PR database?')
+ if d.getVar("PRSERV_HOST"):
+ bb.note('Writing PR database...')
+ # Based on the code in classes/prexport.bbclass
+ import oe.prservice
+        # Dump meta info of the tables
+ localdata = d.createCopy()
+ localdata.setVar('PRSERV_DUMPOPT_COL', "1")
+ localdata.setVar('PRSERV_DUMPDIR', os.path.join(baseoutpath, 'conf'))
+ localdata.setVar('PRSERV_DUMPFILE', '${PRSERV_DUMPDIR}/prserv.inc')
+
+        bb.note('PR database will be written to %s' % (localdata.getVar('PRSERV_DUMPFILE')))
+
+ retval = oe.prservice.prserv_dump_db(localdata)
+ if not retval:
+ bb.error("prexport_handler: export failed!")
+ return
+ (metainfo, datainfo) = retval
+ oe.prservice.prserv_export_tofile(localdata, metainfo, datainfo, True)
+
# Use templateconf.cfg file from builddir if exists
- if os.path.exists(builddir + '/conf/templateconf.cfg'):
+ if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1':
shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
else:
# Write a templateconf.cfg
@@ -397,9 +436,9 @@ python copy_buildsystem () {
f.write('meta/conf\n')
# Ensure any variables set from the external environment (by way of
- # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
+ # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
extralines = []
- for name, value in env_whitelist_values.items():
+ for name, value in env_passthrough_values.items():
actualvalue = d.getVar(name) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
@@ -511,7 +550,7 @@ python copy_buildsystem () {
# We don't need sstate do_package files
for root, dirs, files in os.walk(sstate_out):
for name in files:
- if name.endswith("_package.tgz"):
+ if name.endswith("_package.tar.zst"):
f = os.path.join(root, name)
os.remove(f)
@@ -521,11 +560,20 @@ python copy_buildsystem () {
# sdk_ext_postinst() below) thus the checksum we take here would always
# be different.
manifest_file_list = ['conf/*']
+ if d.getVar('BBMULTICONFIG') is not None:
+ manifest_file_list.append('conf/multiconfig/*')
+
+ esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
+ esdk_manifest_excludes_list = []
+ for exclude_item in esdk_manifest_excludes:
+ esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
with open(manifest_file, 'w') as f:
for item in manifest_file_list:
for fn in glob.glob(os.path.join(baseoutpath, item)):
- if fn == manifest_file:
+ if fn == manifest_file or os.path.isdir(fn):
+ continue
+ if fn in esdk_manifest_excludes_list:
continue
chksum = bb.utils.sha256_file(fn)
f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
@@ -578,7 +626,7 @@ install_tools() {
for script in $scripts; do
for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
- test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
+ test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
done
done
# We can't use the same method as above because files in the sysroot won't exist at this point
@@ -586,7 +634,7 @@ install_tools() {
unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
- lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
+ ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
@@ -636,7 +684,7 @@ sdk_ext_preinst() {
EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
fi
}
-SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
+SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}"
# FIXME this preparation should be done as part of the SDK construction
sdk_ext_postinst() {
@@ -653,7 +701,10 @@ sdk_ext_postinst() {
# Make sure when the user sets up the environment, they also get
# the buildtools-tarball tools in their path.
+ echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
+ echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
fi
	# Allow bitbake environment setup to be run as part of this sdk.
@@ -684,9 +735,9 @@ sdk_ext_postinst() {
echo done
}
-SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
-SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
@@ -695,6 +746,15 @@ fakeroot python do_populate_sdk_ext() {
if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
+ # FIXME hopefully we can remove this restriction at some point, but the eSDK
+ # can only be built for the primary (default) multiconfig
+ if d.getVar('BB_CURRENT_MC') != 'default':
+ bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
+
+ # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
+ d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}")
+ d.setVar("TOOLCHAIN_TARGET_TASK", "")
+
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
buildtools_fn = get_current_buildtools(d)
@@ -740,12 +800,7 @@ do_sdk_depends[dirs] = "${WORKDIR}"
do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
-do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
-
-def get_sdk_ext_rdepends(d):
- localdata = d.createCopy()
- localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- return localdata.getVarFlag('do_populate_sdk', 'rdepends')
+do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}"
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
@@ -773,7 +828,7 @@ do_populate_sdk_ext[nostamp] = "1"
SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
SSTATETASKS += "do_populate_sdk_ext"
-SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1'
do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
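For layers and site configurations migrating to the renamed eSDK variables introduced above, a hedged sketch of the new spellings (the values are illustrative):

    # Formerly SDK_LOCAL_CONF_WHITELIST / SDK_LOCAL_CONF_BLACKLIST / SDK_INHERIT_BLACKLIST
    ESDK_LOCALCONF_ALLOW = "MY_SITE_VARIABLE"
    ESDK_LOCALCONF_REMOVE:append = " DL_DIR"
    ESDK_CLASS_INHERIT_DISABLE:append = " own-mirrors"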
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
index 478a33474d..18bd3dbff9 100644
--- a/meta/classes/ptest-gnome.bbclass
+++ b/meta/classes/ptest-gnome.bbclass
@@ -1,8 +1,8 @@
inherit ptest
-EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
-FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
+FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \
${datadir}/installed-tests/"
-RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
+RDEPENDS:${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
index a4bc40b51a..5dd72c9dad 100644
--- a/meta/classes/ptest-perl.bbclass
+++ b/meta/classes/ptest-perl.bbclass
@@ -1,6 +1,6 @@
inherit ptest
-FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
+FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:"
SRC_URI += "file://ptest-perl/run-ptest"
@@ -13,9 +13,9 @@ do_install_ptest_perl() {
chown -R root:root ${D}${PTEST_PATH}
}
-FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
+FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
-RDEPENDS_${PN}-ptest_prepend = "perl "
+RDEPENDS:${PN}-ptest:prepend = "perl "
addtask install_ptest_perl after do_install_ptest_base before do_package
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
index fa4c36ec76..1ec23c0923 100644
--- a/meta/classes/ptest.bbclass
+++ b/meta/classes/ptest.bbclass
@@ -1,25 +1,27 @@
-SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
-DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
+SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files"
+DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
PTEST_PATH ?= "${libdir}/${BPN}/ptest"
PTEST_BUILD_HOST_FILES ?= "Makefile"
PTEST_BUILD_HOST_PATTERN ?= ""
-FILES_${PN}-ptest = "${PTEST_PATH}"
-SECTION_${PN}-ptest = "devel"
-ALLOW_EMPTY_${PN}-ptest = "1"
+FILES:${PN}-ptest += "${PTEST_PATH}"
+SECTION:${PN}-ptest = "devel"
+ALLOW_EMPTY:${PN}-ptest = "1"
PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
-PTEST_ENABLED_class-native = ""
-PTEST_ENABLED_class-nativesdk = ""
-PTEST_ENABLED_class-cross-canadian = ""
-RDEPENDS_${PN}-ptest += "${PN}"
-RDEPENDS_${PN}-ptest_class-native = ""
-RDEPENDS_${PN}-ptest_class-nativesdk = ""
-RRECOMMENDS_${PN}-ptest += "ptest-runner"
+PTEST_ENABLED:class-native = ""
+PTEST_ENABLED:class-nativesdk = ""
+PTEST_ENABLED:class-cross-canadian = ""
+RDEPENDS:${PN}-ptest += "${PN}"
+RDEPENDS:${PN}-ptest:class-native = ""
+RDEPENDS:${PN}-ptest:class-nativesdk = ""
+RRECOMMENDS:${PN}-ptest += "ptest-runner"
PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
+require conf/distro/include/ptest-packagelists.inc
+
do_configure_ptest() {
:
}
@@ -71,7 +73,7 @@ PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
# This function needs to run after apply_update_alternative_renames because the
# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
# used here to make this function to run as late as possible.
-PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
+PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
python ptest_update_alternatives() {
@@ -117,3 +119,14 @@ python () {
for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
bb.build.deltask(i, d)
}
+
+QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
+def package_qa_check_missing_ptest(pn, d, messages):
+ # This checks that ptest package is actually included
+ # in standard oe-core ptest images - only for oe-core recipes
+ if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"):
+ return
+
+ enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
+ if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
+ oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
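The new missing-ptest QA check verifies that an oe-core recipe with PTEST_ENABLED set has its -ptest package listed in conf/distro/include/ptest-packagelists.inc. Based on the membership test above, which looks for "<recipe>-ptest", a newly ptest-enabled recipe would need an entry along these lines (the recipe name is hypothetical):

    # In conf/distro/include/ptest-packagelists.inc
    PTESTS_FAST:append = " example-recipe-ptest"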
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index 87b4c85fc0..9405d58601 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -8,18 +8,18 @@ def pypi_package(d):
PYPI_PACKAGE ?= "${@pypi_package(d)}"
PYPI_PACKAGE_EXT ?= "tar.gz"
+PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
def pypi_src_uri(d):
package = d.getVar('PYPI_PACKAGE')
- package_ext = d.getVar('PYPI_PACKAGE_EXT')
- pv = d.getVar('PV')
- return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
+ archive_name = d.getVar('PYPI_ARCHIVE_NAME')
+ return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
SECTION = "devel/python"
-SRC_URI += "${PYPI_SRC_URI}"
+SRC_URI:prepend = "${PYPI_SRC_URI} "
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
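The new PYPI_ARCHIVE_NAME variable decouples the fetched file name from the ${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT} default, which helps when an sdist is published under a normalized name. A hypothetical recipe fragment (note that S still defaults to ${WORKDIR}/${PYPI_PACKAGE}-${PV} and may need adjusting too):

    inherit pypi setuptools3
    PYPI_PACKAGE = "Example.Package"
    # Override only when the sdist name differs from the default pattern
    PYPI_ARCHIVE_NAME = "example_package-${PV}.tar.gz"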
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
index 036d7140d9..ff03e584d4 100644
--- a/meta/classes/python3-dir.bbclass
+++ b/meta/classes/python3-dir.bbclass
@@ -1,4 +1,4 @@
-PYTHON_BASEVERSION = "3.8"
+PYTHON_BASEVERSION = "3.10"
PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
index d98fb4c758..3783c0c47e 100644
--- a/meta/classes/python3native.bbclass
+++ b/meta/classes/python3native.bbclass
@@ -2,9 +2,9 @@ inherit python3-dir
PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
EXTRANATIVEPATH += "python3-native"
-DEPENDS_append = " python3-native "
+DEPENDS:append = " python3-native "
-# python-config and other scripts are using distutils modules
+# python-config and other scripts are using sysconfig modules
# which we patch to access these variables
export STAGING_INCDIR
export STAGING_LIBDIR
@@ -17,8 +17,6 @@ export STAGING_LIBDIR
export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
-export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
-
# suppress host user's site-packages dirs.
export PYTHONNOUSERSITE = "1"
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
new file mode 100644
index 0000000000..2476858cae
--- /dev/null
+++ b/meta/classes/python3targetconfig.bbclass
@@ -0,0 +1,29 @@
+inherit python3native
+
+EXTRA_PYTHON_DEPENDS ?= ""
+EXTRA_PYTHON_DEPENDS:class-target = "python3"
+DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
+
+do_configure:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_configure:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
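python3targetconfig makes target and nativesdk builds see the target's _sysconfigdata instead of the build host's, by exporting _PYTHON_SYSCONFIGDATA_NAME around the configure, compile, and install tasks. A recipe that links against the target Python would use it roughly like this (a minimal sketch, the recipe context is hypothetical):

    # Hypothetical recipe linking against the target libpython
    inherit setuptools3 python3targetconfig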
diff --git a/meta/classes/python_flit_core.bbclass b/meta/classes/python_flit_core.bbclass
new file mode 100644
index 0000000000..96652aa204
--- /dev/null
+++ b/meta/classes/python_flit_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native python3-dir setuptools3-base
+
+DEPENDS += "python3 python3-flit-core-native"
+
+PEP517_BUILD_API = "flit_core.buildapi"
diff --git a/meta/classes/python_pep517.bbclass b/meta/classes/python_pep517.bbclass
new file mode 100644
index 0000000000..34ffdc9c0d
--- /dev/null
+++ b/meta/classes/python_pep517.bbclass
@@ -0,0 +1,56 @@
+# Common infrastructure for Python packages that use PEP-517 compliant packaging.
+# https://www.python.org/dev/peps/pep-0517/
+#
+# This class will build a wheel in do_compile, and use pypa/installer to install
+# it in do_install.
+
+DEPENDS:append = " python3-installer-native"
+
+# Where to execute the build process from
+PEP517_SOURCE_PATH ?= "${S}"
+
+# The PEP517 build API entry point
+PEP517_BUILD_API ?= "unset"
+
+# The directory where wheels will be written
+PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
+
+# The interpreter to use for installed scripts
+PEP517_INSTALL_PYTHON = "python3"
+PEP517_INSTALL_PYTHON:class-native = "nativepython3"
+
+# pypa/installer option to control the bytecode compilation
+INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0"
+
+# PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid
+# running base_do_configure.
+python_pep517_do_configure () {
+ :
+}
+
+# When we have Python 3.11 we can parse pyproject.toml to determine the build
+# API entry point directly
+python_pep517_do_compile () {
+ cd ${PEP517_SOURCE_PATH}
+ nativepython3 -c "import ${PEP517_BUILD_API} as api; api.build_wheel('${PEP517_WHEEL_PATH}')"
+}
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+python_pep517_do_install () {
+ COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
+ if test $COUNT -eq 0; then
+ bbfatal No wheels found in ${PEP517_WHEEL_PATH}
+ elif test $COUNT -gt 1; then
+ bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen
+ fi
+
+ nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
+python_pep517_do_bootstrap_install () {
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
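The companion python_flit_core (above) and python_poetry_core (below) classes show the intended usage pattern for python_pep517: inherit it, add the backend's -native dependency, and point PEP517_BUILD_API at the backend module. A class for some other PEP-517 backend would look roughly like this (the backend name and dependency are hypothetical):

    inherit python_pep517 python3native setuptools3-base
    DEPENDS += "python3-somebackend-native"
    PEP517_BUILD_API = "somebackend.buildapi"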
diff --git a/meta/classes/python_poetry_core.bbclass b/meta/classes/python_poetry_core.bbclass
new file mode 100644
index 0000000000..577663b8f1
--- /dev/null
+++ b/meta/classes/python_poetry_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native setuptools3-base
+
+DEPENDS += "python3-poetry-core-native"
+
+PEP517_BUILD_API = "poetry.core.masonry.api"
diff --git a/meta/classes/python_pyo3.bbclass b/meta/classes/python_pyo3.bbclass
new file mode 100644
index 0000000000..10cc3a0645
--- /dev/null
+++ b/meta/classes/python_pyo3.bbclass
@@ -0,0 +1,30 @@
+#
+# This class helps make sure that Python extensions built with PyO3
+# and setuptools_rust properly set up the environment for cross compilation
+#
+
+inherit cargo python3-dir siteinfo
+
+export PYO3_CROSS="1"
+export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}"
+export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}"
+export CARGO_BUILD_TARGET="${HOST_SYS}"
+export RUSTFLAGS
+export PYO3_PYTHON="${PYTHON}"
+export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config"
+
+python_pyo3_do_configure () {
+ cat > ${WORKDIR}/pyo3.config << EOF
+implementation=CPython
+version=${PYTHON_BASEVERSION}
+shared=true
+abi3=false
+lib_name=${PYTHON_DIR}
+lib_dir=${STAGING_LIBDIR}
+pointer_width=${SITEINFO_BITS}
+build_flags=WITH_THREAD
+suppress_build_script_link_lines=false
+EOF
+}
+
+EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/python_setuptools3_rust.bbclass b/meta/classes/python_setuptools3_rust.bbclass
new file mode 100644
index 0000000000..f12e5d0cbd
--- /dev/null
+++ b/meta/classes/python_setuptools3_rust.bbclass
@@ -0,0 +1,11 @@
+inherit python_pyo3 setuptools3
+
+DEPENDS += "python3-setuptools-rust-native"
+
+python_setuptools3_rust_do_configure() {
+ python_pyo3_do_configure
+ cargo_common_do_configure
+ setuptools3_do_configure
+}
+
+EXPORT_FUNCTIONS do_configure
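A recipe for a PyO3/setuptools_rust extension would then inherit python_setuptools3_rust and pin its Rust dependencies via the crate fetcher; a hedged sketch (the crate name and version are illustrative):

    inherit python_setuptools3_rust
    SRC_URI += "crate://crates.io/pyo3/0.15.1"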
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index 55bdff816b..01a7b86ae1 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -64,4 +64,4 @@ QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
-QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER8"
+QEMU_EXTRAOPTIONS:powerpc64le = " -cpu POWER8"
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 4162c4e790..ad8489902a 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -19,6 +19,9 @@
# QB_CPU_KVM: similar to QB_CPU, but used when kvm is enabled, e.g., '-cpu kvm64',
# set it when kvm is supported.
#
+# QB_SMP: number of CPU cores inside the qemu guest, each mapped to a thread on the host,
+# e.g. "-smp 8".
+#
# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
# option, e.g., "console=ttyS0 console=tty"
#
@@ -26,10 +29,15 @@
#
# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa", set it when audio is supported
#
-# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
+# QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used
# when QB_AUDIO_DRV is set.
#
+# QB_RNG: pass-through of the host random number generator; this can speed up boot
+# in system mode when the system is experiencing entropy starvation
+#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
+# By default "/dev/vda rw" gets passed to the kernel.
+# To mount the rootfs read-only QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro".
#
# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
# it needs to work together with QB_TAP_OPT and QB_SLIRP_OPT.
@@ -37,10 +45,10 @@
# a custom one, but that may cause conflicts when multiple qemus are
# running on the same host.
# Note: If more than one interface of type -device virtio-net-device gets added,
-# QB_NETWORK_DEVICE_prepend might be used, since Qemu enumerates the eth*
+# QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth*
# devices in reverse order to -device arguments.
#
-# QB_TAP_OPT: netowrk option for 'tap' mode, e.g.,
+# QB_TAP_OPT: network option for 'tap' mode, e.g.,
# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
#
@@ -69,23 +77,31 @@
# Can be used to automatically determine the image from the other variables
# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
# without the need to specify a dedicated qemu configuration
+#
+# QB_GRAPHICS: QEMU video card type (e.g. "-vga std")
+#
# Usage:
# IMAGE_CLASSES += "qemuboot"
# See "runqemu help" for more info
QB_MEM ?= "-m 256"
+QB_SMP ?= ""
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
+QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
QB_OPT_APPEND ?= ""
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
-QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
+QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
QB_ROOTFS_EXTRA_OPT ?= ""
+QB_GRAPHICS ?= ""
# This should be kept aligned with ROOT_VM
QB_DRIVE_TYPE ?= "/dev/sd"
+inherit image-artifact-names
+
# Create qemuboot.conf
addtask do_write_qemuboot_conf after do_rootfs before do_image
@@ -93,7 +109,7 @@ def qemuboot_vars(d):
build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
- 'STAGING_DIR_HOST']
+ 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
return build_vars + [k for k in d.keys() if k.startswith('QB_')]
do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
@@ -102,12 +118,17 @@ python do_write_qemuboot_conf() {
import configparser
qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ if d.getVar('IMAGE_LINK_NAME'):
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ else:
+ qemuboot_link = ""
finalpath = d.getVar("DEPLOY_DIR_IMAGE")
topdir = d.getVar('TOPDIR')
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
for k in sorted(qemuboot_vars(d)):
+ if ":" in k:
+ continue
# qemu-helper-native sysroot is not removed by rm_work and
# contains all tools required by runqemu
if k == 'STAGING_BINDIR_NATIVE':
@@ -115,6 +136,8 @@ python do_write_qemuboot_conf() {
'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
else:
val = d.getVar(k)
+ if val is None:
+ continue
# we only want to write out relative paths so that we can relocate images
# and still run them
if val.startswith(topdir):
@@ -135,7 +158,7 @@ python do_write_qemuboot_conf() {
with open(qemuboot, 'w') as f:
cf.write(f)
- if qemuboot_link != qemuboot:
+ if qemuboot_link and qemuboot_link != qemuboot:
if os.path.lexists(qemuboot_link):
os.remove(qemuboot_link)
os.symlink(os.path.basename(qemuboot), qemuboot_link)
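Taken together, the new QB_* knobs documented above land in the generated qemuboot.conf and are consumed by runqemu. A hypothetical machine configuration fragment exercising them, using the example values from the comments:

    QB_SMP = "-smp 8"
    QB_GRAPHICS = "-vga std"
    # Mount the rootfs read-only inside the guest
    QB_KERNEL_ROOT = "/dev/vda ro"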
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 1a12db1206..de48e4ff0f 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -64,6 +64,8 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['bitbake_version'] = e.data.getVar("BB_VERSION")
+ data['layer_version'] = get_layers_branch_rev(e.data)
data['local_conf'] = get_conf_data(e, 'local.conf')
data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
deleted file mode 100644
index 8da40f656a..0000000000
--- a/meta/classes/reproducible_build.bbclass
+++ /dev/null
@@ -1,202 +0,0 @@
-# reproducible_build.bbclass
-#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
-# Upstream components (generally) respect this environment variable,
-# using it in place of the "current" date and time.
-# See https://reproducible-builds.org/specs/source-date-epoch/
-#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
-#
-# There are 4 ways we determine SOURCE_DATE_EPOCH:
-#
-# 1. Use the value from __source_date_epoch.txt file if this file exists.
-# This file was most likely created in the previous build by one of the following methods 2,3,4.
-# Alternatively, it can be provided by a recipe via SRC_URI.
-#
-# If the file does not exist:
-#
-# 2. If there is a git checkout, use the last git commit timestamp.
-# Git does not preserve file timestamps on checkout.
-#
-# 3. Use the mtime of "known" files such as NEWS, CHANGLELOG, ...
-# This works for well-kept repositories distributed via tarball.
-#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
-# This will be the newest file from the distribution tarball, if any.
-#
-# 5. Fall back to a fixed timestamp.
-#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
-
-BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
-
-SDE_DIR ="${WORKDIR}/source-date-epoch"
-SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
-SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
-
-SSTATETASKS += "do_deploy_source_date_epoch"
-
-do_deploy_source_date_epoch () {
- mkdir -p ${SDE_DEPLOYDIR}
- if [ -e ${SDE_FILE} ]; then
- echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
- cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
- else
- echo "${SDE_FILE} not found!"
- fi
-}
-
-python do_deploy_source_date_epoch_setscene () {
- sstate_setscene(d)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
- if os.path.exists(sde_file):
- target = d.getVar('SDE_FILE')
- bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
- os.rename(sde_file, target)
- else:
- bb.debug(1, "%s not found!" % sde_file)
-}
-
-do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
-addtask do_deploy_source_date_epoch_setscene
-addtask do_deploy_source_date_epoch before do_configure after do_patch
-
-def get_source_date_epoch_from_known_files(d, sourcedir):
- source_date_epoch = None
- newest_file = None
- known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
- for file in known_files:
- filepath = os.path.join(sourcedir, file)
- if os.path.isfile(filepath):
- mtime = int(os.lstat(filepath).st_mtime)
- # There may be more than one "known_file" present, if so, use the youngest one
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filepath
- if newest_file:
- bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
- return source_date_epoch
-
-def find_git_folder(d, sourcedir):
- # First guess: WORKDIR/git
- # This is the default git fetcher unpack path
- workdir = d.getVar('WORKDIR')
- gitpath = os.path.join(workdir, "git/.git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Second guess: ${S}
- gitpath = os.path.join(sourcedir, ".git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Perhaps there was a subpath or destsuffix specified.
- # Go looking in the WORKDIR
- exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
- "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
- for root, dirs, files in os.walk(workdir, topdown=True):
- dirs[:] = [d for d in dirs if d not in exclude]
- if '.git' in dirs:
- return root
-
- bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
- return None
-
-def get_source_date_epoch_from_git(d, sourcedir):
- source_date_epoch = None
- if "git://" in d.getVar('SRC_URI'):
- gitpath = find_git_folder(d, sourcedir)
- if gitpath:
- import subprocess
- source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
- bb.debug(1, "git repository: %s" % gitpath)
- return source_date_epoch
-
-def get_source_date_epoch_from_youngest_file(d, sourcedir):
- if sourcedir == d.getVar('WORKDIR'):
- # These sources are almost certainly not from a tarball
- return None
-
- # Do it the hard way: check all files and find the youngest one...
- source_date_epoch = None
- newest_file = None
- for root, dirs, files in os.walk(sourcedir, topdown=True):
- files = [f for f in files if not f[0] == '.']
-
- for fname in files:
- filename = os.path.join(root, fname)
- try:
- mtime = int(os.lstat(filename).st_mtime)
- except ValueError:
- mtime = 0
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filename
-
- if newest_file:
- bb.debug(1, "Newest file found: %s" % newest_file)
- return source_date_epoch
-
-def fixed_source_date_epoch():
- bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
- return 0
-
-python create_source_date_epoch_stamp() {
- epochfile = d.getVar('SDE_FILE')
- # If it exists we need to regenerate as the sources may have changed
- if os.path.isfile(epochfile):
- bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
- os.remove(epochfile)
-
- sourcedir = d.getVar('S')
- source_date_epoch = (
- get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
- get_source_date_epoch_from_youngest_file(d, sourcedir) or
- fixed_source_date_epoch() # Last resort
- )
-
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(epochfile, 'w') as f:
- f.write(str(source_date_epoch))
-}
-
-def get_source_date_epoch_value(d):
- cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
- if cached:
- return cached
-
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = 0
- if os.path.isfile(epochfile):
- with open(epochfile, 'r') as f:
- s = f.read()
- try:
- source_date_epoch = int(s)
- except ValueError:
- bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
- source_date_epoch = 0
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- else:
- bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
-
- d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
- return str(source_date_epoch)
-
-export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
-BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
-}
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
deleted file mode 100644
index 393372993d..0000000000
--- a/meta/classes/reproducible_build_simple.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
-# Setup default environment for reproducible builds.
-
-BUILD_REPRODUCIBLE_BINARIES = "1"
-
-export PYTHONHASHSEED = "0"
-export PERL_HASH_SEED = "0"
-export SOURCE_DATE_EPOCH ??= "1520598896"
-
-REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..5f12d5aaeb 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -13,7 +13,7 @@
# Recipes can also configure which entries in their ${WORKDIR}
# are preserved besides temp, which already gets excluded by default
# because it contains logs:
-# do_install_append () {
+# do_install:append () {
# echo "bar" >${WORKDIR}/foo
# }
# RM_WORK_EXCLUDE_ITEMS += "foo"
@@ -24,7 +24,7 @@ RM_WORK_EXCLUDE_ITEMS = "temp"
BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
-BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
+BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
@@ -73,7 +73,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f $i;
+ rm -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +90,7 @@ do_rm_work () {
;;
esac
done
- rm -f $i
+ rm -f -- $i
esac
done
@@ -100,9 +100,9 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 servers.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ rm -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ rm -rf -- $dir
fi
done
}
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
index 7c00bea597..15e6091b9d 100644
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -28,6 +28,6 @@ inherit rm_work
# Instead go up one level and remove ourself.
DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
-do_rm_work_append () {
+do_rm_work:append () {
rm -rf ${DL_DIR}
}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index c43b9a9823..7b92df69c5 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -21,12 +21,12 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
# otherwise kernel or initramfs end up mounting the rootfs read/write
# (the default) if supported by the underlying storage.
#
-# We do this with _append because the default value might get set later with ?=
+# We do this with :append because the default value might get set later with ?=
# and we don't want to disable such a default by setting a value here.
-APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
+APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates a test data file with datastore variables expanded, in JSON format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
@@ -34,11 +34,15 @@ ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
-SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
+SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check;", "", d)}'
+
+inherit image-artifact-names
+
# Sort the user and group entries in /etc by ID in order to make the content
# deterministic. Package installs are not deterministic, causing the ordering
# of entries to change between builds. If this isn't desired,
@@ -48,7 +52,7 @@ ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
# the numeric IDs of dynamically created entries remain stable.
#
# We want this to run as late as possible, in particular after
-# systemd_sysusers_create and set_user_group. Using _append is not
+# systemd_sysusers_create and set_user_group. Using :append is not
# enough for that; set_user_group is added that way and would end
# up running after us.
SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
@@ -58,7 +62,7 @@ python () {
}
systemd_create_users () {
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
[ -e $conffile ] || continue
grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
if [ "$type" = "u" ]; then
@@ -74,12 +78,8 @@ systemd_create_users () {
eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
elif [ "$type" = "m" ]; then
group=$id
- if [ ! `grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group` ]; then
- eval groupadd --root ${IMAGE_ROOTFS} --system $group
- fi
- if [ ! `grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd` ]; then
- eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
- fi
+ eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
+ eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
fi
done
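
Editorial note: the loop above follows the sysusers.d record format, where each
non-comment line is 'type name id comment', with type 'u' for users, 'g' for
groups and 'm' for memberships (for 'm' records the id field carries the group
name, matching group=$id above). A standalone Python sketch of the same parse,
with illustrative sample lines:

    samples = [
        "u systemd-network 101 'systemd Network Management'",
        "g render 105",
        "m systemd-network systemd-journal",
    ]
    for line in samples:
        # Pad so short records still unpack into four fields, like shell 'read'
        rtype, name, rid, comment = (line.split(maxsplit=3) + ["", "", ""])[:4]
        print(rtype, name, rid, comment)
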
@@ -214,8 +214,8 @@ postinst_enable_logging () {
# Modify systemd default target
#
set_systemd_default_target () {
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
- ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then
+ ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
fi
}
@@ -371,3 +371,46 @@ rootfs_reproducible () {
fi
fi
}
+
+# Perform a simple check for unit existence, not its validity
+python overlayfs_qa_check() {
+ from oe.overlayfs import mountUnitName
+
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {}
+ imagepath = d.getVar("IMAGE_ROOTFS")
+ sysconfdir = d.getVar("sysconfdir")
+ searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"),
+ oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))]
+ fstabpath = oe.path.join(imagepath, sysconfdir, "fstab")
+
+ if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]):
+ return
+
+ fstabDevices = []
+ if os.path.isfile(fstabpath):
+ with open(fstabpath, 'r') as f:
+ for line in f:
+ if line[0] == '#':
+ continue
+ path = line.split(maxsplit=2)
+ if len(path) > 2:
+ fstabDevices.append(path[1])
+
+ allUnitExist = True
+ for mountPoint in overlayMountPoints:
+ mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
+ if mountPath in fstabDevices:
+ continue
+
+ mountUnit = mountUnitName(mountPath)
+ if any(os.path.isfile(oe.path.join(dirpath, mountUnit))
+ for dirpath in searchpaths):
+ continue
+
+ bb.warn('Mount path %s not found in fstab and unit %s not found '
+ 'in systemd unit directories' % (mountPath, mountUnit))
+ allUnitExist = False
+
+ if not allUnitExist:
+ bb.fatal('Not all mount paths and units are installed in the image')
+}
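
Editorial note: overlayfs_qa_check() relies on oe.overlayfs.mountUnitName to map a
mount point to the systemd mount unit expected to provide it. A rough stand-in,
assuming systemd's basic naming rule (leading slash dropped, remaining slashes
become dashes, '.mount' suffix; full escaping of unusual characters is what
systemd-escape does and is omitted here):

    def mount_unit_name(mount_path):
        # e.g. '/var/log' -> 'var-log.mount'
        return mount_path.lstrip('/').replace('/', '-') + '.mount'

    assert mount_unit_name('/data') == 'data.mount'
    assert mount_unit_name('/var/log') == 'var-log.mount'
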
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index 2b93796a76..0469ba7059 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -7,7 +7,7 @@ ROOTFS_PKGMANAGE = "dpkg apt"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
@@ -32,4 +32,8 @@ python () {
d.setVar('DEB_SDK_ARCH', 'amd64')
elif darch == "arm":
d.setVar('DEB_SDK_ARCH', 'armel')
+ elif darch == "aarch64":
+ d.setVar('DEB_SDK_ARCH', 'arm64')
+ else:
+ bb.fatal("Unhandled SDK_ARCH %s" % darch)
}
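
Editorial note: the new aarch64 branch and the fatal fallback make the SDK
architecture mapping total, so every supported SDK_ARCH maps to a Debian
architecture name and anything else aborts instead of silently producing a broken
SDK. Restated as a lookup table (a sketch only; the x86_64/amd64 pairing is
inferred from the surrounding context, as its condition line is outside the hunk):

    def deb_sdk_arch(darch):
        table = {"x86_64": "amd64", "arm": "armel", "aarch64": "arm64"}
        if darch not in table:
            raise SystemExit("Unhandled SDK_ARCH %s" % darch)
        return table[darch]
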
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index e73d2bfdae..245c256a6f 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -11,11 +11,11 @@ ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
+do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 51f89ea990..bec4d63ed6 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -4,12 +4,12 @@
ROOTFS_PKGMANAGE = "rpm dnf"
-# dnf is using our custom distutils, and so will fail without these
+# dnf is using our custom sysconfig module, and so will fail without these
export STAGING_INCDIR
export STAGING_LIBDIR
# Add 100Meg of extra space for dnf
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
+IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
# Dnf is python based, so be sure python3-native is available to us.
EXTRANATIVEPATH += "python3-native"
@@ -24,7 +24,7 @@ do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
python () {
if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e3647..85c7ec7434 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes/rust-bin.bbclass b/meta/classes/rust-bin.bbclass
new file mode 100644
index 0000000000..c87343b3cf
--- /dev/null
+++ b/meta/classes/rust-bin.bbclass
@@ -0,0 +1,149 @@
+inherit rust
+
+RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
+
+RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
+EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
+
+# Some libraries overlap with the standard library, but libstd is configured
+# to make it difficult or impossible to use its version. Unfortunately libstd
+# must be explicitly overridden using extern.
+OVERLAP_LIBS = "\
+ libc \
+ log \
+ getopts \
+ rand \
+"
+def get_overlap_deps(d):
+ deps = d.getVar("DEPENDS").split()
+ overlap_deps = []
+ for o in d.getVar("OVERLAP_LIBS").split():
+ l = len([o for dep in deps if (o + '-rs' in dep)])
+ if l > 0:
+ overlap_deps.append(o)
+ return " ".join(overlap_deps)
+OVERLAP_DEPS = "${@get_overlap_deps(d)}"
+
+# Prevents multiple static copies of standard library modules
+# See https://github.com/rust-lang/rust/issues/19680
+RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
+RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
+
+CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
+BINNAME ?= "${BPN}"
+LIBNAME ?= "lib${CRATE_NAME}-rs"
+CRATE_TYPE ?= "dylib"
+BIN_SRC ?= "${S}/src/main.rs"
+LIB_SRC ?= "${S}/src/lib.rs"
+
+rustbindest ?= "${bindir}"
+rustlibdest ?= "${rustlibdir}"
+RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
+
+def relative_rpaths(paths, base):
+ relpaths = set()
+ for p in paths.split(':'):
+ if p == base:
+ relpaths.add('$ORIGIN')
+ continue
+ relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
+ return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
+
+RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
+RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
+
+def libfilename(d):
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ return d.getVar('LIBNAME', True) + '.so'
+ else:
+ return d.getVar('LIBNAME', True) + '.rlib'
+
+def link_args(d, bin):
+ linkargs = []
+ if bin:
+ rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
+ else:
+ rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ linkargs.append('-soname')
+ linkargs.append(libfilename(d))
+ if len(rpaths):
+ linkargs.append(rpaths)
+ if len(linkargs):
+ return ' '.join(['-Wl,' + arg for arg in linkargs])
+ else:
+ return ''
+
+get_overlap_externs () {
+ externs=
+ for dep in ${OVERLAP_DEPS}; do
+ extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
+ | awk '{print $1}');
+ if [ -n "$extern" ]; then
+ externs="$externs --extern $dep=$extern"
+ else
+ echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
+ exit 1
+ fi
+ done
+ echo "$externs"
+}
+
+do_configure () {
+}
+
+oe_runrustc () {
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+ "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+}
+
+oe_compile_rust_lib () {
+ rm -rf ${LIBNAME}.{rlib,so}
+ local -a link_args
+ if [ -n '${@link_args(d, False)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, False)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${LIB_SRC} \
+ -o ${@libfilename(d)} \
+ --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
+ "$@"
+}
+oe_compile_rust_lib[vardeps] += "get_overlap_externs"
+
+oe_compile_rust_bin () {
+ rm -rf ${BINNAME}
+ local -a link_args
+ if [ -n '${@link_args(d, True)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, True)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${BIN_SRC} -o ${BINNAME} "$@"
+}
+oe_compile_rust_bin[vardeps] += "get_overlap_externs"
+
+oe_install_rust_lib () {
+ for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
+ echo Installing $lib
+ install -D -m 755 $lib ${D}/${rustlibdest}/$lib
+ done
+}
+
+oe_install_rust_bin () {
+ echo Installing ${BINNAME}
+ install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
+}
+
+do_rust_bin_fixups() {
+ for f in `find ${PKGD} -name '*.so*'`; do
+ echo "Strip rust note: $f"
+ ${OBJCOPY} -R .note.rustc $f $f
+ done
+}
+PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
+
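Editorial note: relative_rpaths() above turns the absolute RUST_RPATH_ABS entries
into $ORIGIN-relative rpaths so installed binaries stay relocatable. A
self-contained rerun of the same logic with illustrative paths (the result order
can vary because a set is used):

    import os

    def relative_rpaths(paths, base):
        relpaths = set()
        for p in paths.split(':'):
            if p == base:
                relpaths.add('$ORIGIN')
                continue
            relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
        return '-rpath=' + ':'.join(relpaths) if relpaths else ''

    print(relative_rpaths('/usr/lib/rust:/usr/lib', '/usr/bin'))
    # e.g. -rpath=$ORIGIN/../lib/rust:$ORIGIN/../lib
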
diff --git a/meta/classes/rust-common.bbclass b/meta/classes/rust-common.bbclass
new file mode 100644
index 0000000000..65ad677499
--- /dev/null
+++ b/meta/classes/rust-common.bbclass
@@ -0,0 +1,185 @@
+inherit python3native
+
+# Common variables used by all Rust builds
+export rustlibdir = "${libdir}/rust"
+FILES:${PN} += "${rustlibdir}/*.so"
+FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
+FILES:${PN}-dbg += "${rustlibdir}/.debug"
+
+RUSTLIB = "-L ${STAGING_LIBDIR}/rust"
+RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
+RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
+RUSTLIB_DEP ?= "libstd-rs"
+export RUST_TARGET_PATH = "${STAGING_LIBDIR_NATIVE}/rustlib"
+RUST_PANIC_STRATEGY ?= "unwind"
+
+# Native builds are not affected by TCLIBC. Without this, rust-native
+# thinks its "target" (i.e. x86_64-linux) is a musl target.
+RUST_LIBC = "${TCLIBC}"
+RUST_LIBC:class-native = "glibc"
+
+def determine_libc(d, thing):
+ '''Determine which libc something should target'''
+
+ # BUILD is never musl, TARGET may be musl or glibc,
+ # HOST could be musl, but only if a compiler is built to be run on
+ # target in which case HOST_SYS != BUILD_SYS.
+ if thing == 'TARGET':
+ libc = d.getVar('RUST_LIBC')
+ elif thing == 'BUILD' and (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ libc = d.getVar('RUST_LIBC')
+ else:
+ libc = d.getVar('RUST_LIBC:class-native')
+
+ return libc
+
+def target_is_armv7(d):
+ '''Determine if target is armv7'''
+ # TUNE_FEATURES may include arm* even if the target is not arm
+ # in the case of *-native packages
+ if d.getVar('TARGET_ARCH') != 'arm':
+ return False
+
+ feat = d.getVar('TUNE_FEATURES')
+ feat = frozenset(feat.split())
+ mach_overrides = d.getVar('MACHINEOVERRIDES')
+ mach_overrides = frozenset(mach_overrides.split(':'))
+
+ v7=frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve'])
+ if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7):
+ return False
+ else:
+ return True
+target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}"
+
+# Responsible for taking Yocto triples and converting them to Rust triples
+def rust_base_triple(d, thing):
+ '''
+ Mangle bitbake's *_SYS into something that rust might support (see
+ rust/mk/cfg/* for a list)
+
+ Note that os is assumed to be some linux form
+ '''
+
+ # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf
+ if thing == "TARGET" and target_is_armv7(d):
+ arch = "armv7"
+ else:
+ arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
+
+ # All the Yocto targets are Linux and are 'unknown'
+ vendor = "-unknown"
+ os = d.getVar('{}_OS'.format(thing))
+ libc = determine_libc(d, thing)
+
+ # Prefix with a dash and convert glibc -> gnu
+ if libc == "glibc":
+ libc = "-gnu"
+ elif libc == "musl":
+ libc = "-musl"
+
+ # Don't double up musl (only appears to be the case on aarch64)
+ if os == "linux-musl":
+ if libc != "-musl":
+ bb.fatal("{}_OS was '{}' but TCLIBC was not 'musl'".format(thing, os))
+ os = "linux"
+
+ # This catches ARM targets and appends the necessary hard float bits
+ if os == "linux-gnueabi" or os == "linux-musleabi":
+ libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
+ return arch + vendor + '-' + os + libc
+
+
+# In some cases uname and the toolchain differ on their idea of the arch name
+RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
+
+# Naming explanation
+# Yocto
+# - BUILD_SYS - Yocto triple of the build environment
+# - HOST_SYS - What we're building for in Yocto
+# - TARGET_SYS - What we're building for in Yocto
+#
+# So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS
+# When building packages for the image HOST_SYS == TARGET_SYS
+# This is a gross oversimplification as there are other modes, but
+# currently this is all that's supported.
+#
+# Rust
+# - TARGET - the system where the binary will run
+# - HOST - the system where the binary is being built
+#
+# Rust additionally uses two other cases:
+# - undecorated (e.g. CC) - equivalent to TARGET
+# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
+# see: https://github.com/alexcrichton/gcc-rs
+# Given the way Rust's internal triples and Yocto triples are mapped together,
+# it's likely best not to use the triple suffix, to avoid confusion.
+
+RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
+RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
+RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
+
+# Wrappers to work around the fact that Rust expects a single
+# binary, while Yocto's compiler and linker commands carry
+# arguments. Technically the archiver is always one command, but
+# this is necessary for builds that determine the prefix and then
+# use those commands based on the prefix.
+WRAPPER_DIR = "${WORKDIR}/wrapper"
+RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc"
+RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx"
+RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld"
+RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar"
+RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc"
+RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx"
+RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld"
+RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar"
+
+create_wrapper () {
+ file="$1"
+ shift
+
+ cat <<- EOF > "${file}"
+ #!/usr/bin/env python3
+ import os, sys
+ orig_binary = "$@"
+ binary = orig_binary.split()[0]
+ args = orig_binary.split() + sys.argv[1:]
+ os.execvp(binary, args)
+ EOF
+ chmod +x "${file}"
+}
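
Editorial note: for illustration, invoking create_wrapper with a hypothetical
compiler command, say create_wrapper ${RUST_TARGET_CC} arm-poky-linux-gnueabi-gcc
-mfloat-abi=hard, would emit a wrapper like the one below; the stored command is
re-split and the caller's arguments are appended before exec'ing the real binary:

    #!/usr/bin/env python3
    import os, sys
    orig_binary = "arm-poky-linux-gnueabi-gcc -mfloat-abi=hard"
    binary = orig_binary.split()[0]
    args = orig_binary.split() + sys.argv[1:]
    os.execvp(binary, args)
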
+
+export WRAPPER_TARGET_CC = "${CC}"
+export WRAPPER_TARGET_CXX = "${CXX}"
+export WRAPPER_TARGET_CCLD = "${CCLD}"
+export WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
+export WRAPPER_TARGET_AR = "${AR}"
+
+# compiler is used by gcc-rs
+# linker is used by rustc/cargo
+# archiver is used by the build of libstd-rs
+do_rust_create_wrappers () {
+ mkdir -p "${WRAPPER_DIR}"
+
+ # Yocto Build / Rust Host C compiler
+ create_wrapper "${RUST_BUILD_CC}" "${BUILD_CC}"
+ # Yocto Build / Rust Host C++ compiler
+ create_wrapper "${RUST_BUILD_CXX}" "${BUILD_CXX}"
+ # Yocto Build / Rust Host linker
+ create_wrapper "${RUST_BUILD_CCLD}" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
+ # Yocto Build / Rust Host archiver
+ create_wrapper "${RUST_BUILD_AR}" "${BUILD_AR}"
+
+ # Yocto Target / Rust Target C compiler
+ create_wrapper "${RUST_TARGET_CC}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target C++ compiler
+ create_wrapper "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_CXX}"
+ # Yocto Target / Rust Target linker
+ create_wrapper "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target archiver
+ create_wrapper "${RUST_TARGET_AR}" "${WRAPPER_TARGET_AR}"
+
+}
+
+addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot
+do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}"
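
Editorial note: a worked example of rust_base_triple() as a standalone sketch,
with the datastore lookups replaced by plain arguments; the inputs and expected
outputs below are illustrative:

    def rust_triple(arch, os_, libc, hardfloat=False):
        vendor = "-unknown"
        suffix = {"glibc": "-gnu", "musl": "-musl"}.get(libc, "")
        if os_ == "linux-musl":          # don't double up musl
            os_ = "linux"
        if os_ in ("linux-gnueabi", "linux-musleabi"):
            suffix = "hf" if hardfloat else ""
        return arch + vendor + "-" + os_ + suffix

    print(rust_triple("x86_64", "linux", "glibc"))       # x86_64-unknown-linux-gnu
    print(rust_triple("aarch64", "linux-musl", "musl"))  # aarch64-unknown-linux-musl
    print(rust_triple("armv7", "linux-gnueabi", "glibc", hardfloat=True))
    # armv7-unknown-linux-gnueabihf
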
diff --git a/meta/classes/rust.bbclass b/meta/classes/rust.bbclass
new file mode 100644
index 0000000000..5c8938d09f
--- /dev/null
+++ b/meta/classes/rust.bbclass
@@ -0,0 +1,45 @@
+inherit rust-common
+
+RUSTC = "rustc"
+
+RUSTC_ARCHFLAGS += "--target=${HOST_SYS} ${RUSTFLAGS}"
+
+def rust_base_dep(d):
+ # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to
+ # use rust instead of gcc
+ deps = ""
+ if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'):
+ if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ deps += " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+ else:
+ deps += " rust-native"
+ return deps
+
+DEPENDS:append = " ${@rust_base_dep(d)}"
+
+# BUILD_LDFLAGS
+# ${STAGING_LIBDIR_NATIVE}
+# ${STAGING_BASE_LIBDIR_NATIVE}
+# BUILDSDK_LDFLAGS
+# ${STAGING_LIBDIR}
+# #{STAGING_DIR_HOST}
+# TARGET_LDFLAGS ?????
+#RUSTC_BUILD_LDFLAGS = "\
+# --sysroot ${STAGING_DIR_NATIVE} \
+# -L${STAGING_LIBDIR_NATIVE} \
+# -L${STAGING_BASE_LIBDIR_NATIVE} \
+#"
+
+# XXX: for some reason bitbake sets BUILD_* & TARGET_* but uses the bare
+# variables for HOST. Alias things to make it easier for us.
+HOST_LDFLAGS ?= "${LDFLAGS}"
+HOST_CFLAGS ?= "${CFLAGS}"
+HOST_CXXFLAGS ?= "${CXXFLAGS}"
+HOST_CPPFLAGS ?= "${CPPFLAGS}"
+
+rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${HOST_SYS}/lib"
+# Native sysroot standard library path
+rustlib_src="${prefix}/lib/${rustlib_suffix}"
+# Host sysroot standard library path
+rustlib="${libdir}/${rustlib_suffix}"
+rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index e021b9d240..92807dc88e 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -185,37 +185,6 @@ def raise_sanity_error(msg, d, network_error=False):
%s""" % msg)
-# Check flags associated with a tuning.
-def check_toolchain_tune_args(data, tune, multilib, errs):
- found_errors = False
- if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
- found_errors = True
-
- return found_errors
-
-def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
- args_missing = []
-
- # If no args are listed/required, we are done.
- if not args_wanted:
- return
- for arg in args_wanted:
- if arg not in args_set:
- args_missing.append(arg)
-
- found_errors = False
- if args_missing:
- found_errors = True
- tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
- (tune, ' '.join(args_missing), which, ' '.join(args_set)))
- return found_errors
-
# Check a single tune for validity.
def check_toolchain_tune(data, tune, multilib):
tune_errors = []
@@ -227,7 +196,7 @@ def check_toolchain_tune(data, tune, multilib):
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
localdata.setVar("OVERRIDES", overrides)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
@@ -247,17 +216,6 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST")
- if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
- if not tuneabi:
- tuneabi = tune
- if True not in [x in whitelist.split() for x in tuneabi.split()]:
- tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
- (tune, tuneabi))
- else:
- if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
- bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
if tune_errors:
return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
@@ -281,7 +239,7 @@ def check_toolchain(data):
seen_libs.append(lib)
if not lib in global_multilibs:
tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
+ tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
if tune in seen_tunes:
tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
else:
@@ -392,9 +350,12 @@ def check_connectivity(d):
msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
if len(msg) == 0:
msg = "%s.\n" % err
- msg += " Please ensure your host's network is configured correctly,\n"
- msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
- msg += " all required sources are on local disk.\n"
+ msg += " Please ensure your host's network is configured correctly.\n"
+ msg += " If your ISP or network is blocking the above URL,\n"
+ msg += " try with another domain name, for example by setting:\n"
+ msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
+ msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
+ msg += " access if all required sources are on local disk.\n"
retval = msg
return retval
@@ -459,13 +420,12 @@ def check_sanity_validmachine(sanity_data):
# Patch before 2.7 can't handle all the features in git-style diffs. Some
# patches may incorrectly apply, and others won't apply at all.
def check_patch_version(sanity_data):
- from distutils.version import LooseVersion
import re, subprocess
try:
result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
- if LooseVersion(version) < LooseVersion("2.7"):
+ if bb.utils.vercmp_string_op(version, "2.7", "<"):
return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
else:
return None
@@ -475,7 +435,6 @@ def check_patch_version(sanity_data):
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
@@ -483,7 +442,7 @@ def check_make_version(sanity_data):
except subprocess.CalledProcessError as e:
return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) == LooseVersion("3.82"):
+ if bb.utils.vercmp_string_op(version, "3.82", "=="):
# Construct a test file
f = open("makefile_test", "w")
f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
@@ -527,7 +486,7 @@ def check_wsl(d):
bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
return None
-# Require at least gcc version 6.0.
+# Require at least gcc version 7.5.
#
# This can be fixed on CentOS-7 with devtoolset-6+
# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
@@ -536,27 +495,25 @@ def check_wsl(d):
# built buildtools-extended-tarball)
#
def check_gcc_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
if build_cc.strip() == "gcc":
- if LooseVersion(version) < LooseVersion("6.0"):
- return "Your version of gcc is older than 6.0 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
+ if bb.utils.vercmp_string_op(version, "7.5", "<"):
+ return "Your version of gcc is older than 7.5 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
return None
# Tar version 1.24 and onwards handle overwriting symlinks correctly
# but earlier versions do not; this needs to work properly for sstate
# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
def check_tar_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[3]
- if LooseVersion(version) < LooseVersion("1.28"):
+ if bb.utils.vercmp_string_op(version, "1.28", "<"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
return None
@@ -564,14 +521,13 @@ def check_tar_version(sanity_data):
# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
def check_git_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) < LooseVersion("1.8.3.1"):
+ if bb.utils.vercmp_string_op(version, "1.8.3.1", "<"):
return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
return None
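
Editorial note: all of these checks replace distutils' LooseVersion (dropped along
with distutils itself) with bb.utils.vercmp_string_op(version, reference, op). A
rough pure-Python illustration of the comparison semantics for plain dotted
versions; BitBake's real implementation also copes with letters and other
separators:

    def vercmp_lt(a, b):
        # Compare dotted numeric versions component-wise, e.g. 1.8.3 < 1.8.3.1
        key = lambda v: [int(x) for x in v.split('.')]
        return key(a) < key(b)

    assert vercmp_lt("1.8.3", "1.8.3.1")       # too-old git is rejected
    assert not vercmp_lt("2.30.1", "1.8.3.1")
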
@@ -603,6 +559,24 @@ def sanity_check_conffiles(d):
bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
+def drop_v14_cross_builds(d):
+ import glob
+ indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
+ for i in indexes:
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in reversed(lines):
+ try:
+ (stamp, manifest, workdir) = l.split()
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+ for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ bb.utils.remove(workdir, recurse = True)
+
def sanity_handle_abichanges(status, d):
#
# Check the 'ABI' of TMPDIR
@@ -619,6 +593,12 @@ def sanity_handle_abichanges(status, d):
f.write(current_abi)
elif int(abi) <= 11 and current_abi == "12":
status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
+ elif int(abi) <= 13 and current_abi == "14":
+ status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
+ elif int(abi) == 14 and current_abi == "15":
+ drop_v14_cross_builds(d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -700,6 +680,23 @@ def check_sanity_version_change(status, d):
if (tmpdirmode & stat.S_ISUID):
status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
+ # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ workdir = d.getVar('WORKDIR', expand=True)
+ for i in pseudoignorepaths:
+ if i and workdir.startswith(i):
+ status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
+
+ # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
+ pseudocontroldir = d.expand(pseudo_control_dir).split(",")
+ for i in pseudoignorepaths:
+ for j in pseudocontroldir:
+ if i and j:
+ if j.startswith(i):
+ status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
+
# Some third-party software apparently relies on chmod etc. being suid root (!!)
import stat
suid_check_bins = "chown chmod mknod".split()
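
Editorial note: a standalone sketch of the overlap test above; a pseudo-controlled
directory must never sit underneath an ignored path prefix, or pseudo would lose
permission and ownership tracking for it. Paths here are illustrative:

    def find_overlaps(ignore_paths, controlled_dirs):
        return [(i, j) for i in ignore_paths for j in controlled_dirs
                if i and j and j.startswith(i)]

    # No overlap: the rootfs is not below the ignored downloads path
    print(find_overlaps(["/build/downloads"], ["/build/tmp/work/img/rootfs"]))
    # Overlap: everything under /build would be ignored, including the rootfs
    print(find_overlaps(["/build"], ["/build/tmp/work/img/rootfs"]))
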
@@ -767,15 +764,14 @@ def check_sanity_everybuild(status, d):
if 0 == os.getuid():
raise_sanity_error("Do not use Bitbake as root.", d)
- # Check the Python version, we now have a minimum of Python 3.4
+ # Check the Python version; we now require at least Python 3.6
import sys
- if sys.hexversion < 0x03040000:
- status.addresult('The system requires at least Python 3.4 to run. Please update your Python interpreter.\n')
+ if sys.hexversion < 0x030600F0:
+ status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
# Check the bitbake version meets minimum requirements
- from distutils.version import LooseVersion
minversion = d.getVar('BB_MIN_VERSION')
- if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
+ if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
sanity_check_locale(d)
@@ -784,6 +780,11 @@ def check_sanity_everybuild(status, d):
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+ # Check if bitbake is present in the PATH environment variable
+ bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
+ if not bb_check:
+ bb.warn("bitbake binary is not found in PATH, did you source the script?")
+
# Check whether the 'inherit' directive is found (used for a class to inherit);
# in conf files it's supposed to be the uppercase INHERIT
inherit = d.getVar('inherit')
@@ -857,20 +858,25 @@ def check_sanity_everybuild(status, d):
except:
pass
- oeroot = d.getVar('COREBASE')
- if oeroot.find('+') != -1:
- status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
- if oeroot.find('@') != -1:
- status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
- if oeroot.find(' ') != -1:
- status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
+ for checkdir in ['COREBASE', 'TMPDIR']:
+ val = d.getVar(checkdir)
+ if val.find('..') != -1:
+ status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
+ if val.find('+') != -1:
+ status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
+ if val.find('@') != -1:
+ status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
+ if val.find(' ') != -1:
+ status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
+ if val.find('%') != -1:
+ status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
# Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
import re
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az' ]
for mirror_var in mirror_vars:
mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 6b171ca8df..80f8382107 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -5,10 +5,9 @@ DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
do_configure() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
- ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
fi
mkdir -p `dirname ${CONFIGURESTAMPFILE}`
@@ -17,14 +16,12 @@ do_configure() {
}
scons_do_compile() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
die "scons build execution failed."
}
scons_do_install() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
die "scons install execution failed."
}
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/setuptools3-base.bbclass
index 94b5fd426d..15abe1dd63 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/setuptools3-base.bbclass
@@ -1,3 +1,7 @@
+DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
+DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
+RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
+
export STAGING_INCDIR
export STAGING_LIBDIR
@@ -11,15 +15,17 @@ export LDCXXSHARED = "${CXX} -shared"
export CCSHARED = "-fPIC -DPIC"
# LINKFORSHARED are the flags passed to the $(CC) command that links
# the python executable
-export LINKFORSHARED = "{SECURITY_CFLAGS} -Xlinker -export-dynamic"
+export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
-FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
-FILES_${PN}-staticdev += "\
+FILES:${PN}-staticdev += "\
${PYTHON_SITEPACKAGES_DIR}/*.a \
"
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/pkgconfig \
${libdir}/pkgconfig \
${PYTHON_SITEPACKAGES_DIR}/*.la \
"
+inherit python3native python3targetconfig
+
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
index 8ca66ee708..556bc801af 100644
--- a/meta/classes/setuptools3.bbclass
+++ b/meta/classes/setuptools3.bbclass
@@ -1,4 +1,33 @@
-inherit distutils3
+inherit setuptools3-base python_pep517
-DEPENDS += "python3-setuptools-native"
+# bdist_wheel builds in ./dist
+#B = "${WORKDIR}/build"
+SETUPTOOLS_BUILD_ARGS ?= ""
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_do_configure() {
+ :
+}
+
+setuptools3_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_do_compile[vardepsexclude] = "MACHINE"
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+setuptools3_do_install() {
+ python_pep517_do_install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native python3-wheel-native"
diff --git a/meta/classes/setuptools3_legacy.bbclass b/meta/classes/setuptools3_legacy.bbclass
new file mode 100644
index 0000000000..5a99daadb5
--- /dev/null
+++ b/meta/classes/setuptools3_legacy.bbclass
@@ -0,0 +1,78 @@
+# This class is for packages which use the deprecated setuptools behaviour,
+# specifically custom install tasks which don't work correctly with bdist_wheel.
+# This behaviour is deprecated in setuptools[1] and won't work in the future, so
+# all users of this should consider their options: pure Python modules can use a
+# modern Python tool such as build[2], or packages which are doing more (such as
+# installing init scripts) should use a fully-featured build system such as Meson.
+#
+# [1] https://setuptools.pypa.io/en/latest/history.html#id142
+# [2] https://pypi.org/project/build/
+
+inherit setuptools3-base
+
+B = "${WORKDIR}/build"
+
+SETUPTOOLS_BUILD_ARGS ?= ""
+SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+SETUPTOOLS_PYTHON = "python3"
+SETUPTOOLS_PYTHON:class-native = "nativepython3"
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_legacy_do_configure() {
+ :
+}
+
+setuptools3_legacy_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
+
+setuptools3_legacy_do_install() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
+
+ # support filenames with *spaces*
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \;
+
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ fi
+ done
+
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if [ -e ${D}${datadir}/share ]; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ rmdir ${D}${datadir}/share
+ fi
+}
+setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native"
+
diff --git a/meta/classes/setuptools_build_meta.bbclass b/meta/classes/setuptools_build_meta.bbclass
new file mode 100644
index 0000000000..b2bba35a0b
--- /dev/null
+++ b/meta/classes/setuptools_build_meta.bbclass
@@ -0,0 +1,5 @@
+inherit setuptools3-base python_pep517
+
+DEPENDS += "python3-setuptools-native python3-wheel-native"
+
+PEP517_BUILD_API = "setuptools.build_meta"
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 7ff3a35a2f..16bcd147aa 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -29,7 +29,7 @@ PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
# Make the feed signing key present in the rootfs
-FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
python () {
# Check sanity of configuration
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index 1a048c053f..3555d5a663 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -45,6 +45,7 @@ def siteinfo_data_for_machine(arch, os, d):
"mipsisa32r6": "endian-big bit-32 mips-common",
"mipsisa32r6el": "endian-little bit-32 mips-common",
"powerpc": "endian-big bit-32 powerpc-common",
+ "powerpcle": "endian-little bit-32 powerpc-common",
"nios2": "endian-little bit-32 nios2-common",
"powerpc64": "endian-big bit-64 powerpc-common",
"powerpc64le": "endian-little bit-64 powerpc-common",
@@ -54,7 +55,9 @@ def siteinfo_data_for_machine(arch, os, d):
"riscv32": "endian-little bit-32 riscv-common",
"riscv64": "endian-little bit-64 riscv-common",
"sh3": "endian-little bit-32 sh-common",
+ "sh3eb": "endian-big bit-32 sh-common",
"sh4": "endian-little bit-32 sh-common",
+ "sh4eb": "endian-big bit-32 sh-common",
"sparc": "endian-big bit-32",
"viac3": "endian-little bit-32 ix86-common",
"x86_64": "endian-little", # bitinfo specified in targetinfo
@@ -98,16 +101,18 @@ def siteinfo_data_for_machine(arch, os, d):
"mips64el-linux-gnun32": "mipsel-linux bit-32",
"mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
"mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
- "powerpc-linux": "powerpc32-linux",
- "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-musl": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux",
+ "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
"riscv32-linux": "riscv32-linux",
"riscv32-linux-musl": "riscv32-linux",
"riscv64-linux": "riscv64-linux",
@@ -171,17 +176,39 @@ python () {
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, sysrootcache = False):
+# Layers with siteconfig need to add a replacement path to this variable so the
+# sstate isn't path specific
+SITEINFO_PATHVARS = "COREBASE"
+
+def siteinfo_get_files(d, sysrootcache=False):
sitedata = siteinfo_data(d)
- sitefiles = ""
+ sitefiles = []
+ searched = []
for path in d.getVar("BBPATH").split(":"):
for element in sitedata:
filename = os.path.join(path, "site", element)
if os.path.exists(filename):
- sitefiles += filename + " "
+ searched.append(filename + ":True")
+ sitefiles.append(filename)
+ else:
+ searched.append(filename + ":False")
+
+ # Have to parameterise out hardcoded paths such as COREBASE for the main site files
+ for var in d.getVar("SITEINFO_PATHVARS").split():
+ searched2 = []
+ replace = os.path.normpath(d.getVar(var))
+ for s in searched:
+ searched2.append(s.replace(replace, "${" + var + "}"))
+ searched = searched2
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ # We need sstate sigs for native/cross not to vary upon arch so we can't depend on the site files.
+ # In future we may want to depend upon all site files?
+ # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
+ searched = []
if not sysrootcache:
- return sitefiles
+ return sitefiles, searched
# Now check for siteconfig cache files in sysroots
path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
@@ -190,8 +217,8 @@ def siteinfo_get_files(d, sysrootcache = False):
if not i.endswith("_config"):
continue
filename = os.path.join(path_siteconfig, i)
- sitefiles += filename + " "
- return sitefiles
+ sitefiles.append(filename)
+ return sitefiles, searched
#
# Make some information available via variables
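
Editorial note: the SITEINFO_PATHVARS substitution above rewrites hardcoded
prefixes such as COREBASE back to their variable form so that the recorded search
list, which now feeds into task signatures, is identical across build machines. A
sketch with illustrative paths:

    import os

    def parameterise(searched, varname, value):
        replace = os.path.normpath(value)
        return [s.replace(replace, "${" + varname + "}") for s in searched]

    print(parameterise(["/home/user/poky/meta/site/common-linux:True"],
                       "COREBASE", "/home/user/poky"))
    # ['${COREBASE}/meta/site/common-linux:True']
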
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 375196ef21..1c0cae4893 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1,4 +1,6 @@
-SSTATE_VERSION = "3"
+SSTATE_VERSION = "8"
+
+SSTATE_ZSTD_CLEVEL ??= "8"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
@@ -6,12 +8,12 @@ SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
def generate_sstatefn(spec, hash, taskname, siginfo, d):
if taskname is None:
return ""
- extension = ".tgz"
+ extension = ".tar.zst"
# 8 chars reserved for siginfo
limit = 254 - 8
if siginfo:
limit = 254
- extension = ".tgz.siginfo"
+ extension = ".tar.zst.siginfo"
if not hash:
hash = "INVALID"
fn = spec + hash + "_" + taskname + extension
@@ -20,7 +22,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d):
components = spec.split(":")
# Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
# 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
components[2] = components[2][:avail]
components[3] = components[3][:avail]
components[4] = components[4][:avail]
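
Editorial note: the avail fix above makes the truncation budget start from the
computed limit (246 normally, 254 for siginfo files, keeping 8 characters reserved
for the .siginfo suffix) rather than a hardcoded 254. A worked run of the
arithmetic with illustrative field sizes:

    limit, extension = 246, ".tar.zst"     # non-siginfo case: 254 - 8 reserved
    hash_, taskname = "a" * 64, "populate_sysroot"
    fixed = 30                             # assumed total of components 0,1,5,6
    avail = (limit - len(hash_ + "_" + taskname + extension) - fixed - 7) // 3
    print(avail)  # 40 -- components 2, 3 and 4 are each clipped to this
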
@@ -37,7 +39,7 @@ SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PK
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"
@@ -48,42 +50,50 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
+SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
-SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps since allarch is disabled when multilib is used
-SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
+SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
+SSTATE_HASHEQUIV_FILEMAP ?= " \
+ populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
+ populate_sysroot:*/postinst-useradd-*:${COREBASE} \
+ populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
+ populate_sysroot:*/crossscripts/*:${TMPDIR} \
+ populate_sysroot:*/crossscripts/*:${COREBASE} \
+ "
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
SSTATE_ARCHS = " \
${BUILD_ARCH} \
+ ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
- ${BUILD_ARCH}_${TARGET_ARCH} \
${SDK_ARCH}_${SDK_OS} \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
${PACKAGE_ARCH} \
${PACKAGE_EXTRA_ARCHS} \
${MACHINE_ARCH}"
+SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
@@ -105,6 +115,9 @@ SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+# List of signatures to consider valid.
+SSTATE_VALID_SIGS ??= ""
+SSTATE_VALID_SIGS[vardepvalue] = ""
SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
@@ -124,7 +137,7 @@ python () {
elif bb.data.inherits_class('crosssdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
elif bb.data.inherits_class('nativesdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
@@ -144,6 +157,8 @@ python () {
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+ d.setVarFlag(task, 'network', '1')
+ d.setVarFlag(task + "_setscene", 'network', '1')
}
def sstate_init(task, d):
@@ -244,13 +259,13 @@ def sstate_install(ss, d):
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
+ overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
match = []
for f in sharedfiles:
if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
- for w in whitelist:
+ for w in overlap_allowed:
w = os.path.normpath(w)
if f.startswith(w):
realmatch = False
@@ -280,7 +295,7 @@ def sstate_install(ss, d):
"DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
- "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
"also be your build is including two different conflicting versions of " \
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
@@ -315,6 +330,8 @@ def sstate_install(ss, d):
if os.path.exists(i):
with open(i, "r") as f:
manifests = f.readlines()
+                # We append new entries; we don't remove older entries, which may have the
+                # same manifest name but a different version in stamp/workdir. See below.
if filedata not in manifests:
with open(i, "a+") as f:
f.write(filedata)
@@ -332,7 +349,7 @@ def sstate_install(ss, d):
for lock in locks:
bb.utils.unlockfile(lock)
-sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX"
+sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
@@ -359,7 +376,7 @@ def sstate_installpkg(ss, d):
bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
return False
signer = get_signer(d, 'local')
- if not signer.verify(sstatepkg + '.sig'):
+ if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
return False
@@ -397,7 +414,7 @@ def sstate_installpkgdir(ss, d):
for state in ss['dirs']:
prepdir(state[1])
- os.rename(sstateinst + state[0], state[1])
+ bb.utils.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
for plain in ss['plaindirs']:
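os.rename() fails with EXDEV when source and destination sit on different filesystems, which can happen with split TMPDIR mounts; the switch to bb.utils.rename() here and below tolerates that. A sketch of the fallback behaviour (an assumption about what bb.utils.rename does, not a copy of it):

    import errno
    import os
    import shutil

    def rename(src, dst):
        # A plain rename cannot cross filesystem boundaries; fall back
        # to a copy-and-delete move when the kernel reports EXDEV.
        try:
            os.rename(src, dst)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.move(src, dst)
            else:
                raise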
@@ -409,7 +426,7 @@ def sstate_installpkgdir(ss, d):
dest = plain
bb.utils.mkdirhier(src)
prepdir(dest)
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
return True
@@ -477,7 +494,7 @@ def sstate_clean_cachefiles(d):
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d, prefix=None):
+def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
import oe.path
mfile = open(manifest)
@@ -495,7 +512,9 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if entry.endswith("/"):
if os.path.islink(entry[:-1]):
os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
+ # Removing directories whilst builds are in progress exposes a race. Only
+ # do it in contexts where it is safe to do so.
os.rmdir(entry[:-1])
else:
os.remove(entry)
@@ -533,7 +552,7 @@ def sstate_clean(ss, d):
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
- sstate_clean_manifest(manifest, d)
+ sstate_clean_manifest(manifest, d, canrace=True)
for lock in locks:
bb.utils.unlockfile(lock)
@@ -634,10 +653,21 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
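The fixtime/fixtimestamp additions keep do_package sstate archives reproducible: any mtime newer than SOURCE_DATE_EPOCH would leak the wall-clock build time into the packaged tree. The same clamp, extracted as a standalone illustrative sketch:

    import os
    import time

    def clamp_mtimes(tree, source_date_epoch=None):
        sde = int(source_date_epoch or time.time())
        for root, dirs, files in os.walk(tree):
            for name in files + dirs:
                p = os.path.join(root, name)
                # Operate on the entry itself, never the symlink target.
                if os.lstat(p).st_mtime > sde:
                    os.utime(p, (sde, sde), follow_symlinks=False)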
@@ -650,6 +680,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -660,7 +692,7 @@ def sstate_package(ss, d):
continue
bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- os.rename(state[1], sstatebuild + state[0])
+ bb.utils.rename(state[1], sstatebuild + state[0])
workdir = d.getVar('WORKDIR')
sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
@@ -670,7 +702,12 @@ def sstate_package(ss, d):
pdir = plain.replace(sharedworkdir, sstatebuild)
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
- os.rename(plain, pdir)
+ bb.utils.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_INSTDIR', sstatebuild)
@@ -697,9 +734,16 @@ def sstate_package(ss, d):
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
return
+sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
+
def pstaging_fetch(sstatefetch, d):
import bb.fetch2
@@ -718,6 +762,7 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('FILESPATH', dldir)
localdata.setVar('DL_DIR', dldir)
localdata.setVar('PREMIRRORS', mirrors)
+ localdata.setVar('SRCPV', d.getVar('SRCPV'))
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
@@ -742,11 +787,16 @@ def pstaging_fetch(sstatefetch, d):
except bb.fetch2.BBFetchException:
pass
+pstaging_fetch[vardepsexclude] += "SRCPV"
+
+
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- bb.fatal("No suitable staging package found")
+ msg = "No sstate archive obtainable, will run full task instead."
+ bb.warn(msg)
+ raise bb.BBHandledException(msg)
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -783,41 +833,46 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
sstate_create_package () {
# Exit early if it already exists
if [ -e ${SSTATE_PKG} ]; then
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ touch ${SSTATE_PKG} 2>/dev/null || true
return
fi
- mkdir -p `dirname ${SSTATE_PKG}`
+ mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
+ OPT="-cS"
+ ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
set +e
- tar $OPT -f $TFILE *
+ tar -I "$ZSTD" $OPT -f $TFILE *
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
else
- tar $OPT --file=$TFILE --files-from=/dev/null
+ tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
# Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
# Move into place using ln to attempt an atomic op.
	# If it already exists, another process created it first; tolerate the failure
- ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ ln $TFILE ${SSTATE_PKG} || true
else
- rm $TFILE
+ touch ${SSTATE_PKG} 2>/dev/null || true
fi
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ rm $TFILE
}
python sstate_sign_package () {
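The compression change above swaps gzip/pigz for zstd, preferring the parallel pzstd wrapper when it is installed; note the differing thread flags (-p for pzstd, -T for zstd). The selection logic restated as a small Python sketch, with SSTATE_ZSTD_CLEVEL and ZSTD_THREADS as the knobs used above:

    import shutil

    def zstd_command(level, threads):
        # Prefer pzstd when available; otherwise plain zstd with -T.
        if shutil.which("pzstd"):
            return "pzstd -%d -p %d" % (level, threads)
        return "zstd -%d -T%d" % (level, threads)

    # The result is handed to GNU tar as an external compressor, e.g.
    # tar -I "zstd -3 -T8" -cSf sstate.tar.zst .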
@@ -845,13 +900,19 @@ python sstate_report_unihash() {
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
- tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ ZSTD="zstd -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -p ${ZSTD_THREADS}"
+ fi
+
+ tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
@@ -880,21 +941,22 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
return spec, extrapath, tname
+ def getsstatefile(tid, siginfo, d):
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(tid, d)
-
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
if os.path.exists(sstatefile):
- bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
found.add(tid)
- continue
+ bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
else:
missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+ foundLocal = len(found)
mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
@@ -926,63 +988,63 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+ import traceback
+
try:
fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
found.add(tid)
- if tid in missed:
- missed.remove(tid)
- except:
- missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
- if len(tasklist) >= min_tasks:
+ missed.remove(tid)
+ except bb.fetch2.FetchError as e:
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
+
+ if progress:
bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- min_tasks = 100
- for tid in sq_data['hash']:
- if tid in found:
- continue
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ for tid in missed:
+ sstatefile = d.expand(getsstatefile(tid, siginfo, d))
tasklist.append((tid, sstatefile))
if tasklist:
- if len(tasklist) >= min_tasks:
+ nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
+
+ progress = len(tasklist) >= 100
+ if progress:
msg = "Checking sstate mirror object availability"
bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
- import multiprocessing
- nproc = min(multiprocessing.cpu_count(), len(tasklist))
-
- bb.event.enable_threadlock()
- pool = oe.utils.ThreadedPool(nproc, len(tasklist),
- worker_init=checkstatus_init, worker_end=checkstatus_end)
- for t in tasklist:
- pool.add_task(checkstatus, t)
- pool.start()
- pool.wait_completion()
- bb.event.disable_threadlock()
-
- if len(tasklist) >= min_tasks:
+            # Have to set up the fetcher environment here rather than in each thread as it would race
+ fetcherenv = bb.fetch2.get_fetcher_environment(d)
+ with bb.utils.environment(**fetcherenv):
+ bb.event.enable_threadlock()
+ pool = oe.utils.ThreadedPool(nproc, len(tasklist),
+ worker_init=checkstatus_init, worker_end=checkstatus_end,
+ name="sstate_checkhashes-")
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.start()
+ pool.wait_completion()
+ bb.event.disable_threadlock()
+
+ if progress:
bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
for tid in missed:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
for tid in found:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
@@ -996,12 +1058,14 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
match = 0
if total:
match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
+ bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
+ (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -1020,15 +1084,13 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
- # We only need to trigger populate_lic through direct dependencies
- if taskdependees[task][1] == "do_populate_lic":
- return True
-
- # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
- if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+    # The tasks in directtasks above only need to be triggered through direct dependencies
+ if taskdependees[task][1] in directtasks:
return True
# We only need to trigger packagedata through direct dependencies
@@ -1051,8 +1113,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
return False
- # do_package/packagedata/package_qa don't need do_populate_sysroot
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
+ # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -1100,13 +1162,9 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
        # Target populate_sysroot tasks need their dependencies
return False
- if taskdependees[task][1] == 'do_shared_workdir':
- continue
-
- if taskdependees[dep][1] == "do_populate_lic":
+ if taskdependees[dep][1] in directtasks:
continue
-
# Safe fallthrough default
logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
@@ -1133,19 +1191,28 @@ python sstate_eventhandler() {
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
}
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
-# Event handler which removes manifests and stamps file for
-# recipes which are no longer reachable in a build where they
-# once were.
+#
+# Event handler which removes manifests and stamp files for recipes which are no
+# longer 'reachable' in a build where they once were. 'Reachable' refers to
+# whether a recipe is parsed, so recipes in a layer which was removed would no
+# longer be reachable. Switching between systemd and sysvinit, where recipes
+# become skipped, would be another example.
+#
# Also optionally removes the workdir of those tasks/recipes
#
-addhandler sstate_eventhandler2
-sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
-python sstate_eventhandler2() {
+addhandler sstate_eventhandler_reachablestamps
+sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
+python sstate_eventhandler_reachablestamps() {
import glob
d = e.data
stamps = e.stamps.values()
@@ -1171,11 +1238,21 @@ python sstate_eventhandler2() {
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
continue
+ manseen = set()
+ ignore = []
with open(i, "r") as f:
lines = f.readlines()
- for l in lines:
+ for l in reversed(lines):
try:
(stamp, manifest, workdir) = l.split()
+                    # The index may have multiple entries for the same manifest, as the code above only
+                    # appends new entries and there may be an entry with a matching manifest but a
+                    # differing version in stamp/workdir. The last entry in the list is the valid one;
+                    # any earlier entries with matching manifests should be ignored.
+ if manifest in manseen:
+ ignore.append(l)
+ continue
+ manseen.add(manifest)
if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
toremove.append(l)
if stamp not in seen:
@@ -1206,6 +1283,8 @@ python sstate_eventhandler2() {
with open(i, "w") as f:
for l in lines:
+ if l in ignore:
+ continue
f.write(l)
machineindex |= set(stamps)
with open(mi, "w") as f:
@@ -1215,3 +1294,59 @@ python sstate_eventhandler2() {
if preservestamps:
os.remove(preservestampfile)
}
+
+
+#
+# Bitbake can generate an event showing which setscene tasks are 'stale',
+# i.e. which ones will be rerun. These are ones where a stamp file is present but
+# it is stable (e.g. taskhash doesn't match). With that list we can go through
+# the manifests for matching tasks and "uninstall" those manifests now. We do
+# this now rather than mid build since the distribution of files between sstate
+# objects may have changed, new tasks may run first and if those new tasks overlap
+# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
+# removing these files is fast.
+#
+addhandler sstate_eventhandler_stalesstate
+sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
+python sstate_eventhandler_stalesstate() {
+ d = e.data
+ tasks = e.tasks
+
+ bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+
+ for a in list(set(d.getVar("SSTATE_ARCHS").split())):
+ toremove = []
+ i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
+ if not os.path.exists(i):
+ continue
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ try:
+ (stamp, manifest, workdir) = l.split()
+ for tid in tasks:
+ for s in tasks[tid]:
+ if s.startswith(stamp):
+ taskname = bb.runqueue.taskname_from_tid(tid)[3:]
+ manname = manifest + "." + taskname
+ if os.path.exists(manname):
+ bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
+ toremove.append((manname, tid, tasks[tid]))
+ break
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+
+ if toremove:
+ msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
+ bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
+
+ removed = 0
+ for (manname, tid, stamps) in toremove:
+ sstate_clean_manifest(manname, d)
+ for stamp in stamps:
+ bb.utils.remove(stamp)
+ removed = removed + 1
+ bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
+}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index de3a19815a..ab827766be 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -5,6 +5,7 @@ SYSROOT_DIRS = " \
${base_libdir} \
${nonarch_base_libdir} \
${datadir} \
+ /sysroot-only \
"
# These directories are also staged in the sysroot when they contain files that
@@ -18,20 +19,24 @@ SYSROOT_DIRS_NATIVE = " \
${sysconfdir} \
${localstatedir} \
"
-SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
# These directories will not be staged in the sysroot
-SYSROOT_DIRS_BLACKLIST = " \
+SYSROOT_DIRS_IGNORE = " \
${mandir} \
${docdir} \
${infodir} \
+ ${datadir}/X11/locale \
${datadir}/applications \
+ ${datadir}/bash-completion \
${datadir}/fonts \
${datadir}/gtk-doc/html \
+ ${datadir}/installed-tests \
${datadir}/locale \
${datadir}/pixmaps \
+ ${datadir}/terminfo \
${libdir}/${BPN}/ptest \
"
@@ -44,9 +49,10 @@ sysroot_stage_dir() {
fi
mkdir -p "$dest"
+ rdest=$(realpath --relative-to="$src" "$dest")
(
cd $src
- find . -print0 | cpio --null -pdlu $dest
+ find . -print0 | cpio --null -pdlu $rdest
)
}
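realpath --relative-to resolves symlinks and then expresses $dest relative to $src, so cpio copies through a relative rather than absolute destination. A purely lexical Python equivalent (illustrative paths; os.path.relpath, unlike realpath, does not resolve symlinks):

    import os

    src = "/build/tmp/work/recipe/image"            # hypothetical paths
    dest = "/build/tmp/work/recipe/sysroot-destdir"
    rdest = os.path.relpath(dest, start=src)        # "../sysroot-destdir"
    # equivalent to: find . -print0 | cpio --null -pdlu $rdest  (cwd=src)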
@@ -59,7 +65,7 @@ sysroot_stage_dirs() {
done
# Remove directories we do not care about
- for dir in ${SYSROOT_DIRS_BLACKLIST}; do
+ for dir in ${SYSROOT_DIRS_IGNORE}; do
rm -rf "$to$dir"
done
}
@@ -77,7 +83,7 @@ python sysroot_strip () {
pn = d.getVar('PN')
libdir = d.getVar("libdir")
base_libdir = d.getVar("base_libdir")
- qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
+ qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
strip_cmd = d.getVar("STRIP")
oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
@@ -85,7 +91,6 @@ python sysroot_strip () {
}
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[umask] = "022"
addtask populate_sysroot after do_install
@@ -99,7 +104,7 @@ python do_populate_sysroot () {
for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
bb.build.exec_func(f, d)
pn = d.getVar("PN")
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
+ multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
bb.utils.mkdirhier(provdir)
for p in d.getVar("PROVIDES").split():
@@ -111,11 +116,11 @@ python do_populate_sysroot () {
}
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
-do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
POPULATESYSROOTDEPS = ""
-POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
-POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
SSTATETASKS += "do_populate_sysroot"
@@ -302,6 +307,7 @@ python extend_recipe_sysroot() {
sstatetasks = d.getVar("SSTATETASKS").split()
# Add recipe specific tasks referenced by setscene_depvalid()
sstatetasks.append("do_stash_locale")
+ sstatetasks.append("do_deploy")
def print_dep_tree(deptree):
data = ""
@@ -405,7 +411,7 @@ python extend_recipe_sysroot() {
if os.path.islink(f) and not os.path.exists(f):
bb.note("%s no longer exists, removing from sysroot" % f)
lnk = os.readlink(f.replace(".complete", ""))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(f)
os.unlink(f.replace(".complete", ""))
@@ -450,7 +456,7 @@ python extend_recipe_sysroot() {
fl = depdir + "/" + l
bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
lnk = os.readlink(fl)
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(fl)
os.unlink(fl + ".complete")
@@ -471,7 +477,7 @@ python extend_recipe_sysroot() {
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(depdir + "/" + c)
if os.path.lexists(depdir + "/" + c + ".complete"):
os.unlink(depdir + "/" + c + ".complete")
@@ -614,8 +620,41 @@ python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
- if deps and "populate_sysroot" in deps:
- d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
+ if task == "do_configure" or (deps and "populate_sysroot" in deps):
+ d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
+
+
+#
+# Target build output, stored in do_populate_sysroot or do_package, can depend
+# not only upon direct dependencies but also indirect ones. A good example is
+# linux-libc-headers. The toolchain depends on this but most target recipes do
+# not. Some headers are not used by the toolchain build and do not change the
+# toolchain task output, so their task hashes can change without changing the
+# toolchain's sysroot output, yet they can still influence other recipes.
+#
+# A specific example is rtc.h, which can change rtcwake.c in util-linux but is
+# not used in the glibc or gcc builds. To account for this, we need to include
+# the populate_sysroot hashes of dependencies in the task output hashes.
+#
+python target_add_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task not in ["do_populate_sysroot", "do_package"]:
+ return
+
+ pn = d.getVar("PN")
+ if pn.endswith("-native"):
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = {}
+ for dep in taskdepdata.values():
+ if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0]:
+ deps[dep[0]] = dep[6]
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "target_add_sysroot_deps"
+
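HASHEQUIV_EXTRA_SIGDATA ends up holding one sorted "name: hash" line per qualifying populate_sysroot dependency, so a change in any of those task hashes perturbs this task's output hash too. A small demonstration with hypothetical recipe names and unihash values:

    deps = {
        "util-linux": "3f7b9c...",            # hypothetical unihashes
        "linux-libc-headers": "a41d02...",
    }
    sigdata = "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps))
    # linux-libc-headers: a41d02...
    # util-linux: 3f7b9c...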
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
index 336c4c2ff5..57ec0acbc5 100644
--- a/meta/classes/systemd-boot.bbclass
+++ b/meta/classes/systemd-boot.bbclass
@@ -28,7 +28,7 @@ efi_populate() {
done
}
-efi_iso_populate_append() {
+efi_iso_populate:append() {
cp -r $iso_dir/loader ${EFIIMGDIR}
}
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index 9e8a82c9f1..09ec52792d 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -1,9 +1,9 @@
# The list of packages that should have systemd packaging scripts added. For
-# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
+# each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service
# files in this package. If this variable isn't set, [package].service is used.
SYSTEMD_PACKAGES ?= "${PN}"
-SYSTEMD_PACKAGES_class-native ?= ""
-SYSTEMD_PACKAGES_class-nativesdk ?= ""
+SYSTEMD_PACKAGES:class-native ?= ""
+SYSTEMD_PACKAGES:class-nativesdk ?= ""
# Whether to enable or disable the services on installation.
SYSTEMD_AUTO_ENABLE ??= "enable"
@@ -23,7 +23,7 @@ python __anonymous() {
}
systemd_postinst() {
-if type systemctl >/dev/null 2>/dev/null; then
+if systemctl >/dev/null 2>/dev/null; then
OPTS=""
if [ -n "$D" ]; then
@@ -48,7 +48,7 @@ fi
}
systemd_prerm() {
-if type systemctl >/dev/null 2>/dev/null; then
+if systemctl >/dev/null 2>/dev/null; then
if [ -z "$D" ]; then
systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
@@ -70,7 +70,7 @@ python systemd_populate_packages() {
return
def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
+ val = (d.getVar('%s:%s' % (var, pkg)) or "").strip()
if val == "":
val = (d.getVar(var) or "").strip()
return val
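get_package_var() gives the package-qualified variable precedence over the plain one. A quick illustration against a stub datastore (StubData is purely illustrative, standing in for the BitBake datastore):

    class StubData:
        def __init__(self, vals):
            self.vals = vals
        def getVar(self, name):
            return self.vals.get(name)

    d = StubData({"SYSTEMD_AUTO_ENABLE": "enable",
                  "SYSTEMD_AUTO_ENABLE:foo": "disable"})
    assert get_package_var(d, "SYSTEMD_AUTO_ENABLE", "foo") == "disable"
    assert get_package_var(d, "SYSTEMD_AUTO_ENABLE", "bar") == "enable"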
@@ -85,39 +85,39 @@ python systemd_populate_packages() {
def systemd_generate_package_scripts(pkg):
bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
- paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
- d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
+ paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split())
+ d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped)
- # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
+ # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg
# variable.
localdata = d.createCopy()
localdata.prependVar("OVERRIDES", pkg + ":")
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('systemd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('systemd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- # Add files to FILES_*-systemd if existent and not already done
+ # Add files to FILES:*-systemd if existent and not already done
def systemd_append_file(pkg_systemd, file_append):
appended = False
if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
- var_name = "FILES_" + pkg_systemd
+ var_name = "FILES:" + pkg_systemd
files = d.getVar(var_name, False) or ""
if file_append not in files.split():
d.appendVar(var_name, " " + file_append)
appended = True
return appended
- # Add systemd files to FILES_*-systemd, parse for Also= and follow recursive
+ # Add systemd files to FILES:*-systemd, parse for Also= and follow recursive
def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
# avoid infinite recursion
if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
@@ -174,31 +174,32 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+ bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
+ service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
def systemd_create_presets(pkg, action):
presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
bb.utils.mkdirhier(os.path.dirname(presetf))
with open(presetf, 'a') as fd:
- for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
+ for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
fd.write("%s %s\n" % (action,service))
- d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
+ d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
# Run all modifications once when creating package
if os.path.exists(d.getVar("D")):
for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg):
+ if d.getVar('SYSTEMD_SERVICE:' + pkg):
systemd_generate_package_scripts(pkg)
action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
if action in ("enable", "disable"):
systemd_create_presets(pkg, action)
elif action not in ("mask", "preset"):
- bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
+ bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
systemd_check_services()
}
-PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
+PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
python rm_systemd_unitdir (){
import shutil
@@ -226,7 +227,7 @@ python rm_sysvinit_initddir (){
}
do_install[postfuncs] += "${RMINITDIR} "
-RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
-RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
RMINITDIR = ""
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index 6059ae95e0..a564ee7494 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -26,6 +26,9 @@ def emit_terminal_func(command, envdata, d):
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
+        # Override the shell that shell_trap_code specifies.
+ # If our shell is bash, we might well face silent death.
+ script.write("#!/bin/bash\n")
script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
@@ -37,7 +40,7 @@ def emit_terminal_func(command, envdata, d):
def oe_terminal(command, title, d):
import oe.data
import oe.terminal
-
+
envdata = bb.data.init()
for v in os.environ:
diff --git a/meta/classes/testexport.bbclass b/meta/classes/testexport.bbclass
index 59cbaefbf9..1b0fb44a4a 100644
--- a/meta/classes/testexport.bbclass
+++ b/meta/classes/testexport.bbclass
@@ -137,7 +137,7 @@ def copy_needed_files(d, tc):
shutil.rmtree(os.path.join(subdir, dir))
# Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
+ testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
# Copy packages needed for runtime testing
package_extraction(d, tc.suites)
@@ -146,7 +146,7 @@ def copy_needed_files(d, tc):
export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
oe.path.copytree(test_pkg_dir, export_pkg_dir)
# Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
+ testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
# Copy SDK
if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
@@ -159,11 +159,11 @@ def copy_needed_files(d, tc):
shutil.copy2(tarball_path, export_sdk_dir)
# Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
+ testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
bb.plain("Exported tests to: %s" % export_path)
-def create_tarball(d, tar_name, src_dir):
+def testexport_create_tarball(d, tar_name, src_dir):
import tarfile
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index 00f0c29836..898248992c 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -3,6 +3,8 @@
# Released under the MIT license (see COPYING.MIT)
inherit metadata_scm
+inherit image-artifact-names
+
# testimage.bbclass enables testing of qemu images using python unittests.
# Most of the tests are commands run on target image over ssh.
# To use it add testimage to global inherit and call your target image with -c testimage
@@ -34,6 +36,7 @@ TESTIMAGE_AUTO ??= "0"
# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
+# QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs)
# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
# if a pattern is not specifically present on this variable a default will be used when booting the target.
@@ -58,23 +61,22 @@ BASICTESTSUITE = "\
ping date df ssh scp python perl gi ptest parselogs \
logrotate connman systemd oe_syslog pam stap ldd xorg \
kernelmodule gcc buildcpio buildlzip buildgalculator \
- dnf rpm opkg apt weston"
+ dnf rpm opkg apt weston go rust"
DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
-# aarch64 has no graphics
-DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
# musl doesn't support systemtap
-DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
+DEFAULT_TEST_SUITES:remove:libc-musl = "stap"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
# mitigate this by removing build tests for qemumips machines.
MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
-DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
-DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
+QEMU_USE_KVM ?= "1"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
TEST_OVERALL_TIMEOUT ?= ""
TEST_TARGET ?= "qemu"
@@ -84,7 +86,7 @@ TEST_RUNQEMUPARAMS ?= ""
TESTIMAGE_BOOT_PATTERNS ?= ""
TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
+TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
@@ -92,7 +94,7 @@ TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-na
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
-TESTIMAGELOCK_qemuall = ""
+TESTIMAGELOCK:qemuall = ""
TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
@@ -125,12 +127,19 @@ testimage_dump_host () {
netstat -an
}
+testimage_dump_monitor () {
+ query-status
+ query-block
+ dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
+}
+
python do_testimage() {
testimage_main(d)
}
addtask testimage
do_testimage[nostamp] = "1"
+do_testimage[network] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
@@ -193,6 +202,7 @@ def testimage_main(d):
import json
import signal
import logging
+ import shutil
from bb.utils import export_proxies
from oeqa.core.utils.misc import updateTestData
@@ -228,9 +238,10 @@ def testimage_main(d):
tdname = "%s.testdata.json" % image_name
try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+ with open(tdname, "r") as f:
+ td = json.load(f)
+ except FileNotFoundError as err:
+ bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
    # Some variables need to be updated (mostly paths) with the
    # ones of the current environment because some tests require them.
@@ -303,20 +314,19 @@ def testimage_main(d):
'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
'ovmf' : ovmf,
+ 'tmpfsdir' : d.getVar("RUNQEMU_TMPFS_DIR"),
}
if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
- # TODO: Currently BBPATH is needed for custom loading of targets.
- # It would be better to find these modules using instrospection.
- target_kwargs['target_modules_path'] = d.getVar('BBPATH')
-
# hardware controlled targets might need further access
target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or ""
+ target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
def export_ssh_agent(d):
import os
@@ -364,6 +374,7 @@ def testimage_main(d):
package_extraction(d, tc.suites)
results = None
+ complete = False
orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
try:
# We need to check if runqemu ends unexpectedly
@@ -375,6 +386,7 @@ def testimage_main(d):
except ValueError:
pass
results = tc.runTests()
+ complete = True
except (KeyboardInterrupt, BlockingIOError) as err:
if isinstance(err, KeyboardInterrupt):
bb.error('testimage interrupted, shutting down...')
@@ -382,22 +394,30 @@ def testimage_main(d):
bb.error('runqemu failed, shutting down...')
if results:
results.stop()
- results = None
+ results = tc.results
finally:
signal.signal(signal.SIGTERM, orig_sigterm_handler)
tc.target.stop()
# Show results (if we have them)
- if not results:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
- configuration = get_testimage_configuration(d, 'runtime', machine)
- results.logDetails(get_testimage_json_result_dir(d),
- configuration,
- get_testimage_result_id(configuration),
- dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
- results.logSummary(pn)
+ if results:
+ configuration = get_testimage_configuration(d, 'runtime', machine)
+ results.logDetails(get_testimage_json_result_dir(d),
+ configuration,
+ get_testimage_result_id(configuration),
+ dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
+ results.logSummary(pn)
+
+ # Copy additional logs to tmp/log/oeqa so it's easier to find them
+ targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ os.makedirs(targetdir, exist_ok=True)
+ os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
+ os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
+
+ if not results or not complete:
+ bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
index 758a23ac55..8b2e74f606 100644
--- a/meta/classes/testsdk.bbclass
+++ b/meta/classes/testsdk.bbclass
@@ -36,12 +36,14 @@ python do_testsdk() {
}
addtask testsdk
do_testsdk[nostamp] = "1"
+do_testsdk[network] = "1"
python do_testsdkext() {
import_and_run('TESTSDKEXT_CLASS_NAME', d)
}
addtask testsdkext
do_testsdkext[nostamp] = "1"
+do_testsdkext[network] = "1"
python () {
if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
index f46bacabd4..68c9d4fb70 100644
--- a/meta/classes/texinfo.bbclass
+++ b/meta/classes/texinfo.bbclass
@@ -7,12 +7,12 @@
# makeinfo from SANITY_REQUIRED_UTILITIES.
TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
-TEXDEP_class-native = "texinfo-dummy-native"
-TEXDEP_class-cross = "texinfo-dummy-native"
-TEXDEP_class-crosssdk = "texinfo-dummy-native"
-TEXDEP_class-cross-canadian = "texinfo-dummy-native"
-DEPENDS_append = " ${TEXDEP}"
+TEXDEP:class-native = "texinfo-dummy-native"
+TEXDEP:class-cross = "texinfo-dummy-native"
+TEXDEP:class-crosssdk = "texinfo-dummy-native"
+TEXDEP:class-cross-canadian = "texinfo-dummy-native"
+DEPENDS:append = " ${TEXDEP}"
# libtool-cross doesn't inherit cross
-TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
+TEXDEP:pn-libtool-cross = "texinfo-dummy-native"
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 9518ddf7a4..dd5c7f224b 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -106,7 +106,7 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
pkgdata['OPKGN'] = m.group(1)
kn = "_".join([x for x in kn.split("_") if x.isupper()])
pkgdata[kn] = kv.strip()
- if kn == 'FILES_INFO':
+ if kn.startswith('FILES_INFO'):
pkgdata[kn] = json.loads(kv)
except ValueError:
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index db1d3215ef..8f914cce27 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -3,11 +3,13 @@ inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-musl = " -mmusl"
+TARGET_CC_ARCH:append:libc-musl = " -mmusl"
# default debug prefix map isn't valid in the SDK
DEBUG_PREFIX_MAP = ""
+EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
+
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
# Create environment setup script. Remember that $SDKTARGETSYSROOT should
@@ -39,6 +41,7 @@ toolchain_create_sdk_env_script () {
echo ' return 1' >> $script
echo 'fi' >> $script
+ echo "${EXPORT_SDK_PS1}" >> $script
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
EXTRAPATH=""
for i in ${CANADIANEXTRAOS}; do
@@ -62,6 +65,7 @@ toolchain_create_sdk_env_script () {
# This function creates an environment-setup-script in the TMPDIR which enables
# an OE-core IDE to integrate with the build tree
+# Caller must ensure CONFIG_SITE is set up
toolchain_create_tree_env_script () {
script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
rm -f $script
@@ -70,7 +74,7 @@ toolchain_create_tree_env_script () {
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
- echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
+ echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script
echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
@@ -158,7 +162,7 @@ EOF
}
# We get the cached site config at runtime
-TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
+TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 89ff970fcc..b9ad35821a 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -11,7 +11,79 @@
#
# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
+def removesuffix(s, suffix):
+ if suffix and s.endswith(suffix):
+ return s[:-len(suffix)]
+ return s
+
+# Some versions of u-boot use .bin and others use .img. By default use .bin
+# but enable individual recipes to change this value.
+UBOOT_SUFFIX ??= "bin"
UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
+UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
+UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
+UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
+UBOOT_MAKE_TARGET ?= "all"
+
+# Output the generated ELF. Some platforms can use the ELF file and directly
+# load it (JTAG booting, QEMU); additionally, the ELF can be used for debugging
+# purposes.
+UBOOT_ELF ?= ""
+UBOOT_ELF_SUFFIX ?= "elf"
+UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_ELF_SUFFIX}"
+UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}"
+UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
+
+# Some versions of u-boot build an SPL (Secondary Program Loader) image that
+# should be packaged along with the u-boot binary as well as placed in the
+# deploy directory. Recipes for those versions can set the following variables
+# to allow packaging the SPL.
+SPL_SUFFIX ?= ""
+SPL_BINARY ?= ""
+SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
+SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
+SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
+SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}"
+SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
+
+# Additional environment variables or a script can be installed alongside
+# u-boot to be used automatically on boot. This file, typically 'uEnv.txt'
+# or 'boot.scr', should be packaged along with u-boot as well as placed in the
+# deploy directory. Machine configurations needing one of these files should
+# include it in the SRC_URI and set the UBOOT_ENV parameter.
+UBOOT_ENV_SUFFIX ?= "txt"
+UBOOT_ENV ?= ""
+UBOOT_ENV_SRC_SUFFIX ?= "cmd"
+UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
+UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
+UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
+UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
+
+# Default name of the u-boot initial environment file; individual recipes can
+# override this value.
+UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
+
+# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
+# to find the EXTLINUX conf file.
+UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
+UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf"
+UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${PR}"
+
+# Options for the device tree compiler passed to mkimage '-D' feature:
+UBOOT_MKIMAGE_DTCOPTS ??= ""
+SPL_MKIMAGE_DTCOPTS ??= ""
+
+# mkimage command
+UBOOT_MKIMAGE ?= "uboot-mkimage"
+UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
+
+# Arguments passed to mkimage for signing
+UBOOT_MKIMAGE_SIGN_ARGS ?= ""
+SPL_MKIMAGE_SIGN_ARGS ?= ""
+
+# Options to deploy the u-boot device tree
+UBOOT_DTB ?= ""
+UBOOT_DTB_BINARY ??= ""
python () {
ubootmachine = d.getVar("UBOOT_MACHINE")
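The removesuffix() helper defined at the top of this hunk mirrors Python 3.9's str.removesuffix (PEP 616) for older interpreters; the empty-suffix guard matters because s[:-0] would otherwise return an empty string:

    assert removesuffix("u-boot-spl.bin", ".bin") == "u-boot-spl"
    assert removesuffix("u-boot-spl.bin", ".img") == "u-boot-spl.bin"  # no match
    assert removesuffix("u-boot-spl.bin", "") == "u-boot-spl.bin"      # empty-suffix guard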
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index f4bf94be04..dcebe7ff31 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -64,7 +64,7 @@ UBOOT_EXTLINUX_FDT ??= ""
UBOOT_EXTLINUX_FDTDIR ??= "../"
UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
-UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
+UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
@@ -153,5 +153,6 @@ python do_create_extlinux_config() {
}
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 713196df41..8d136e9405 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -19,7 +19,7 @@
# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place to
# treat the device tree blob:
#
-# * u-boot:do_install_append
+# * u-boot:do_install:append
# Install UBOOT_DTB_BINARY to datadir, so that kernel can use it for
#   signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
#
@@ -31,19 +31,81 @@
#
# For more details on signature process, please refer to U-Boot documentation.
-# Signature activation.
+# We need some variables from u-boot-config
+inherit uboot-config
+
+# Enable use of a U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE ?= "0"
+
+# Signature activation - these require their respective fitImages
UBOOT_SIGN_ENABLE ?= "0"
+SPL_SIGN_ENABLE ?= "0"
# Default value for deployment filenames.
UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
UBOOT_DTB_BINARY ?= "u-boot.dtb"
UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
-UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
-UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
-UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
+UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin"
+UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin"
+UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.bin"
+UBOOT_ITS_IMAGE ?= "u-boot-its-${MACHINE}-${PV}-${PR}"
+UBOOT_ITS ?= "u-boot.its"
+UBOOT_ITS_SYMLINK ?= "u-boot-its-${MACHINE}"
+UBOOT_FITIMAGE_IMAGE ?= "u-boot-fitImage-${MACHINE}-${PV}-${PR}"
+UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage"
+UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
+SPL_DIR ?= "spl"
+SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
+SPL_DTB_BINARY ?= "u-boot-spl.dtb"
+SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
+SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin"
+SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin"
+SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin"
+
+# U-Boot fitImage description
+UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
+
+# Kernel / U-Boot fitImage Hash Algo
+FIT_HASH_ALG ?= "sha256"
+UBOOT_FIT_HASH_ALG ?= "sha256"
+
+# Kernel / U-Boot fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+UBOOT_FIT_SIGN_ALG ?= "rsa2048"
+
+# Generate keys for signing Kernel / U-Boot fitImage
+FIT_GENERATE_KEYS ?= "0"
+UBOOT_FIT_GENERATE_KEYS ?= "0"
-# Functions in this bbclass is for u-boot only
+# Size of private keys in number of bits
+FIT_SIGN_NUMBITS ?= "2048"
+UBOOT_FIT_SIGN_NUMBITS ?= "2048"
+
+# args to openssl genrsa (Default is just the public exponent)
+FIT_KEY_GENRSA_ARGS ?= "-F4"
+UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4"
+
+# args to openssl req (Default is -batch for non-interactive mode and
+# -new for new certificate)
+FIT_KEY_REQ_ARGS ?= "-batch -new"
+UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new"
+
+# Standard format for public key certificate
+FIT_KEY_SIGN_PKCS ?= "-x509"
+UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
+
+# Functions in this bbclass can apply to either U-Boot or the kernel,
+# depending on the scenario.
UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
+KERNEL_PN = "${@d.getVar('PREFERRED_PROVIDER_virtual/kernel')}"
+
+# We need u-boot-tools-native if we're creating a U-Boot fitImage
+python() {
+ if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1':
+ depends = d.getVar("DEPENDS")
+ depends = "%s u-boot-tools-native dtc-native" % depends
+ d.setVar("DEPENDS", depends)
+}
concat_dtb_helper() {
if [ -e "${UBOOT_DTB_BINARY}" ]; then
@@ -57,28 +119,65 @@ concat_dtb_helper() {
ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
fi
- # Concatenate U-Boot w/o DTB & DTB with public key
- # (cf. kernel-fitimage.bbclass for more details)
- deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
- if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
- [ -e "$deployed_uboot_dtb_binary" ]; then
- oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
- install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
- elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
+	# If we're not using a signed U-Boot fit, concatenate U-Boot w/o DTB & the
+	# U-Boot DTB with public key (otherwise the public key is carried in the
+	# SPL DTB, handled by the equivalent concat_spl_dtb_helper function -
+	# cf. kernel-fitimage.bbclass for more details)
+ if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
+ deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
+ if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
+ [ -e "$deployed_uboot_dtb_binary" ]; then
+ oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
+ install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
+ elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
+ cd ${DEPLOYDIR}
+ cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+
+ if [ -n "${UBOOT_CONFIG}" ]
+ then
+ for config in ${UBOOT_MACHINE}; do
+ i=$(expr $i + 1);
+ for type in ${UBOOT_CONFIG}; do
+ j=$(expr $j + 1);
+ if [ $j -eq $i ]
+ then
+ cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
+ fi
+ done
+ done
+ fi
+ else
+ bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ fi
+ fi
+}
+
+concat_spl_dtb_helper() {
+
+	# We only deploy symlinks to the u-boot-spl.dtb, as the KERNEL_PN will
+	# be responsible for deploying the real file
+ if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
+ fi
+
+ # Concatenate the SPL nodtb binary and u-boot.dtb
+ deployed_spl_dtb_binary='${DEPLOY_DIR_IMAGE}/${SPL_DTB_IMAGE}'
+ if [ -e "${DEPLOYDIR}/${SPL_NODTB_IMAGE}" -a -e "$deployed_spl_dtb_binary" ] ; then
cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ cat ${SPL_NODTB_IMAGE} $deployed_spl_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${SPL_BINARY} > ${SPL_IMAGE}
else
- bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
fi
}
+
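With the default filenames above, concat_spl_dtb_helper reduces to roughly the following (a sketch, assuming SPL_IMAGE is the deployed SPL artifact name provided by uboot-config):

    cd ${DEPLOYDIR}
    cat u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin \
        u-boot-spl-${MACHINE}-${PV}-${PR}.dtb > ${SPL_IMAGE}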
concat_dtb() {
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
mkdir -p ${DEPLOYDIR}
if [ -n "${UBOOT_CONFIG}" ]; then
for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="${config}"
- cd ${B}/${config}
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
concat_dtb_helper
done
else
@@ -89,44 +188,307 @@ concat_dtb() {
fi
}
+concat_spl_dtb() {
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${SPL_DTB_BINARY}" ]; then
+ mkdir -p ${DEPLOYDIR}
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
+ concat_spl_dtb_helper
+ done
+ else
+ CONFIG_B_PATH=""
+ cd ${B}
+ concat_spl_dtb_helper
+ fi
+ fi
+}
+
+
# Install UBOOT_DTB_BINARY to datadir, so that kernel can use it for
# signing, and kernel will deploy UBOOT_DTB_BINARY after signs it.
install_helper() {
if [ -f "${UBOOT_DTB_BINARY}" ]; then
- install -d ${D}${datadir}
# UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
# need both of them.
- install ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
+ install -Dm 0644 ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
else
bbwarn "${UBOOT_DTB_BINARY} not found"
fi
}
-do_install_append() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+# Install the SPL dtb and u-boot nodtb to datadir.
+install_spl_helper() {
+ if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
+ install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
+ ln -sf ${SPL_DTB_IMAGE} ${D}${datadir}/${SPL_DTB_BINARY}
+ else
+ bbwarn "${SPL_DTB_BINARY} not found"
+ fi
+ if [ -f "${UBOOT_NODTB_BINARY}" ] ; then
+ install -Dm 0644 ${UBOOT_NODTB_BINARY} ${D}${datadir}/${UBOOT_NODTB_IMAGE}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${D}${datadir}/${UBOOT_NODTB_BINARY}
+ else
+ bbwarn "${UBOOT_NODTB_BINARY} not found"
+ fi
+
+	# We need to install a 'stub' u-boot-fitimage and ITS file to datadir,
+	# so that the KERNEL_PN can use the correct filenames when
+	# assembling and deploying them
+ touch ${D}/${datadir}/${UBOOT_FITIMAGE_IMAGE}
+ touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
+}
+
+do_install:append() {
+ if [ "${PN}" = "${UBOOT_PN}" ]; then
if [ -n "${UBOOT_CONFIG}" ]; then
for config in ${UBOOT_MACHINE}; do
- cd ${B}/${config}
- install_helper
+ cd ${B}/$config
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ]; then
+ install_helper
+ fi
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ install_spl_helper
+ fi
done
else
cd ${B}
- install_helper
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ]; then
+ install_helper
+ fi
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ install_spl_helper
+ fi
fi
fi
}
-do_deploy_prepend_pn-${UBOOT_PN}() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ]; then
+do_uboot_generate_rsa_keys() {
+ if [ "${SPL_SIGN_ENABLE}" = "0" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
+		bbwarn "UBOOT_FIT_GENERATE_KEYS is set to 1 even though SPL_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
+
+ # Generate keys only if they don't already exist
+ if [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key ] || \
+ [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${SPL_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing U-Boot fitImage"
+ openssl genrsa ${UBOOT_FIT_KEY_GENRSA_ARGS} -out \
+ "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
+ "${UBOOT_FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing U-Boot fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
+ -key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
+ -out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
+ fi
+ fi
+
+}
+
+addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile
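With the defaults above (-F4, 2048 bits, -batch -new, -x509), the key generation expands to roughly:

    openssl genrsa -F4 -out ${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}.key 2048
    openssl req -batch -new -x509 \
        -key ${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}.key \
        -out ${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}.crt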
+
+# Create an ITS file for the U-Boot FIT, for use when
+# we want to sign it so that the SPL can verify it
+uboot_fitimage_assemble() {
+ uboot_its="$1"
+ uboot_nodtb_bin="$2"
+ uboot_dtb="$3"
+ uboot_bin="$4"
+ spl_dtb="$5"
+ uboot_csum="${UBOOT_FIT_HASH_ALG}"
+ uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
+ uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
+
+ rm -f $uboot_its $uboot_bin
+
+ # First we create the ITS script
+ cat << EOF >> $uboot_its
+/dts-v1/;
+
+/ {
+ description = "${UBOOT_FIT_DESC}";
+ #address-cells = <1>;
+
+ images {
+ uboot {
+ description = "U-Boot image";
+ data = /incbin/("$uboot_nodtb_bin");
+ type = "standalone";
+ os = "u-boot";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ load = <${UBOOT_LOADADDRESS}>;
+ entry = <${UBOOT_ENTRYPOINT}>;
+EOF
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ cat << EOF >> $uboot_its
+ signature {
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
+ };
+EOF
+ fi
+
+ cat << EOF >> $uboot_its
+ };
+ fdt {
+ description = "U-Boot FDT";
+ data = /incbin/("$uboot_dtb");
+ type = "flat_dt";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+EOF
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ cat << EOF >> $uboot_its
+ signature {
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
+ };
+EOF
+ fi
+
+ cat << EOF >> $uboot_its
+ };
+ };
+
+ configurations {
+ default = "conf";
+ conf {
+ description = "Boot with signed U-Boot FIT";
+ loadables = "uboot";
+ fdt = "fdt";
+ };
+ };
+};
+EOF
+
+ #
+	# Assemble the U-Boot FIT image
+ #
+ ${UBOOT_MKIMAGE} \
+ ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
+ -f $uboot_its \
+ $uboot_bin
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ #
+		# Sign the U-Boot FIT image and add public key to SPL dtb
+ #
+ ${UBOOT_MKIMAGE_SIGN} \
+ ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
+ -F -k "${SPL_SIGN_KEYDIR}" \
+ -K "$spl_dtb" \
+ -r $uboot_bin \
+ ${SPL_MKIMAGE_SIGN_ARGS}
+ fi
+
+}
+
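Assuming UBOOT_MKIMAGE and UBOOT_MKIMAGE_SIGN both resolve to u-boot's mkimage from u-boot-tools-native, the two invocations above amount to roughly:

    mkimage -f u-boot.its u-boot-fitImage
    mkimage -F -k "${SPL_SIGN_KEYDIR}" -K u-boot-spl.dtb -r u-boot-fitImage

i.e. assemble the FIT from the ITS, then re-sign it in place while writing the public key into the SPL device tree.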
+do_uboot_assemble_fitimage() {
+ # This function runs in KERNEL_PN context. The reason for that is that we need to
+ # support the scenario where UBOOT_SIGN_ENABLE is placing the Kernel fitImage's
+ # pubkey in the u-boot.dtb file, so that we can use it when building the U-Boot
+ # fitImage itself.
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${SPL_DTB_BINARY}" -a "${PN}" = "${KERNEL_PN}" ] ; then
+ if [ "${UBOOT_SIGN_ENABLE}" != "1" ]; then
+ # If we're not signing the Kernel fitImage, that means
+ # we need to copy the u-boot.dtb from staging ourselves
+ cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
+ fi
+ # As we are in the kernel context, we need to copy u-boot-spl.dtb from staging first.
+	# Unfortunately, we need to glob on top of ${SPL_DTB_BINARY} since _IMAGE and _SYMLINK
+	# will contain U-Boot's PV
+ # Similarly, we need to get the filename for the 'stub' u-boot-fitimage + its in
+ # staging so that we can use it for creating the image with the correct filename
+ # in the KERNEL_PN context.
+ # As for the u-boot.dtb (with fitimage's pubkey), it should come from the dependent
+ # do_assemble_fitimage task
+ cp -P ${STAGING_DATADIR}/u-boot-spl*.dtb ${B}
+ cp -P ${STAGING_DATADIR}/u-boot-nodtb*.bin ${B}
+ rm -rf ${B}/u-boot-fitImage-* ${B}/u-boot-its-*
+ kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
+ kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
+ cd ${B}
+ uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
+ ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
+ ${SPL_DTB_BINARY}
+ fi
+}
+
+addtask uboot_assemble_fitimage before do_deploy after do_compile
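For orientation (filenames follow the defaults above), the kernel-context task expects to find in ${STAGING_DATADIR}:

    u-boot-spl-${MACHINE}-${PV}-${PR}.dtb    the real SPL dtb
    u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin  U-Boot without its dtb
    u-boot-fitImage-${MACHINE}-${PV}-${PR}   empty stub, only supplies the filename
    u-boot-its-${MACHINE}-${PV}-${PR}        empty stub, only supplies the filename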
+
+do_deploy:prepend:pn-${UBOOT_PN}() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
concat_dtb
fi
+
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
+		# Deploy the u-boot-spl-nodtb binary and symlinks...
+ if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
+			echo "Copying u-boot-spl-nodtb binary..."
+ install -m 0644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
+ fi
+
+
+		# We only deploy the symlinks to the uboot-fitImage and uboot-its
+		# images, as the KERNEL_PN will take care of deploying the real files
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
+ ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS}
+ ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK}
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
+ concat_spl_dtb
+ fi
+
+
+}
+
+do_deploy:append:pn-${UBOOT_PN}() {
+	# If we're creating a u-boot fitImage, point the u-boot.bin
+	# symlink at it, since it might get used by image recipes
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_BINARY}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK}
+ fi
}
python () {
- if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
- kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
+ if ( (d.getVar('UBOOT_SIGN_ENABLE') == '1'
+ or d.getVar('UBOOT_FITIMAGE_ENABLE') == '1')
+ and d.getVar('PN') == d.getVar('UBOOT_PN')
+ and d.getVar('UBOOT_DTB_BINARY')):
# Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb
- d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
+ # and/or the U-Boot fitImage
+ d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % d.getVar('KERNEL_PN'))
+
+ if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' and d.getVar('PN') == d.getVar('KERNEL_PN'):
+ # As the U-Boot fitImage is created by the KERNEL_PN, we need
+ # to make sure that the u-boot-spl.dtb and u-boot-spl-nodtb.bin
+        # files are in the staging dir for its use
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % d.getVar('UBOOT_PN'))
+
+ # If the Kernel fitImage is being signed, we need to
+ # create the U-Boot fitImage after it
+ if d.getVar('UBOOT_SIGN_ENABLE') == '1':
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage' % d.getVar('KERNEL_PN'))
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage_initramfs' % d.getVar('KERNEL_PN'))
+
}
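In summary, an illustrative reading of the anonymous python above (not part of the patch):

    ${UBOOT_PN}:do_deploy                    depends on ${KERNEL_PN}:do_deploy
    ${KERNEL_PN}:do_uboot_assemble_fitimage  depends on ${UBOOT_PN}:do_populate_sysroot
    ...and, when UBOOT_SIGN_ENABLE = "1", also on
    ${KERNEL_PN}:do_assemble_fitimage and do_assemble_fitimage_initramfs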
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 70799bbf6d..6a9e862bcd 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -2,15 +2,15 @@ UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
# Example checksums
#UNINATIVE_CHECKSUM[aarch64] = "dead"
#UNINATIVE_CHECKSUM[i686] = "dead"
#UNINATIVE_CHECKSUM[x86_64] = "dead"
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
-# Enabling uninative will change the following variables so they need to go the parsing white list to prevent multiple recipe parsing
-BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
+# Enabling uninative will change the following variables, so they need to go on the parsing ignored-variables list to prevent multiple recipe parsing
+BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
addhandler uninative_event_fetchloader
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
@@ -56,12 +56,17 @@ python uninative_event_fetchloader() {
# Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
# and we can't easily put 'chksum' into the url path from a url parameter with
# the current fetcher url handling
- ownmirror = d.getVar('SOURCE_MIRROR_URL')
- if ownmirror:
- localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
+ premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
+ for line in premirrors:
+ try:
+ (find, replace) = line
+ except ValueError:
+ continue
+ if find.startswith("http"):
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
- bb.note("Fetching uninative binary shim from %s" % srcuri)
+ bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
fetcher.download()
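For example, given a hypothetical PREMIRRORS entry such as

    https?://.*/.* http://example.com/source-mirror/

the loop above would append a premirror mapping of

    ${UNINATIVE_URL}${UNINATIVE_TARBALL} http://example.com/source-mirror/uninative/<sha256>/${UNINATIVE_TARBALL}

so existing HTTP mirrors are tried before the canonical UNINATIVE_URL.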
@@ -84,7 +89,7 @@ python uninative_event_fetchloader() {
# ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract last option from first line
glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
- raise RuntimeError("Your host glibc verson (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
+ raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
cmd = d.expand("\
mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
@@ -95,7 +100,7 @@ ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index 8c2b66e7f1..fc1ffd828c 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -6,9 +6,9 @@
# To use this class a number of variables should be defined:
#
# List all of the alternatives needed by a package:
-# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
+# ALTERNATIVE:<pkg> = "name1 name2 name3 ..."
#
-# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
+# i.e. ALTERNATIVE:busybox = "sh sed test bracket"
#
# The pathname of the link
# ALTERNATIVE_LINK_NAME[name] = "target"
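A minimal recipe sketch using the renamed syntax (the target path is illustrative):

    ALTERNATIVE:${PN} = "vi"
    ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
    ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox"   # hypothetical provider
    ALTERNATIVE_PRIORITY = "100"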
@@ -123,7 +123,7 @@ def gen_updatealternativesvars(d):
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
ret.append(v + "_VARDEPS_" + p)
return " ".join(ret)
@@ -141,10 +141,10 @@ python apply_update_alternative_renames () {
import re
def update_files(alt_target, alt_target_rename, pkg, d):
- f = d.getVar('FILES_' + pkg)
+ f = d.getVar('FILES:' + pkg)
if f:
f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
- d.setVar('FILES_' + pkg, f)
+ d.setVar('FILES:' + pkg, f)
# Check for deprecated usage...
pn = d.getVar('BPN')
@@ -156,7 +156,7 @@ python apply_update_alternative_renames () {
for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
link_rename = []
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
@@ -184,7 +184,7 @@ python apply_update_alternative_renames () {
link_rename.append((alt_target, alt_target_rename))
else:
bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
update_files(alt_target, alt_target_rename, pkg, d)
else:
bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
@@ -201,7 +201,7 @@ python apply_update_alternative_renames () {
if os.path.lexists(link_target):
# Ok, the link_target exists, we can rename
bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
else:
# Try to resolve the broken link to link.${BPN}
link_maybe = '%s.%s' % (os.readlink(src), pn)
@@ -233,7 +233,7 @@ def update_alternatives_alt_targets(d, pkg):
pn = d.getVar('BPN')
pkgdest = d.getVar('PKGD')
updates = list()
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
@@ -259,7 +259,7 @@ def update_alternatives_alt_targets(d, pkg):
return updates
-PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
+PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
if not update_alternatives_enabled(d):
@@ -280,24 +280,24 @@ python populate_packages_updatealternatives () {
provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
- d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
+ d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if postinst:
postinst = alt_setup_links + postinst
else:
postinst = '#!/bin/sh\n' + alt_setup_links
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
+ prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n'
prerm += alt_remove_links
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
}
-python package_do_filedeps_append () {
+python package_do_filedeps:append () {
if update_alternatives_enabled(d):
apply_update_alternative_provides(d)
}
@@ -307,7 +307,7 @@ def apply_update_alternative_provides(d):
pkgdest = d.getVar('PKGDEST')
for pkg in d.getVar('PACKAGES').split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
@@ -321,7 +321,7 @@ def apply_update_alternative_provides(d):
# Add file provide
trans_target = oe.package.file_translate(alt_target)
- d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
- d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
+ d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link)
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""):
+ d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target)
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 1366fee653..0a3a608662 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,11 +1,11 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
+DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
UPDATERCD = "update-rc.d"
-UPDATERCD_class-cross = ""
-UPDATERCD_class-native = ""
-UPDATERCD_class-nativesdk = ""
+UPDATERCD:class-cross = ""
+UPDATERCD:class-native = ""
+UPDATERCD:class-nativesdk = ""
INITSCRIPT_PARAMS ?= "defaults"
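A typical recipe sketch (the script name and runlevels are hypothetical):

    inherit update-rc.d
    INITSCRIPT_NAME = "myservice"
    INITSCRIPT_PARAMS = "start 30 5 2 . stop 70 0 1 6 ."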
@@ -62,8 +62,8 @@ python __anonymous() {
update_rc_after_parse(d)
}
-PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
-PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
+PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
+PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
@@ -78,7 +78,7 @@ python populate_packages_updatercd () {
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
mlprefix = d.getVar('MLPREFIX') or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
+ d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix))
def update_rcd_package(pkg):
bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
@@ -89,25 +89,25 @@ python populate_packages_updatercd () {
update_rcd_auto_depend(pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('updatercd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('updatercd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += localdata.getVar('updatercd_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
- d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
+ d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}")
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..3acf59cd46 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -77,7 +77,7 @@ def update_useradd_static_config(d):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -140,13 +140,13 @@ def update_useradd_static_config(d):
uaargs.gid = uaargs.groupid
uaargs.user_group = None
if newgroup and is_pkg:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
+ groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
if groupadd:
# Only add the group if not already specified
if not uaargs.groupname in groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
- d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+ d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
uaargs.home_dir = field[5] or uaargs.home_dir
@@ -174,8 +174,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][uaargs.non_unique]
if uaargs.password != None:
newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
- elif uaargs.clear_password:
- newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -198,7 +196,7 @@ def update_useradd_static_config(d):
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -236,8 +234,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][gaargs.non_unique]
if gaargs.password != None:
newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
- elif gaargs.clear_password:
- newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -265,17 +261,17 @@ def update_useradd_static_config(d):
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
+ useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
if useradd_param:
- #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
+ d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
+ groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
if groupadd_param:
- #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
+ d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
# Load and process extra users and groups, rewriting only adduser/addgroup params
pkg = d.getVar('PN')
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..20771a0ce5 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -3,7 +3,7 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
@@ -164,16 +164,16 @@ python useradd_sysroot_sstate () {
}
do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
+SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
-SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
+SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
-USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
@@ -184,8 +184,8 @@ def update_useradd_after_parse(d):
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg))
- if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
+ d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
+ if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -199,7 +199,7 @@ python __anonymous() {
def get_all_cmd_params(d, cmd_type):
import string
- param_type = cmd_type.upper() + "_PARAM_%s"
+ param_type = cmd_type.upper() + "_PARAM:%s"
params = []
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
@@ -211,7 +211,7 @@ def get_all_cmd_params(d, cmd_type):
return "; ".join(params)
# Adds the preinst script into generated packages
-fakeroot python populate_packages_prepend () {
+fakeroot python populate_packages:prepend () {
def update_useradd_package(pkg):
bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
@@ -220,7 +220,7 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
+ preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
@@ -230,15 +230,19 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + ":" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
+ d.setVar('pkg_preinst:%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
+ rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
- d.setVar("RDEPENDS_%s" % pkg, rdepends)
+ d.setVar("RDEPENDS:%s" % pkg, rdepends)
# Add the user/group preinstall scripts and RDEPENDS requirements
# to packages specified by USERADD_PACKAGES
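A minimal recipe sketch with the renamed override syntax (user and group are hypothetical):

    inherit useradd
    USERADD_PACKAGES = "${PN}"
    USERADD_PARAM:${PN} = "-u 1200 -d /home/myuser -r -s /bin/sh myuser"
    GROUPADD_PARAM:${PN} = "-g 880 mygroup"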
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index 0d0bdb80f5..7f5b9b7219 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -145,3 +145,21 @@ perform_usermod () {
fi
set -e
}
+
+perform_passwd_expire () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing equivalent of passwd --expire with [$opts]"
+ # Directly set sp_lstchg to 0 without using the passwd command: Only root can do that
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
+ local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
+ if test "x$passwd_lastchanged" != "x0"; then
+ bbfatal "${PN}: passwd --expire operation did not succeed."
+ fi
+ else
+ bbnote "${PN}: user $username doesn't exist, not expiring its password"
+ fi
+}
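A sketch of how this helper might be driven, assuming the companion extrausers support routes a passwd-expire keyword to it:

    inherit extrausers
    EXTRA_USERS_PARAMS = "passwd-expire root;"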
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
index b1f27d3658..0466325c13 100644
--- a/meta/classes/utility-tasks.bbclass
+++ b/meta/classes/utility-tasks.bbclass
@@ -19,7 +19,7 @@ python do_listtasks() {
CLEANFUNCS ?= ""
-T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
+T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
@@ -38,6 +38,7 @@ python do_clean() {
addtask checkuri
do_checkuri[nostamp] = "1"
+do_checkuri[network] = "1"
python do_checkuri() {
src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 120bcc64a6..b4eb3d38ab 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -30,7 +30,6 @@ oe_libinstall() {
silent=""
require_static=""
require_shared=""
- staging_install=""
while [ "$#" -gt 0 ]; do
case "$1" in
-C)
@@ -62,10 +61,6 @@ oe_libinstall() {
if [ -z "$destpath" ]; then
bbfatal "oe_libinstall: no destination path specified"
fi
- if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
- then
- staging_install=1
- fi
__runcmd () {
if [ -z "$silent" ]; then
@@ -159,36 +154,6 @@ oe_libinstall() {
__runcmd cd "$olddir"
}
-oe_machinstall() {
- # Purpose: Install machine dependent files, if available
- # If not available, check if there is a default
- # If no default, just touch the destination
- # Example:
- # $1 $2 $3 $4
- # oe_machinstall -m 0644 fstab ${D}/etc/fstab
- #
- # TODO: Check argument number?
- #
- filename=`basename $3`
- dirname=`dirname $3`
-
- for o in `echo ${OVERRIDES} | tr ':' ' '`; do
- if [ -e $dirname/$o/$filename ]; then
- bbnote $dirname/$o/$filename present, installing to $4
- install $1 $2 $dirname/$o/$filename $4
- return
- fi
- done
-# bbnote overrides specific file NOT present, trying default=$3...
- if [ -e $3 ]; then
- bbnote $3 present, installing to $4
- install $1 $2 $3 $4
- else
- bbnote $3 NOT present, touching empty $4
- touch $4
- fi
-}
-
create_cmdline_wrapper () {
# Create a wrapper script where commandline options are needed
#
@@ -214,7 +179,7 @@ create_cmdline_wrapper () {
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
+exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
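A usage sketch (the tool name and option are hypothetical):

    create_cmdline_wrapper ${D}${bindir}/mytool --config=${sysconfdir}/mytool.conf

which leaves mytool.real in place and installs a wrapper that always passes the extra option.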
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
index bcaf68c5a7..bfcceff7cf 100644
--- a/meta/classes/vala.bbclass
+++ b/meta/classes/vala.bbclass
@@ -2,8 +2,8 @@
# because that is where target builds look for .vapi files.
#
VALADEPENDS = ""
-VALADEPENDS_class-target = "vala"
-DEPENDS_append = " vala-native ${VALADEPENDS}"
+VALADEPENDS:class-target = "vala"
+DEPENDS:append = " vala-native ${VALADEPENDS}"
# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
export STAGING_DATADIR
@@ -11,7 +11,7 @@ export STAGING_DATADIR
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
# Package additional files
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/vala/vapi/*.vapi \
${datadir}/vala/vapi/*.deps \
${datadir}/gir-1.0 \
@@ -19,6 +19,6 @@ FILES_${PN}-dev += "\
# Remove vapigen.m4 that is bundled with tarballs
# because it does not yet have our cross-compile fixes
-do_configure_prepend() {
+do_configure:prepend() {
rm -f ${S}/m4/vapigen.m4
}
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
index 900244004e..464564afa1 100644
--- a/meta/classes/waf.bbclass
+++ b/meta/classes/waf.bbclass
@@ -1,9 +1,19 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
+# What Python interpreter to use. Defaults to Python 3 but can be
+# overridden if required.
+WAF_PYTHON ?= "python3"
+
B = "${WORKDIR}/build"
+do_configure[cleandirs] += "${B}"
+
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEWAF_BUILD ??= ""
+# In most cases, you want to pass the same arguments to `waf build` and `waf
+# install`, but you can override it if necessary
+EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
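A recipe sketch overriding the new knobs (the flags are hypothetical):

    EXTRA_OEWAF_BUILD = "--notests"                       # hypothetical waf flag
    EXTRA_OEWAF_INSTALL = "${EXTRA_OEWAF_BUILD} --force"  # hypothetical waf flag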
def waflock_hash(d):
# Calculates the hash used for the waf lock file. This should include
@@ -29,17 +39,17 @@ def waflock_hash(d):
# directory (e.g. if the source is coming from externalsrc and was previously
# configured elsewhere).
export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
-BB_HASHBASE_WHITELIST += "WAFLOCK"
+BB_BASEHASH_IGNORE_VARS += "WAFLOCK"
python waf_preconfigure() {
import subprocess
- from distutils.version import StrictVersion
subsrcdir = d.getVar('S')
+ python = d.getVar('WAF_PYTHON')
wafbin = os.path.join(subsrcdir, 'waf')
try:
- result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
+ result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
version = result.decode('utf-8').split()[1]
- if StrictVersion(version) >= StrictVersion("1.8.7"):
+ if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
except subprocess.CalledProcessError as e:
bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
@@ -50,16 +60,16 @@ python waf_preconfigure() {
do_configure[prefuncs] += "waf_preconfigure"
waf_do_configure() {
- (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
+ (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
}
do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
+ (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
}
waf_do_install() {
- (cd ${S} && ./waf install --destdir=${D})
+ (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
}
EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
index ae4811fdeb..be155b7bc2 100644
--- a/meta/classes/xmlcatalog.bbclass
+++ b/meta/classes/xmlcatalog.bbclass
@@ -4,7 +4,7 @@ DEPENDS = "libxml2-native"
# "${sysconfdir}/xml/docbook-xml.xml".
XMLCATALOGS ?= ""
-SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
+SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
xmlcatalog_complete() {
ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
diff --git a/meta/classes/yocto-check-layer.bbclass b/meta/classes/yocto-check-layer.bbclass
new file mode 100644
index 0000000000..329d3f8edb
--- /dev/null
+++ b/meta/classes/yocto-check-layer.bbclass
@@ -0,0 +1,16 @@
+#
+# This class is used by the yocto-check-layer script for additional per-recipe tests.
+# The first test ensures that the layer has no recipes skipping the 'installed-vs-shipped' QA check.
+#
+
+WARN_QA:remove = "installed-vs-shipped"
+ERROR_QA:append = " installed-vs-shipped"
+
+python () {
+ packages = set((d.getVar('PACKAGES') or '').split())
+ for package in packages:
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
+ if 'installed-vs-shipped' in skip:
+ oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
+}
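So, for example, a recipe carrying

    INSANE_SKIP:${PN} += "installed-vs-shipped"

would be reported by yocto-check-layer when this class is inherited, and the installed-vs-shipped check itself is promoted from a warning to an error.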