Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 2
-rw-r--r--  meta/classes/archiver.bbclass | 20
-rw-r--r--  meta/classes/autotools.bbclass | 16
-rw-r--r--  meta/classes/baremetal-image.bbclass | 27
-rw-r--r--  meta/classes/base.bbclass | 164
-rw-r--r--  meta/classes/bash-completion.bbclass | 6
-rw-r--r--  meta/classes/bin_package.bbclass | 2
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 4
-rw-r--r--  meta/classes/binconfig.bbclass | 2
-rw-r--r--  meta/classes/blacklist.bbclass | 20
-rw-r--r--  meta/classes/buildhistory.bbclass | 104
-rw-r--r--  meta/classes/cargo.bbclass | 90
-rw-r--r--  meta/classes/cargo_common.bbclass | 124
-rw-r--r--  meta/classes/cmake.bbclass | 13
-rw-r--r--  meta/classes/compress_doc.bbclass | 6
-rw-r--r--  meta/classes/cpan-base.bbclass | 8
-rw-r--r--  meta/classes/cpan.bbclass | 4
-rw-r--r--  meta/classes/create-spdx.bbclass | 1022
-rw-r--r--  meta/classes/cross-canadian.bbclass | 16
-rw-r--r--  meta/classes/cross.bbclass | 8
-rw-r--r--  meta/classes/cve-check.bbclass | 115
-rw-r--r--  meta/classes/debian.bbclass | 18
-rw-r--r--  meta/classes/deploy.bbclass | 2
-rw-r--r--  meta/classes/devicetree.bbclass | 8
-rw-r--r--  meta/classes/devshell.bbclass | 10
-rw-r--r--  meta/classes/devupstream.bbclass | 25
-rw-r--r--  meta/classes/distrooverrides.bbclass | 6
-rw-r--r--  meta/classes/distutils3-base.bbclass | 6
-rw-r--r--  meta/classes/distutils3.bbclass | 67
-rw-r--r--  meta/classes/externalsrc.bbclass | 19
-rw-r--r--  meta/classes/extrausers.bbclass | 4
-rw-r--r--  meta/classes/features_check.bbclass | 13
-rw-r--r--  meta/classes/fontcache.bbclass | 10
-rw-r--r--  meta/classes/gconf.bbclass | 12
-rw-r--r--  meta/classes/gettext.bbclass | 6
-rw-r--r--  meta/classes/gi-docgen.bbclass | 8
-rw-r--r--  meta/classes/gio-module-cache.bbclass | 10
-rw-r--r--  meta/classes/glide.bbclass | 4
-rw-r--r--  meta/classes/gnomebase.bbclass | 6
-rw-r--r--  meta/classes/go-mod.bbclass | 2
-rw-r--r--  meta/classes/go-ptest.bbclass | 2
-rw-r--r--  meta/classes/go.bbclass | 51
-rw-r--r--  meta/classes/goarch.bbclass | 54
-rw-r--r--  meta/classes/gobject-introspection.bbclass | 28
-rw-r--r--  meta/classes/gsettings.bbclass | 14
-rw-r--r--  meta/classes/gtk-doc.bbclass | 24
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 45
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 10
-rw-r--r--  meta/classes/icecc.bbclass | 85
-rw-r--r--  meta/classes/image-artifact-names.bbclass | 9
-rw-r--r--  meta/classes/image-combined-dbg.bbclass | 2
-rw-r--r--  meta/classes/image-container.bbclass | 2
-rw-r--r--  meta/classes/image-live.bbclass | 6
-rw-r--r--  meta/classes/image-prelink.bbclass | 81
-rw-r--r--  meta/classes/image.bbclass | 66
-rw-r--r--  meta/classes/image_types.bbclass | 122
-rw-r--r--  meta/classes/image_types_wic.bbclass | 45
-rw-r--r--  meta/classes/insane.bbclass | 386
-rw-r--r--  meta/classes/kernel-artifact-names.bbclass | 5
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 36
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 377
-rw-r--r--  meta/classes/kernel-grub.bbclass | 4
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 64
-rw-r--r--  meta/classes/kernel-uboot.bbclass | 6
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 37
-rw-r--r--  meta/classes/kernel.bbclass | 201
-rw-r--r--  meta/classes/libc-package.bbclass | 34
-rw-r--r--  meta/classes/license.bbclass | 122
-rw-r--r--  meta/classes/license_image.bbclass | 48
-rw-r--r--  meta/classes/linux-dummy.bbclass | 4
-rw-r--r--  meta/classes/manpages.bbclass | 11
-rw-r--r--  meta/classes/meson.bbclass | 64
-rw-r--r--  meta/classes/meta.bbclass | 4
-rw-r--r--  meta/classes/mime-xdg.bbclass | 12
-rw-r--r--  meta/classes/mime.bbclass | 12
-rw-r--r--  meta/classes/mirrors.bbclass | 136
-rw-r--r--  meta/classes/module.bbclass | 6
-rw-r--r--  meta/classes/multilib.bbclass | 36
-rw-r--r--  meta/classes/multilib_global.bbclass | 19
-rw-r--r--  meta/classes/multilib_header.bbclass | 4
-rw-r--r--  meta/classes/multilib_script.bbclass | 4
-rw-r--r--  meta/classes/native.bbclass | 37
-rw-r--r--  meta/classes/nativesdk.bbclass | 6
-rw-r--r--  meta/classes/npm.bbclass | 33
-rw-r--r--  meta/classes/overlayfs-etc.bbclass | 76
-rw-r--r--  meta/classes/overlayfs.bbclass | 119
-rw-r--r--  meta/classes/own-mirrors.bbclass | 26
-rw-r--r--  meta/classes/package.bbclass | 377
-rw-r--r--  meta/classes/package_deb.bbclass | 8
-rw-r--r--  meta/classes/package_ipk.bbclass | 11
-rw-r--r--  meta/classes/package_rpm.bbclass | 19
-rw-r--r--  meta/classes/packagedata.bbclass | 4
-rw-r--r--  meta/classes/packagegroup.bbclass | 2
-rw-r--r--  meta/classes/patch.bbclass | 9
-rw-r--r--  meta/classes/pixbufcache.bbclass | 16
-rw-r--r--  meta/classes/pkgconfig.bbclass | 2
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 32
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 89
-rw-r--r--  meta/classes/ptest-gnome.bbclass | 6
-rw-r--r--  meta/classes/ptest-perl.bbclass | 6
-rw-r--r--  meta/classes/ptest.bbclass | 36
-rw-r--r--  meta/classes/pypi.bbclass | 8
-rw-r--r--  meta/classes/python3-dir.bbclass | 2
-rw-r--r--  meta/classes/python3native.bbclass | 4
-rw-r--r--  meta/classes/python3targetconfig.bbclass | 22
-rw-r--r--  meta/classes/python_flit_core.bbclass | 5
-rw-r--r--  meta/classes/python_pep517.bbclass | 56
-rw-r--r--  meta/classes/python_poetry_core.bbclass | 5
-rw-r--r--  meta/classes/python_pyo3.bbclass | 30
-rw-r--r--  meta/classes/python_setuptools3_rust.bbclass | 11
-rw-r--r--  meta/classes/qemu.bbclass | 2
-rw-r--r--  meta/classes/qemuboot.bbclass | 21
-rw-r--r--  meta/classes/reproducible_build.bbclass | 127
-rw-r--r--  meta/classes/reproducible_build_simple.bbclass | 9
-rw-r--r--  meta/classes/rm_work.bbclass | 12
-rw-r--r--  meta/classes/rm_work_and_downloads.bbclass | 2
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 65
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 4
-rw-r--r--  meta/classes/rust-bin.bbclass | 149
-rw-r--r--  meta/classes/rust-common.bbclass | 185
-rw-r--r--  meta/classes/rust.bbclass | 45
-rw-r--r--  meta/classes/sanity.bbclass | 93
-rw-r--r--  meta/classes/scons.bbclass | 8
-rw-r--r--  meta/classes/setuptools3-base.bbclass (renamed from meta/classes/distutils-common-base.bbclass) | 12
-rw-r--r--  meta/classes/setuptools3.bbclass | 33
-rw-r--r--  meta/classes/setuptools3_legacy.bbclass | 78
-rw-r--r--  meta/classes/setuptools_build_meta.bbclass | 5
-rw-r--r--  meta/classes/sign_package_feed.bbclass | 2
-rw-r--r--  meta/classes/siteinfo.bbclass | 58
-rw-r--r--  meta/classes/sstate.bbclass | 237
-rw-r--r--  meta/classes/staging.bbclass | 59
-rw-r--r--  meta/classes/systemd-boot.bbclass | 2
-rw-r--r--  meta/classes/systemd.bbclass | 44
-rw-r--r--  meta/classes/terminal.bbclass | 5
-rw-r--r--  meta/classes/testimage.bbclass | 40
-rw-r--r--  meta/classes/testsdk.bbclass | 2
-rw-r--r--  meta/classes/texinfo.bbclass | 12
-rw-r--r--  meta/classes/toaster.bbclass | 2
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 9
-rw-r--r--  meta/classes/uboot-config.bbclass | 20
-rw-r--r--  meta/classes/uboot-extlinux-config.bbclass | 2
-rw-r--r--  meta/classes/uboot-sign.bbclass | 80
-rw-r--r--  meta/classes/uninative.bbclass | 8
-rw-r--r--  meta/classes/update-alternatives.bbclass | 36
-rw-r--r--  meta/classes/update-rc.d.bbclass | 28
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 30
-rw-r--r--  meta/classes/useradd.bbclass | 28
-rw-r--r--  meta/classes/utility-tasks.bbclass | 3
-rw-r--r--  meta/classes/utils.bbclass | 37
-rw-r--r--  meta/classes/vala.bbclass | 8
-rw-r--r--  meta/classes/waf.bbclass | 7
-rw-r--r--  meta/classes/xmlcatalog.bbclass | 2
-rw-r--r--  meta/classes/yocto-check-layer.bbclass | 16
153 files changed, 4612 insertions, 2359 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
index 5bd5c44a27..a766a654a9 100644
--- a/meta/classes/allarch.bbclass
+++ b/meta/classes/allarch.bbclass
@@ -61,3 +61,5 @@ python () {
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+ return 'false'
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index a3962306b1..c19c770d11 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -5,7 +5,7 @@
# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) configured source: ARCHIVER_MODE[src] = "configured"
-# 4) source mirror: ARCHIVE_MODE[src] = "mirror"
+# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
# 5) The patches between do_unpack and do_patch:
# ARCHIVER_MODE[diff] = "1"
# And you can set the one that you'd like to exclude from the diff:
@@ -51,6 +51,7 @@ ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
+ARCHIVER_MODE[compression] ?= "xz"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
@@ -62,7 +63,7 @@ ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
# where multiple recipes use the same SRC_URI.
ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}/mirror"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
@@ -281,7 +282,10 @@ python do_ar_configured() {
        # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
        # do_configure; we archive the already configured ${S}
        # instead.
- elif pn != 'libtool-native':
+ # The kernel class functions require it to be on work-shared, we
+ # don't unpack, patch, configure again, just archive the already
+ # configured ${S}
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -406,15 +410,16 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
# that we archive the actual directory and not just the link.
srcdir = os.path.realpath(srcdir)
+ compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
+ filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
else:
- filename = '%s.tar.gz' % d.getVar('PF')
+ filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
- tar = tarfile.open(tarname, 'w:gz')
+ tar = tarfile.open(tarname, 'w:%s' % compression_method)
tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
tar.close()
@@ -483,6 +488,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
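
The new ARCHIVER_MODE[compression] flag above flows straight into the tarfile mode string ('w:%s'), so any compressor Python's tarfile module supports can be selected from configuration. A minimal, illustrative local.conf fragment (the values are hypothetical; only "xz" is a default from this patch):

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "patched"
    # gz, bz2 or xz all work, since tarfile handles all three
    ARCHIVER_MODE[compression] = "bz2"
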
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 9dc8ebdaa7..4ab2460990 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -1,4 +1,4 @@
-def autotools_dep_prepend(d):
+def get_autotools_dep(d):
if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
return ''
@@ -19,7 +19,8 @@ def autotools_dep_prepend(d):
return deps
-DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
+
+DEPENDS:prepend = "${@get_autotools_dep(d)} "
inherit siteinfo
@@ -131,7 +132,7 @@ autotools_postconfigure(){
EXTRACONFFUNCS ??= ""
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
do_compile[prefuncs] += "autotools_aclocals"
@@ -140,13 +141,16 @@ do_configure[postfuncs] += "autotools_postconfigure"
ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
ACLOCALEXTRAPATH = ""
-ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
-ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
python autotools_aclocals () {
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
+ sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
+ d.setVar("CONFIG_SITE", " ".join(sitefiles))
}
+do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
+
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
autotools_do_configure() {
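
With this change siteinfo_get_files() returns a pair: the site files that were actually found (joined into CONFIG_SITE) and every path that was searched, found or not. Feeding the searched list to do_configure[file-checksums] means the task re-runs if a site file later appears. A hedged sketch of consuming the new return shape (the function name here is hypothetical):

    python report_site_files () {
        # (found, searched) pair per the autotools change above
        sitefiles, searched = siteinfo_get_files(d, sysrootcache=False)
        bb.debug(1, "using %d site files, searched %d paths" % (len(sitefiles), len(searched)))
    }
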
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
index 8708a54301..81f5e5e93d 100644
--- a/meta/classes/baremetal-image.bbclass
+++ b/meta/classes/baremetal-image.bbclass
@@ -12,8 +12,8 @@
# Toolchain should be baremetal or newlib based.
# TCLIBC="baremetal" or TCLIBC="newlib"
-COMPATIBLE_HOST_libc-musl_class-target = "null"
-COMPATIBLE_HOST_libc-glibc_class-target = "null"
+COMPATIBLE_HOST:libc-musl:class-target = "null"
+COMPATIBLE_HOST:libc-glibc:class-target = "null"
inherit rootfs-postcommands
@@ -61,7 +61,7 @@ python do_rootfs(){
# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE
do_image_complete[dirs] = "${TOPDIR}"
SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
+SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
@@ -77,18 +77,21 @@ QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
QB_MEM ?= "-m 256"
QB_DEFAULT_FSTYPE ?= "bin"
QB_DTB ?= ""
-QB_OPT_APPEND_append = " -nographic"
+QB_OPT_APPEND:append = " -nographic"
# RISC-V tunes set the BIOS; unset it and instruct QEMU to
# ignore the BIOS and boot from -kernel
-QB_DEFAULT_BIOS_qemuriscv64 = ""
-QB_OPT_APPEND_append_qemuriscv64 = " -bios none"
+QB_DEFAULT_BIOS:qemuriscv64 = ""
+QB_DEFAULT_BIOS:qemuriscv32 = ""
+QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
+QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
# Use the medium-any code model for the RISC-V 64 bit implementation,
# since medlow can only access addresses below 0x80000000 and RAM
# starts at 0x80000000 on RISC-V 64
-CFLAGS_append_qemuriscv64 = " -mcmodel=medany"
+# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
+CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
# This next part is necessary to trick the build system into thinking
@@ -102,13 +105,17 @@ inherit qemuboot
python(){
    # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
# /usr/bin on recipe-sysroot (qemu) populated
+    # The do_addto_recipe_sysroot dependency now comes from EXTRA_IMAGEDEPENDS;
+    # we just need to add the logic to wire that dependency into do_image.
def extraimage_getdepends(task):
deps = ""
for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
# Make sure we only add it for qemu
if 'qemu' in dep:
- deps += " %s:%s" % (dep, task)
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
return deps
- d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot'))
- d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
}
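
An illustrative expansion of the reworked helper, assuming a hypothetical machine configuration:

    # EXTRA_IMAGEDEPENDS = "qemu-system-native qemu-helper-native:do_addto_recipe_sysroot"
    # extraimage_getdepends('do_populate_sysroot') would then yield:
    #   " qemu-system-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot "

Entries that already name a task are kept verbatim; bare entries get the default task appended.
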
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 42fb84c4d5..cc81461473 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -12,7 +12,7 @@ inherit logging
OE_EXTRA_IMPORTS ?= ""
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
PACKAGECONFIG_CONFARGS ??= ""
@@ -66,18 +66,18 @@ oe_runmake() {
}
-def base_dep_prepend(d):
+def get_base_dep(d):
if d.getVar('INHIBIT_DEFAULT_DEPS', False):
return ""
return "${BASE_DEFAULT_DEPS}"
-BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
+BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
BASEDEPENDS = ""
-BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
-BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
+BASEDEPENDS:class-target = "${@get_base_dep(d)}"
+BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
-DEPENDS_prepend="${BASEDEPENDS} "
+DEPENDS:prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with imediate expansion as it has to run
@@ -91,7 +91,7 @@ def extra_path_elements(d):
path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
return path
-PATH_prepend = "${@extra_path_elements(d)}"
+PATH:prepend = "${@extra_path_elements(d)}"
def get_lic_checksum_file_list(d):
filelist = []
@@ -150,17 +150,18 @@ do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
+do_fetch[network] = "1"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
addtask unpack after do_fetch
@@ -170,16 +171,54 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
+}
+
+SSTATETASKS += "do_deploy_source_date_epoch"
+
+do_deploy_source_date_epoch () {
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ else
+ echo "${SDE_FILE} not found!"
+ fi
}
+python do_deploy_source_date_epoch_setscene () {
+ sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ target = d.getVar('SDE_FILE')
+ bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
+ bb.utils.rename(sde_file, target)
+ else:
+ bb.debug(1, "%s not found!" % sde_file)
+}
+
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
+addtask do_deploy_source_date_epoch_setscene
+addtask do_deploy_source_date_epoch before do_configure after do_patch
+
+python create_source_date_epoch_stamp() {
+ source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
+ oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
+}
+do_unpack[postfuncs] += "create_source_date_epoch_stamp"
+
+def get_source_date_epoch_value(d):
+ return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
+
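
A short sketch of how the stamp written above is consumed downstream; this mirrors what the reproducible-build machinery does, with SDE_FILE and SDE_DIR assumed to be defined there:

    export SOURCE_DATE_EPOCH = "${@get_source_date_epoch_value(d)}"
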
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS") or "").split()
layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
@@ -290,9 +329,9 @@ python base_eventhandler() {
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
provs = (d.getVar("PROVIDES") or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
for p in provs:
- if p.startswith("virtual/") and p not in multiwhitelist:
+ if p.startswith("virtual/") and p not in multiprovidersallowed:
profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
@@ -399,6 +438,14 @@ python () {
if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
+    # To add a recipe to the skip list, set:
+ # SKIP_RECIPE[pn] = "message"
+ pn = d.getVar('PN')
+ skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
+ if skip_msg:
+ bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
+ raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))
+
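
For illustration, this replacement for the old PNBLACKLIST mechanism is driven from local.conf or a distro config like so (recipe name and message are hypothetical):

    SKIP_RECIPE[busybox] = "this distro ships coreutils instead"
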
# Handle PACKAGECONFIG
#
# These take the form:
@@ -481,8 +528,8 @@ python () {
% (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
appendVar('DEPENDS', extradeps)
- appendVar('RDEPENDS_${PN}', extrardeps)
- appendVar('RRECOMMENDS_${PN}', extrarrecs)
+ appendVar('RDEPENDS:${PN}', extrardeps)
+ appendVar('RRECOMMENDS:${PN}', extrarrecs)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
pn = d.getVar('PN')
@@ -495,9 +542,9 @@ python () {
unmatched_license_flags = check_license_flags(d)
if unmatched_license_flags:
if len(unmatched_license_flags) == 1:
- message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
+ message = "because it has a restricted license '{0}'. Which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
else:
- message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
+ message = "because it has restricted licenses {0}. Which are not listed in LICENSE_FLAGS_ACCEPTED".format(
", ".join("'{0}'".format(f) for f in unmatched_license_flags))
bb.debug(1, "Skipping %s %s" % (pn, message))
raise bb.parse.SkipRecipe(message)
@@ -548,46 +595,41 @@ python () {
if check_license and bad_licenses:
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- whitelist = []
- for lic in bad_licenses:
- spdx_license = return_spdx(d, lic)
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
-
- if pn in whitelist:
- '''
- We need to track what we are whitelisting and why. If pn is
- incompatible we need to be able to note that the image that
- is created may infact contain incompatible licenses despite
- INCOMPATIBLE_LICENSE being set.
- '''
- bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
- else:
- pkgs = d.getVar('PACKAGES').split()
- skipped_pkgs = {}
- unskipped_pkgs = []
- for pkg in pkgs:
- incompatible_lic = incompatible_license(d, bad_licenses, pkg)
- if incompatible_lic:
- skipped_pkgs[pkg] = incompatible_lic
- else:
- unskipped_pkgs.append(pkg)
- if unskipped_pkgs:
- for pkg in skipped_pkgs:
- bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
- d.setVar('LICENSE_EXCLUSION-' + pkg, ' '.join(skipped_pkgs[pkg]))
- for pkg in unskipped_pkgs:
- bb.debug(1, "Including the package %s" % pkg)
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
+
+ for lic_exception in exceptions:
+ if ":" in lic_exception:
+            lic_exception = lic_exception.split(":")[0]
+ if lic_exception in oe.license.obsolete_license_list():
+ bb.fatal("Invalid license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
+
+ pkgs = d.getVar('PACKAGES').split()
+ skipped_pkgs = {}
+ unskipped_pkgs = []
+ for pkg in pkgs:
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+
+ incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
+ if incompatible_lic:
+ skipped_pkgs[pkg] = incompatible_lic
else:
- incompatible_lic = incompatible_license(d, bad_licenses)
- for pkg in skipped_pkgs:
- incompatible_lic += skipped_pkgs[pkg]
- incompatible_lic = sorted(list(set(incompatible_lic)))
+ unskipped_pkgs.append(pkg)
+
+ if unskipped_pkgs:
+ for pkg in skipped_pkgs:
+ bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
+ d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
+ for pkg in unskipped_pkgs:
+ bb.debug(1, "Including the package %s" % pkg)
+ else:
+ incompatible_lic = incompatible_license(d, bad_licenses)
+ for pkg in skipped_pkgs:
+ incompatible_lic += skipped_pkgs[pkg]
+ incompatible_lic = sorted(list(set(incompatible_lic)))
- if incompatible_lic:
- bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
- raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
+ if incompatible_lic:
+ bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
+ raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
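
The new exception list uses "package:license" entries, applied per package rather than per recipe. An illustrative configuration (values are hypothetical):

    INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
    # let named packages through despite the blanket exclusion
    INCOMPATIBLE_LICENSE_EXCEPTIONS = "gnutls:GPL-3.0-only gnutls:LGPL-3.0-only"
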
needsrcrev = False
srcuri = d.getVar('SRC_URI')
@@ -627,10 +669,18 @@ python () {
elif uri.scheme == "npm":
d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
+ elif uri.scheme == "repo":
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')
+
# *.lz4 should DEPEND on lz4-native for unpacking
if path.endswith('.lz4'):
d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+ # *.zst should DEPEND on zstd-native for unpacking
+ elif path.endswith('.zst'):
+ d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')
+
# *.lz should DEPEND on lzip-native for unpacking
elif path.endswith('.lz'):
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
@@ -689,7 +739,7 @@ python () {
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
- if len(paths) != 0:
+ if paths:
for s in srcuri.split():
if not s.startswith("file://"):
continue
@@ -722,7 +772,7 @@ do_cleansstate[nostamp] = "1"
python do_cleanall() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
index 80ee9b4874..803b2cae4d 100644
--- a/meta/classes/bash-completion.bbclass
+++ b/meta/classes/bash-completion.bbclass
@@ -1,7 +1,7 @@
-DEPENDS_append_class-target = " bash-completion"
+DEPENDS:append:class-target = " bash-completion"
PACKAGES += "${PN}-bash-completion"
-FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-RDEPENDS_${PN}-bash-completion = "bash-completion"
+RDEPENDS:${PN}-bash-completion = "bash-completion"
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c3aca20443 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -34,6 +34,6 @@ bin_package_do_install () {
| tar --no-same-owner -xpf - -C ${D}
}
-FILES_${PN} = "/"
+FILES:${PN} = "/"
EXPORT_FUNCTIONS do_install
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 096b670e12..e8ac41b2d4 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -5,9 +5,9 @@
# The list of scripts which should be disabled.
BINCONFIG ?= ""
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
-do_install_append () {
+do_install:append () {
for x in ${BINCONFIG}; do
# Make the disabled script emit invalid parameters for those configure
# scripts which call it without checking the return code.
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index 9112ed4608..6e0c88269a 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -1,4 +1,4 @@
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
# The namespaces can clash here hence the two step replace
def get_binconfig_mangle(d):
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
deleted file mode 100644
index dc794228ff..0000000000
--- a/meta/classes/blacklist.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# anonymous support class from originally from angstrom
-#
-# To use the blacklist, a distribution should include this
-# class in the INHERIT_DISTRO
-#
-# No longer use ANGSTROM_BLACKLIST, instead use a table of
-# recipes in PNBLACKLIST
-#
-# Features:
-#
-# * To add a package to the blacklist, set:
-# PNBLACKLIST[pn] = "message"
-#
-
-python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
-
- if blacklist:
- raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
-}
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 55b12d7893..8db79a4829 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -31,7 +31,7 @@ BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}
# of failed builds.
#
# The expected usage is via auto.conf, but passing via the command line also works
-# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+# with: BB_ENV_PASSTHROUGH_ADDITIONS=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
BUILDHISTORY_RESET ?= ""
BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
@@ -45,14 +45,14 @@ BUILDHISTORY_PUSH_REPO ?= ""
BUILDHISTORY_TAG ?= "build"
BUILDHISTORY_PATH_PREFIX_STRIP ?= ""
-SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
+SSTATEPOSTINSTFUNCS:append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# Similarly for our function that gets the output signatures
-SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
@@ -91,13 +91,19 @@ buildhistory_emit_sysroot() {
python buildhistory_emit_pkghistory() {
if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
bb.build.exec_func("buildhistory_emit_sysroot", d)
-
- if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
+ if d.getVar('BB_CURRENTTASK') in ['package', 'package_setscene']:
+ # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
+ bb.build.exec_func("buildhistory_list_pkg_files", d)
+ return 0
+
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
+ return 0
+
import re
import json
import shlex
@@ -287,7 +293,7 @@ python buildhistory_emit_pkghistory() {
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
- package_qa_handle_error("version-going-backwards", msg, d)
+ oe.qa.handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
# Apparently the version can be different on a per-package basis (see Python)
@@ -319,8 +325,7 @@ python buildhistory_emit_pkghistory() {
write_pkghistory(pkginfo, d)
- # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
- bb.build.exec_func("buildhistory_list_pkg_files", d)
+ oe.qa.exit_if_errors(d)
}
python buildhistory_emit_outputsigs() {
@@ -442,11 +447,16 @@ def buildhistory_list_installed(d, rootfs_type="image"):
else:
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+ if rootfs_type == "sdk_host":
+ pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
+ else:
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+
for output_type, output_file in process_list:
output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
- output.write(format_pkg_list(pkgs, output_type))
+ output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))
python buildhistory_list_installed_image() {
buildhistory_list_installed(d)
@@ -487,6 +497,8 @@ buildhistory_get_installed() {
-e 's:|: -> :' \
-e 's:"\[REC\]":[style=dotted]:' \
-e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
+ -e 's:"\[RPROVIDES\]":[style=dashed]:' \
$1/depends.tmp
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
@@ -494,11 +506,22 @@ buildhistory_get_installed() {
echo "}" >> $1/depends.dot
rm $1/depends.tmp
+ # Set correct pkgdatadir
+ pkgdatadir=${PKGDATA_DIR}
+    if [ "$2" = "sdk" ] && [ "$3" = "host" ]; then
+ pkgdatadir="${PKGDATA_DIR_SDK}"
+ fi
+
# Produce installed package sizes list
- oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+ oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
+ # Produce package info: runtime_name, buildtime_name, recipe, version, size
+ oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
+ cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
+ rm $1/installed-package-info.tmp
+
# We're now done with the cache, delete it
rm $pkgcache
@@ -535,7 +558,7 @@ buildhistory_get_sdk_installed() {
return
fi
- buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
+ buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
}
buildhistory_get_sdk_installed_host() {
@@ -676,17 +699,17 @@ IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;
IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target;"
POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host;"
POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
-SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
@@ -766,11 +789,11 @@ def buildhistory_get_imagevars(d):
def buildhistory_get_sdkvars(d):
if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
- sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
- sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
- listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
+ sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
+ listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
return outputvars(sdkvars, listvars, d)
@@ -873,6 +896,7 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
@@ -915,22 +939,12 @@ def _get_srcrev_values(d):
if urldata[u].method.supports_srcrev():
scms.append(u)
- autoinc_templ = 'AUTOINC+'
dict_srcrevs = {}
dict_tag_srcrevs = {}
for scm in scms:
ud = urldata[scm]
for name in ud.names:
- try:
- rev = ud.method.sortable_revision(ud, d, name)
- except TypeError:
- # support old bitbake versions
- rev = ud.method.sortable_revision(scm, ud, d, name)
- # Clean this up when we next bump bitbake version
- if type(rev) != str:
- autoinc, rev = rev
- elif rev.startswith(autoinc_templ):
- rev = rev[len(autoinc_templ):]
+ autoinc, rev = ud.method.sortable_revision(ud, d, name)
dict_srcrevs[name] = rev
if 'tag' in ud.parm:
tag = ud.parm['tag'];
@@ -961,23 +975,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
new file mode 100644
index 0000000000..4a780a501f
--- /dev/null
+++ b/meta/classes/cargo.bbclass
@@ -0,0 +1,90 @@
+##
+## Purpose:
+## This class is used by any recipes that are built using
+## Cargo.
+
+inherit cargo_common
+
+# the binary we will use
+CARGO = "cargo"
+
+# We need cargo to compile for the target
+BASEDEPENDS:append = " cargo-native"
+
+# Ensure we get the right rust variant
+DEPENDS:append:class-target = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-nativesdk = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-native = " rust-native"
+
+# Enable build separation
+B = "${WORKDIR}/build"
+
+# In case something fails in the build process, give a bit more feedback on
+# where the issue occurred
+export RUST_BACKTRACE = "1"
+
+# The directory of the Cargo.toml relative to the root directory, per default
+# assume there's a Cargo.toml directly in the root directory
+CARGO_SRC_DIR ??= ""
+
+# The actual path to the Cargo.toml
+MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
+
+RUSTFLAGS ??= ""
+BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_BUILD_FLAGS = "-v --target ${HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
+
+# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
+# change if CARGO_BUILD_FLAGS changes.
+BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_TARGET_SUBDIR="${HOST_SYS}/${BUILD_DIR}"
+oe_cargo_build () {
+ export RUSTFLAGS="${RUSTFLAGS}"
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote "cargo = $(which ${CARGO})"
+ bbnote "rustc = $(which ${RUSTC})"
+ bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
+ "${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
+}
+
+do_compile[progress] = "outof:\s+(\d+)/(\d+)"
+cargo_do_compile () {
+ oe_cargo_fix_env
+ oe_cargo_build
+}
+
+cargo_do_install () {
+ local have_installed=false
+ for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
+ case $tgt in
+ *.so|*.rlib)
+ install -d "${D}${rustlibdir}"
+ install -m755 "$tgt" "${D}${rustlibdir}"
+ have_installed=true
+ ;;
+ *examples)
+ if [ -d "$tgt" ]; then
+ for example in "$tgt/"*; do
+ if [ -f "$example" ] && [ -x "$example" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$example" "${D}${bindir}"
+ have_installed=true
+ fi
+ done
+ fi
+ ;;
+ *)
+ if [ -f "$tgt" ] && [ -x "$tgt" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$tgt" "${D}${bindir}"
+ have_installed=true
+ fi
+ ;;
+ esac
+ done
+ if ! $have_installed; then
+ die "Did not find anything to install"
+ fi
+}
+
+EXPORT_FUNCTIONS do_compile do_install
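
A minimal consumer of the new class, as a hedged sketch; the recipe name, URL, revision and checksum are hypothetical placeholders:

    SUMMARY = "Example Rust CLI built with cargo.bbclass"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://LICENSE;md5=..."

    SRC_URI = "git://example.com/hello-rs.git;protocol=https;branch=main"
    SRCREV = "..."
    S = "${WORKDIR}/git"

    inherit cargo

    # only needed when Cargo.toml is not at the top of ${S}
    CARGO_SRC_DIR = ""
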
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
new file mode 100644
index 0000000000..90fad75415
--- /dev/null
+++ b/meta/classes/cargo_common.bbclass
@@ -0,0 +1,124 @@
+##
+## Purpose:
+## This class is to support building with cargo. It
+## must be different than cargo.bbclass because Rust
+## now builds with Cargo but cannot use cargo.bbclass
+## due to dependencies and assumptions in cargo.bbclass
+## that Rust & Cargo are already installed. So this
+## is used by cargo.bbclass and Rust
+##
+
+# add crate fetch support
+inherit rust-common
+
+# Where we download our registry and dependencies to
+export CARGO_HOME = "${WORKDIR}/cargo_home"
+
+# The pkg-config-rs library used by cargo build scripts disables itself when
+# cross compiling unless this is defined. We set up pkg-config appropriately
+# for cross compilation, so tell it we know better than it.
+export PKG_CONFIG_ALLOW_CROSS = "1"
+
+# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
+# for example the rust compiler itself, come with their own vendored sources.
+# Specifying two [source.crates-io] will not work.
+CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
+
+# Used by libstd-rs to point to the vendor dir included in rustc src
+CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
+
+CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
+cargo_common_do_configure () {
+ mkdir -p ${CARGO_HOME}/bitbake
+
+ cat <<- EOF > ${CARGO_HOME}/config
+ # EXTRA_OECARGO_PATHS
+ paths = [
+ $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
+ ]
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # Local mirror vendored by bitbake
+ [source.bitbake]
+ directory = "${CARGO_VENDORING_DIRECTORY}"
+ EOF
+
+ if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [source.crates-io]
+ replace-with = "bitbake"
+ local-registry = "/nonexistant"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [http]
+ # Multiplexing can't be enabled because http2 can't be enabled
+ # in curl-native without dependency loops
+ multiplexing = false
+
+ # Ignore the hard coded and incorrect path to certificates
+ cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
+
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # HOST_SYS
+ [target.${HOST_SYS}]
+ linker = "${CARGO_RUST_TARGET_CCLD}"
+ EOF
+
+ if [ "${HOST_SYS}" != "${BUILD_SYS}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # BUILD_SYS
+ [target.${BUILD_SYS}]
+ linker = "${RUST_BUILD_CCLD}"
+ EOF
+ fi
+
+ # Put build output in build directory preferred by bitbake instead of
+ # inside source directory unless they are the same
+ if [ "${B}" != "${S}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [build]
+	# Use an out-of-tree build destination to avoid polluting the source tree
+ target-dir = "${B}/target"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [term]
+ progress.when = 'always'
+ progress.width = 80
+ EOF
+}
+
+oe_cargo_fix_env () {
+ export CC="${RUST_TARGET_CC}"
+ export CXX="${RUST_TARGET_CXX}"
+ export CFLAGS="${CFLAGS}"
+ export CXXFLAGS="${CXXFLAGS}"
+ export AR="${AR}"
+ export TARGET_CC="${RUST_TARGET_CC}"
+ export TARGET_CXX="${RUST_TARGET_CXX}"
+ export TARGET_CFLAGS="${CFLAGS}"
+ export TARGET_CXXFLAGS="${CXXFLAGS}"
+ export TARGET_AR="${AR}"
+ export HOST_CC="${RUST_BUILD_CC}"
+ export HOST_CXX="${RUST_BUILD_CXX}"
+ export HOST_CFLAGS="${BUILD_CFLAGS}"
+ export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
+ export HOST_AR="${BUILD_AR}"
+}
+
+EXTRA_OECARGO_PATHS ??= ""
+
+EXPORT_FUNCTIONS do_configure
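
Recipes that ship their own vendored crates (the Rust compiler itself being the motivating case noted above) can opt out of the bitbake crates-io replacement; an illustrative fragment:

    # vendored sources come with the tarball, so skip bitbake's local registry
    CARGO_DISABLE_BITBAKE_VENDORING = "1"
    CARGO_VENDORING_DIRECTORY = "${S}/vendor"
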
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index f01db7480b..d9bcddbdbb 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,7 +1,7 @@
# Path to the CMake file to process.
OECMAKE_SOURCEPATH ??= "${S}"
-DEPENDS_prepend = "cmake-native "
+DEPENDS:prepend = "cmake-native "
B = "${WORKDIR}/build"
# What CMake generator to use.
@@ -31,8 +31,6 @@ OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
-CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
def oecmake_map_compiler(compiler, d):
args = d.getVar(compiler).split()
@@ -57,13 +55,13 @@ OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
-EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
export CMAKE_BUILD_PARALLEL_LEVEL
-CMAKE_BUILD_PARALLEL_LEVEL_task-compile = "${@oe.utils.parallel_make(d, False)}"
-CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
@@ -189,6 +187,7 @@ cmake_do_configure() {
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
+ -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
${EXTRA_OECMAKE} \
-Wno-dev
}
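
Setting FETCHCONTENT_FULLY_DISCONNECTED=ON stops CMake's FetchContent module from downloading at configure time, keeping all fetching under bitbake's control. A hedged sketch of how a recipe would then supply such a dependency itself (dependency name and URL are hypothetical):

    SRC_URI += "https://example.com/somedep-1.0.tar.gz;subdir=somedep"
    # point FetchContent at the pre-fetched sources instead of the network
    EXTRA_OECMAKE += "-DFETCHCONTENT_SOURCE_DIR_SOMEDEP=${WORKDIR}/somedep"
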
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
index d6d11fad26..379b6c169e 100644
--- a/meta/classes/compress_doc.bbclass
+++ b/meta/classes/compress_doc.bbclass
@@ -8,7 +8,7 @@
#
# 3. It is easy to add a new compression type by editing
# local.conf, such as:
-# DOC_COMPRESS_LIST_append = ' abc'
+# DOC_COMPRESS_LIST:append = ' abc'
# DOC_COMPRESS = 'abc'
# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
@@ -225,7 +225,7 @@ python compress_doc_updatealternatives () {
infodir = d.getVar("infodir")
compress_mode = d.getVar('DOC_COMPRESS')
for pkg in (d.getVar('PACKAGES') or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
+ old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
new_names = []
for old_name in old_names:
old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
@@ -258,6 +258,6 @@ python compress_doc_updatealternatives () {
new_names.append(new_name)
if new_names:
- d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
+ d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
}
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index 1fc3f0bcb0..93d11e1bee 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,10 +2,10 @@
# cpan-base providers various perl related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
+FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
-RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
+RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
inherit perl-version
@@ -15,12 +15,12 @@ def is_target(d):
return "no"
PERLLIBDIRS = "${libdir}/perl5"
-PERLLIBDIRS_class-native = "${libdir}/perl5"
+PERLLIBDIRS:class-native = "${libdir}/perl5"
def cpan_upstream_check_pattern(d):
for x in (d.getVar('SRC_URI') or '').split(' '):
if x.startswith("https://cpan.metacpan.org"):
- _pattern = x.split('/')[-1].replace(d.getVar('PV'), '(?P<pver>\d+.\d+)')
+ _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)')
return _pattern
return ''
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index e9908ae4b8..18f1b9d575 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -41,12 +41,12 @@ cpan_do_configure () {
fi
}
-do_configure_append_class-target() {
+do_configure:append:class-target() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
-do_configure_append_class-nativesdk() {
+do_configure:append:class-nativesdk() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..1a4804a7c5
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,1022 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_INCLUDE_PACKAGED ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents create using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
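
An illustrative way to enable the new class distro-wide; the flags shown are the ones defined above, and turning them on is optional:

    INHERIT += "create-spdx"
    # also describe (and archive) the unpacked sources in the SBOM
    SPDX_INCLUDE_SOURCES = "1"
    SPDX_ARCHIVE_SOURCES = "1"
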
+do_image_complete[depends] = "virtual/kernel:do_create_spdx"
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(b'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ avail_licenses = available_licenses(d)
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ elif name in avail_licenses:
+ # This license can be found in COMMON_LICENSE_DIR or LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # Error out, as the license was in avail_licenses so should
+ # be on disk somewhere.
+ bb.error("Cannot find text for license %s" % name)
+ else:
+ # If it's not SPDX, or PD, or in avail_licenses, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
+
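
For illustration, a typical LICENSE expression is tokenized and rewritten operator by operator:

    # convert_license_to_spdx("GPL-2.0-only & (MIT | BSD-3-Clause)", doc, d)
    #   -> "GPL-2.0-only AND ( MIT OR BSD-3-Clause )"
    # convert_license_to_spdx("CLOSED", doc, d)
    #   -> "NONE"

Anything not known to SPDX (and not mapped via SPDXLICENSEMAP) becomes a LicenseRef- entry with its extracted text attached to the document.
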
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+        bb.debug(1, 'spdx: there is a bug in the scan of %s, do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if filepath.is_file() and not filepath.is_symlink():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
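
The last few lines above compute the SPDX package verification code: the SHA1 of the sorted, concatenated hex SHA1 digests of every file in the package. A self-contained sketch of the same calculation:

    import hashlib

    def verification_code(file_sha1s):
        # Sort the per-file SHA1 hex digests, then hash their concatenation
        verifier = hashlib.sha1()
        for v in sorted(file_sha1s):
            verifier.update(v.encode("utf-8"))
        return verifier.hexdigest()
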
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
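The pkg_file lookup above relies on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. when no package file matched. A minimal illustration with hypothetical file names:

    for pkg_file in ["usr/bin/foo"]:
        if "usr/bin/bar" == pkg_file:
            break
    else:
        print("no match")  # reached, since break never fired
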
+def collect_dep_recipes(d, doc, spdx_recipe):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
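
BB_TASKDEPDATA maps task ids to tuples whose first two fields are the recipe name and task name, which is all the comprehension in collect_dep_recipes uses: it gathers every other recipe that runs do_create_spdx. A sketch with hypothetical data (real entries carry additional fields):

    taskdepdata = {
        "t1": ("zlib", "do_create_spdx"),
        "t2": ("openssl", "do_create_spdx"),
        "t3": ("mypkg", "do_create_spdx"),
    }
    deps = sorted(set(dep[0] for dep in taskdepdata.values()
                      if dep[1] == "do_create_spdx" and dep[0] != "mypkg"))
    # ['openssl', 'zlib']
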
+
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes, as they
+ # also match non-native sources.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import bb.compress.zstd
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_packaged = d.getVar("SPDX_INCLUDE_PACKAGED") == "1"
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ for s in d.getVar('SRC_URI').split():
+ if not s.startswith("file://"):
+ recipe.downloadLocation = s
+ break
+ else:
+ recipe.downloadLocation = "NOASSERTION"
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+ # save the CVEs fixed by patches to source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes")
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages")
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies in order to archive the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
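
collect_package_providers inverts RPROVIDES into a provider map. Assuming bb.utils.explode_dep_versions2 parses a dependency string into a name-to-version-constraints mapping (roughly as sketched below), only the names are kept, each pointing back at the real providing package:

    import bb.utils  # available inside a BitBake execution context

    rprovides = bb.utils.explode_dep_versions2("libfoo (= 1.2) foo-alternative")
    # roughly: {'libfoo': ['= 1.2'], 'foo-alternative': []}
    providers = {name: "libfoo1" for name in rprovides}  # hypothetical package name
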
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy)
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+ Save the patched source of the recipe in SPDXWORK.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR so that do_unpack and do_patch run in another directory.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+ # Changing 'WORKDIR' also changes 'B', so create the 'B' directory in
+ # case the following tasks need it (for example, some recipes' do_patch
+ # requires 'B' to exist).
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy the kernel source to spdx_workdir
+ if is_work_shared_spdx(d):
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_kernel_result = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_kernel_result = " + cmd_copy_kernel_result)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+ shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland recipes have no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages)
+
+ if image_link_name:
+ image_spdx_path = imgdeploydir / (image_name + ".spdx.json")
+ image_spdx_link = imgdeploydir / (image_link_name + ".spdx.json")
+ image_spdx_link.symlink_to(os.path.relpath(image_spdx_path, image_spdx_link.parent))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
+ make_image_link(spdx_tar_path, ".spdx.tar.zst")
+ spdx_index_path = imgdeploydir / (image_name + ".spdx.index.json")
+ make_image_link(spdx_index_path, ".spdx.index.json")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages)
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import bb.compress.zstd
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = rootfs_deploydir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True)
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
+ with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(index, sort_keys=True).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
+
+ spdx_index_path = rootfs_deploydir / (rootfs_name + ".spdx.index.json")
+ with spdx_index_path.open("w") as f:
+ json.dump(index, f, sort_keys=True)
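
The tarball and the index written by combine_spdx share one structure: for every SPDX document pulled into the archive, the index records its filename, document namespace, and SHA1. A sketch of the resulting index.json content as a Python literal (all values hypothetical):

    index = {
        "documents": [
            {
                "filename": "recipe-zlib.spdx.json",
                "documentNamespace": "http://spdx.org/spdxdocs/recipe-zlib-...",  # hypothetical
                "sha1": "da39a3ee5e6b4b0d3255bfef95601890afd80709",
            },
        ],
    }
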
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index 39b5bb93f4..a0e9d23836 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -36,10 +36,12 @@ python () {
return
tos = d.getVar("TARGET_OS")
- whitelist = ["mingw32"]
+ tos_known = ["mingw32"]
extralibcs = [""]
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
+ if "android" in tos:
+ extralibcs.append("android")
for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
for libc in extralibcs:
entry = "linux"
@@ -49,8 +51,8 @@ python () {
entry = entry + "-gnu" + variant
elif libc:
entry = entry + "-" + libc
- whitelist.append(entry)
- if tos not in whitelist:
+ tos_known.append(entry)
+ if tos not in tos_known:
bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
for n in ["PROVIDES", "DEPENDS"]:
@@ -104,7 +106,7 @@ STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
-PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
+PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
HOST_ARCH = "${SDK_ARCH}"
@@ -129,7 +131,7 @@ LDFLAGS = "${BUILDSDK_LDFLAGS} \
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
# Path mangling needed by the cross packaging
@@ -153,7 +155,7 @@ base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-FILES_${PN} = "${prefix}"
+FILES:${PN} = "${prefix}"
export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
@@ -167,7 +169,7 @@ USE_NLS = "${SDKUSE_NLS}"
# and not any particular tune that is enabled.
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index bfec91d043..9d951076a7 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -7,7 +7,7 @@ EXCLUDE_FROM_WORLD = "1"
CLASSOVERRIDE = "class-cross"
PACKAGES = ""
PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
+PACKAGES_DYNAMIC:class-native = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
@@ -72,10 +72,6 @@ libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
do_packagedata[stamp-extra-info] = ""
-do_install () {
- oe_runmake 'DESTDIR=${D}' install
-}
-
USE_NLS = "no"
export CC = "${BUILD_CC}"
@@ -97,3 +93,5 @@ python do_addto_recipe_sysroot () {
}
addtask addto_recipe_sysroot after do_populate_sysroot
do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
+
+PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 112ee3379d..dfad10c22b 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,7 +20,7 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses defaults to BPN, but may need to
# be overriden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
@@ -43,24 +43,25 @@ CVE_CHECK_CREATE_MANIFEST ??= "1"
CVE_CHECK_REPORT_PATCHED ??= "1"
-# Whitelist for packages (PN)
-CVE_CHECK_PN_WHITELIST ?= ""
+# Skip CVE Check for packages (PN)
+CVE_CHECK_SKIP_RECIPE ?= ""
-# Whitelist for CVE. If a CVE is found, then it is considered patched.
-# The value is a string containing space separated CVE values:
+# Ignore the check for a given list of CVEs. If a CVE is found,
+# then it is considered patched. The value is a string containing
+# space separated CVE values:
#
-# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
+# CVE_CHECK_IGNORE = 'CVE-2014-2524 CVE-2018-1234'
#
-CVE_CHECK_WHITELIST ?= ""
+CVE_CHECK_IGNORE ?= ""
# Layers to be excluded
CVE_CHECK_LAYER_EXCLUDELIST ??= ""
-# Layers to be included
+# Layers to be included
CVE_CHECK_LAYER_INCLUDELIST ??= ""
-# set to "alphabetical" for version using single alphabetical character as increament release
+# set to "alphabetical" for version using single alphabetical character as increment release
CVE_VERSION_SUFFIX ??= ""
python cve_save_summary_handler () {
@@ -94,16 +95,17 @@ python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
try:
- patched_cves = get_patches_cves(d)
+ patched_cves = get_patched_cves(d)
except FileNotFoundError:
bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
+ ignored, patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
+ cve_write_data(d, patched, unpatched, ignored, cve_data)
else:
bb.note("No CVE database found, skipping CVE check")
@@ -142,6 +144,7 @@ python cve_check_write_rootfs_manifest () {
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+ bb.utils.mkdirhier(os.path.dirname(manifest_name))
shutil.copyfile(cve_tmp_file, manifest_name)
if manifest_name and os.path.exists(manifest_name):
@@ -153,65 +156,9 @@ python cve_check_write_rootfs_manifest () {
bb.plain("Image CVE report stored in: %s" % manifest_name)
}
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
-
- import re
-
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
-
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
-
- return patched_cves
-
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
@@ -230,15 +177,12 @@ def check_cves(d, patched_cves):
return ([], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
- # If the recipe has been whitlisted we return empty lists
- if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split():
- bb.note("Recipe has been whitelisted, skipping check")
+ # If the recipe has been skipped/ignored we return empty lists
+ if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
+ bb.note("Recipe has been skipped by cve-check")
return ([], [], [])
- old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
- if old_cve_whitelist:
- bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
- cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
+ cve_ignore = d.getVar("CVE_CHECK_IGNORE").split()
import sqlite3
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
@@ -255,9 +199,9 @@ def check_cves(d, patched_cves):
for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
cve = cverow[0]
- if cve in cve_whitelist:
- bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
+ if cve in cve_ignore:
+ bb.note("%s-%s has been ignored for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'ignored'
patched_cves.add(cve)
continue
elif cve in patched_cves:
@@ -311,7 +255,7 @@ def check_cves(d, patched_cves):
conn.close()
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cve_ignore), list(patched_cves), cves_unpatched)
def get_cve_info(d, cves):
"""
@@ -321,7 +265,8 @@ def get_cve_info(d, cves):
import sqlite3
cve_data = {}
- conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
@@ -335,7 +280,7 @@ def get_cve_info(d, cves):
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
+def cve_write_data(d, patched, unpatched, ignored, cve_data):
"""
Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
CVE manifest if enabled.
@@ -355,7 +300,7 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
if include_layers and layer not in include_layers:
return
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
@@ -368,8 +313,8 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
- write_string += "CVE STATUS: Whitelisted\n"
+ if cve in ignored:
+ write_string += "CVE STATUS: Ignored\n"
elif is_patched:
write_string += "CVE STATUS: Patched\n"
else:
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index 6f8a599ccb..8367be9f37 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -4,7 +4,7 @@
# depends are correct
#
# Custom library package names can be defined setting
-# DEBIANNAME_ + pkgname to the desired name.
+# DEBIANNAME: + pkgname to the desired name.
#
# Better expressed as ensure all RDEPENDS package before we package
# This means we can't have circular RDEPENDS/RRECOMMENDS
@@ -14,6 +14,10 @@ AUTO_LIBNAME_PKGS = "${PACKAGES}"
inherit package
DEBIANRDEP = "do_packagedata"
+do_package_write_ipk[deptask] = "${DEBIANRDEP}"
+do_package_write_deb[deptask] = "${DEBIANRDEP}"
+do_package_write_tar[deptask] = "${DEBIANRDEP}"
+do_package_write_rpm[deptask] = "${DEBIANRDEP}"
do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
@@ -51,11 +55,11 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg)
+ newpkg = d.getVar('PKG:' + pkg)
if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
+ provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
+ d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
p = lambda var: pathlib.PurePath(d.getVar(var))
@@ -110,10 +114,10 @@ python debian_package_name_hook () {
if soname_result:
(pkgname, devname) = soname_result
for pkg in packages.split():
- if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
+ if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
add_rprovides(pkg, d)
continue
- debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
+ debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
if debian_pn:
newpkg = debian_pn
elif pkg == orig_pkg:
@@ -126,7 +130,7 @@ python debian_package_name_hook () {
newpkg = mlpre + newpkg
if newpkg != pkg:
bb.note("debian: renaming %s to %s" % (pkg, newpkg))
- d.setVar('PKG_' + pkg, newpkg)
+ d.setVar('PKG:' + pkg, newpkg)
add_rprovides(pkg, d)
else:
add_rprovides(orig_pkg, d)
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
index 737c26122b..7fbffe996b 100644
--- a/meta/classes/deploy.bbclass
+++ b/meta/classes/deploy.bbclass
@@ -7,6 +7,6 @@ python do_deploy_setscene () {
sstate_setscene(d)
}
addtask do_deploy_setscene
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[dirs] = "${B}"
do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
index ece883accf..2a62ae7bc8 100644
--- a/meta/classes/devicetree.bbclass
+++ b/meta/classes/devicetree.bbclass
@@ -15,9 +15,9 @@
SECTION ?= "bsp"
# The default inclusion of kernel device tree includes and headers means that
-# device trees built with them are at least GPLv2 (and in some cases dual
-# licensed). Default to GPLv2 if the recipe does not specify a license.
-LICENSE ?= "GPLv2"
+# device trees built with them are at least GPL-2.0-only (and in some cases dual
+# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
+LICENSE ?= "GPL-2.0-only"
LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
INHIBIT_DEFAULT_DEPS = "1"
@@ -32,7 +32,7 @@ PROVIDES = "virtual/dtb"
PACKAGE_ARCH = "${MACHINE_ARCH}"
SYSROOT_DIRS += "/boot/devicetree"
-FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
+FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
S = "${WORKDIR}"
B = "${WORKDIR}/build"
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 76dd0b42ee..62dc958d9a 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -34,7 +34,7 @@ python () {
d.delVarFlag("do_devshell", "fakeroot")
}
-def devpyshell(d):
+def pydevshell(d):
import code
import select
@@ -140,17 +140,17 @@ def devpyshell(d):
os.kill(child, signal.SIGTERM)
break
-python do_devpyshell() {
+python do_pydevshell() {
import signal
try:
- devpyshell(d)
+ pydevshell(d)
except SystemExit:
# Stop the SIGTERM above causing an error exit code
return
finally:
return
}
-addtask devpyshell after do_patch
+addtask pydevshell after do_patch
-do_devpyshell[nostamp] = "1"
+do_pydevshell[nostamp] = "1"
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
index 7780c5482c..ba6dc4136c 100644
--- a/meta/classes/devupstream.bbclass
+++ b/meta/classes/devupstream.bbclass
@@ -4,8 +4,8 @@
#
# Usage:
# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
-# SRCREV_class-devupstream = "abcdef"
+# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
+# SRCREV:class-devupstream = "abcdef"
#
# If the first entry in SRC_URI is a git: URL then S is rewritten to
# WORKDIR/git.
@@ -16,8 +16,6 @@
# - If the fetcher requires native tools (such as subversion-native) then
# bitbake won't be able to add them automatically.
-CLASSOVERRIDE .= ":class-devupstream"
-
python devupstream_virtclass_handler () {
# Do nothing if this is inherited, as it's for BBCLASSEXTEND
if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
@@ -25,23 +23,32 @@ python devupstream_virtclass_handler () {
return
variant = d.getVar("BBEXTENDVARIANT")
- if variant not in ("target"):
- bb.error("Pass the variant when using devupstream, for example devupstream:target")
+ if variant not in ("target", "native"):
+ bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
return
# Develpment releases are never preferred by default
d.setVar("DEFAULT_PREFERENCE", "-1")
- uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
+ src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
+ uri = bb.fetch2.URI(src_uri.split()[0])
- if uri.scheme == "git":
+ if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
d.setVar("S", "${WORKDIR}/git")
# Modify the PV if the recipe hasn't already overridden it
pv = d.getVar("PV")
proto_marker = "+" + uri.scheme
- if proto_marker not in pv:
+ if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
d.setVar("PV", pv + proto_marker + "${SRCPV}")
+
+ if variant == "native":
+ pn = d.getVar("PN")
+ d.setVar("PN", "%s-native" % (pn))
+ fn = d.getVar("FILE")
+ bb.parse.BBHandler.inherit("native", fn, 0, d)
+
+ d.appendVar("CLASSOVERRIDE", ":class-devupstream")
}
addhandler devupstream_virtclass_handler
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
index 9f4db0d771..bf3a2b2090 100644
--- a/meta/classes/distrooverrides.bbclass
+++ b/meta/classes/distrooverrides.bbclass
@@ -6,7 +6,7 @@
# This makes it simpler to write .bbappends that only change the
# task signatures of the recipe if the change is really enabled,
# for example with:
-# do_install_append_df-my-feature () { ... }
+# do_install:append:df-my-feature () { ... }
# where "my-feature" is a DISTRO_FEATURE.
#
# The class is meant to be used in a layer.conf or distro
@@ -22,8 +22,8 @@ DISTRO_FEATURES_OVERRIDES ?= ""
DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
-DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
-DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
# signature because of this line, then the task dependency on
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
deleted file mode 100644
index 302ee8c82c..0000000000
--- a/meta/classes/distutils3-base.bbclass
+++ /dev/null
@@ -1,6 +0,0 @@
-DEPENDS_append_class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
-DEPENDS_append_class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base python3native python3targetconfig
-
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
deleted file mode 100644
index a916a8000c..0000000000
--- a/meta/classes/distutils3.bbclass
+++ /dev/null
@@ -1,67 +0,0 @@
-inherit distutils3-base
-
-B = "${WORKDIR}/build"
-distutils_do_configure[cleandirs] = "${B}"
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-DISTUTILS_PYTHON = "python3"
-DISTUTILS_PYTHON_class-native = "nativepython3"
-
-DISTUTILS_SETUP_PATH ?= "${S}"
-
-distutils3_do_configure() {
- :
-}
-
-distutils3_do_compile() {
- cd ${DISTUTILS_SETUP_PATH}
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
- build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-distutils3_do_compile[vardepsexclude] = "MACHINE"
-
-distutils3_do_install() {
- cd ${DISTUTILS_SETUP_PATH}
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
- build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; \
- -exec sed -i -e s:${D}::g {} \;
-
- for i in ${D}${bindir}/* ${D}${sbindir}/*; do
- if [ -f "$i" ]; then
- sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- fi
- done
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-}
-distutils3_do_install[vardepsexclude] = "MACHINE"
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 3d6b80bee2..abfe24bace 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -13,7 +13,7 @@
# called "myrecipe" you would do:
#
# INHERIT += "externalsrc"
-# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
#
# In order to make this class work for both target and native versions (or with
# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
@@ -21,7 +21,7 @@
# the default, but the build directory can be set to the source directory if
# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
#
-# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
#
SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
@@ -45,11 +45,11 @@ python () {
if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
+ d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
@@ -109,6 +109,15 @@ python () {
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if task == 'do_unpack':
+ # The reproducible build create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+ # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 90811bfe2a..a8ef660b30 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -14,10 +14,10 @@
inherit useradd_base
-PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
+PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
-ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
+ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group;"
# Image level user / group settings
set_user_group () {
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
index b3c8047861..3ef6b35baa 100644
--- a/meta/classes/features_check.bbclass
+++ b/meta/classes/features_check.bbclass
@@ -1,6 +1,6 @@
# Allow checking of required and conflicting features
#
-# xxx = [DISTRO,MACHINE,COMBINED]
+# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
#
# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
# in xxx_FEATURES.
@@ -18,13 +18,10 @@ python () {
unused = True
- for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
- if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and \
- d.overridedata.get('ANY_OF_' + kind + '_FEATURES') is None and \
- d.getVar('REQUIRED_' + kind + '_FEATURES') is None and \
- d.overridedata.get('REQUIRED_' + kind + '_FEATURES') is None and \
- d.getVar('CONFLICT_' + kind + '_FEATURES') is None and \
- d.overridedata.get('CONFLICT_' + kind + '_FEATURES') is None:
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
+ if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
+ d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
+ d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
continue
unused = False
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 624a420a0d..442bfc7392 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -35,23 +35,23 @@ python () {
deps = d.getVar("FONT_EXTRA_RDEPENDS")
for pkg in font_pkgs:
- if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
+ if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps)
}
python add_fontcache_postinsts() {
for pkg in d.getVar('FONT_PACKAGES').split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('fontcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('fontcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
index 3e3c509d5f..9d3668edd3 100644
--- a/meta/classes/gconf.bbclass
+++ b/meta/classes/gconf.bbclass
@@ -41,7 +41,7 @@ for SCHEMA in ${SCHEMA_FILES}; do
done
}
-python populate_packages_append () {
+python populate_packages:append () {
import re
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -57,15 +57,15 @@ python populate_packages_append () {
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gconf_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += d.getVar('gconf_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
- d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
+ d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
index be2ef3b311..f11cb04456 100644
--- a/meta/classes/gettext.bbclass
+++ b/meta/classes/gettext.bbclass
@@ -13,10 +13,10 @@ def gettext_oeconf(d):
return '--disable-nls'
return "--enable-nls"
-BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
-EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
+BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
+EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
# Without this, msgfmt from gettext-native will not find ITS files
# provided by target recipes (for example, polkit.its).
-GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
+GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
export GETTEXTDATADIRS
diff --git a/meta/classes/gi-docgen.bbclass b/meta/classes/gi-docgen.bbclass
index 5750f7028d..15581ca127 100644
--- a/meta/classes/gi-docgen.bbclass
+++ b/meta/classes/gi-docgen.bbclass
@@ -7,8 +7,8 @@
GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
# When building native recipes, disable gi-docgen, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-GIDOCGEN_ENABLED_class-native = "False"
-GIDOCGEN_ENABLED_class-nativesdk = "False"
+GIDOCGEN_ENABLED:class-native = "False"
+GIDOCGEN_ENABLED:class-nativesdk = "False"
# meson: default option name to enable/disable gi-docgen. This matches most
# projects' configuration. If in doubt, check meson_options.txt in the project's
@@ -18,7 +18,7 @@ GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
# Auto enable/disable based on GIDOCGEN_ENABLED
-EXTRA_OEMESON_prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
+EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
-DEPENDS_append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
+DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
index e429bd3197..021eeb1cf8 100644
--- a/meta/classes/gio-module-cache.bbclass
+++ b/meta/classes/gio-module-cache.bbclass
@@ -17,22 +17,22 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('GIO_MODULE_PACKAGES').split()
for pkg in packages:
bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
index db421745bd..2db4ac6846 100644
--- a/meta/classes/glide.bbclass
+++ b/meta/classes/glide.bbclass
@@ -2,8 +2,8 @@
#
# Copyright 2018 (C) O.S. Systems Software LTDA.
-DEPENDS_append = " glide-native"
+DEPENDS:append = " glide-native"
-do_compile_prepend() {
+do_compile:prepend() {
( cd ${B}/src/${GO_IMPORT} && glide install )
}
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index 884b1a106f..9a5bd9a232 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -7,7 +7,7 @@ SECTION ?= "x11/gnome"
GNOMEBN ?= "${BPN}"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
-FILES_${PN} += "${datadir}/application-registry \
+FILES:${PN} += "${datadir}/application-registry \
${datadir}/mime-info \
${datadir}/mime/packages \
${datadir}/mime/application \
@@ -19,12 +19,12 @@ FILES_${PN} += "${datadir}/application-registry \
${datadir}/icons \
"
-FILES_${PN}-doc += "${datadir}/devhelp"
+FILES:${PN}-doc += "${datadir}/devhelp"
GNOMEBASEBUILDCLASS ??= "autotools"
inherit ${GNOMEBASEBUILDCLASS} pkgconfig
-do_install_append() {
+do_install:append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
rm -rf ${D}${localstatedir}/scrollkeeper/*
rm -f ${D}${datadir}/applications/*.cache
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
index cabb04d0ec..674d2434e0 100644
--- a/meta/classes/go-mod.bbclass
+++ b/meta/classes/go-mod.bbclass
@@ -12,7 +12,7 @@
# The '-modcacherw' option ensures we have write access to the cached objects so
# we avoid errors during the clean task as well as when removing TMPDIR.
-GOBUILDFLAGS_append = " -modcacherw"
+GOBUILDFLAGS:append = " -modcacherw"
inherit go
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
index e230a80587..b282ff7374 100644
--- a/meta/classes/go-ptest.bbclass
+++ b/meta/classes/go-ptest.bbclass
@@ -50,5 +50,5 @@ do_install_ptest_base() {
chown -R root:root ${D}${PTEST_PATH}
}
-INSANE_SKIP_${PN}-ptest += "ldflags"
+INSANE_SKIP:${PN}-ptest += "ldflags"
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index 77ec98dd51..9c4c92bffd 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -2,8 +2,10 @@ inherit goarch
GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
-GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
-GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
+export GODEBUG = "gocachehash=1"
+
+GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
+GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
GOROOT = "${STAGING_LIBDIR}/go"
export GOROOT
export GOROOT_FINAL = "${libdir}/go"
@@ -15,42 +17,43 @@ export GOHOSTARCH="${BUILD_GOARCH}"
export GOHOSTOS="${BUILD_GOOS}"
GOARM[export] = "0"
-GOARM_arm_class-target = "${TARGET_GOARM}"
-GOARM_arm_class-target[export] = "1"
+GOARM:arm:class-target = "${TARGET_GOARM}"
+GOARM:arm:class-target[export] = "1"
GO386[export] = "0"
-GO386_x86_class-target = "${TARGET_GO386}"
-GO386_x86_class-target[export] = "1"
+GO386:x86:class-target = "${TARGET_GO386}"
+GO386:x86:class-target[export] = "1"
GOMIPS[export] = "0"
-GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
-GOMIPS_mips_class-target[export] = "1"
+GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
+GOMIPS:mips:class-target[export] = "1"
-DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_GOLANG_class-native = "go-native"
-DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-native = "go-native"
+DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_append = " ${DEPENDS_GOLANG}"
+DEPENDS:append = " ${DEPENDS_GOLANG}"
GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
GO_LINKMODE ?= ""
-GO_LINKMODE_class-nativesdk = "--linkmode=external"
-GO_LINKMODE_class-native = "--linkmode=external"
-GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
+GO_LINKMODE:class-nativesdk = "--linkmode=external"
+GO_LINKMODE:class-native = "--linkmode=external"
+GO_EXTRA_LDFLAGS ?= ""
+GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
export GOPATH_OMIT_IN_ACTIONID ?= "1"
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
export GOPTESTFLAGS ?= ""
-GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
+GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
export GO = "${HOST_PREFIX}go"
GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
-GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
+GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
export CGO_ENABLED ?= "1"
@@ -64,7 +67,7 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
-export GOTMPDIR ?= "${WORKDIR}/go-tmp"
+export GOTMPDIR ?= "${WORKDIR}/build-tmp"
GOTMPDIR[vardepvalue] = ""
python go_do_unpack() {
@@ -140,17 +143,17 @@ go_stage_testdata() {
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
-FILES_${PN}-dev = "${libdir}/go/src"
-FILES_${PN}-staticdev = "${libdir}/go/pkg"
+FILES:${PN}-dev = "${libdir}/go/src"
+FILES:${PN}-staticdev = "${libdir}/go/pkg"
-INSANE_SKIP_${PN} += "ldflags"
+INSANE_SKIP:${PN} += "ldflags"
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and its
# variants.
python() {
if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
+ d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
}
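To see how the renamed variables and the new GO_EXTRA_LDFLAGS hook are consumed, a minimal hypothetical Go recipe (import path, SRC_URI and the -X flag are illustrative):

    inherit go-mod
    GO_IMPORT = "example.com/hello"
    SRC_URI = "git://example.com/hello.git;branch=main;protocol=https"
    # extra flags are folded into GO_LDFLAGS by the class
    GO_EXTRA_LDFLAGS = "-X main.version=${PV}"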
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index e4e0ca37be..92fec16b82 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -6,54 +6,54 @@ HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-HOST_GOARM_class-native = "7"
-HOST_GO386_class-native = "sse2"
-HOST_GOMIPS_class-native = "hardfloat"
+HOST_GOARM:class-native = "7"
+HOST_GO386:class-native = "sse2"
+HOST_GOMIPS:class-native = "hardfloat"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-TARGET_GOARM_class-native = "7"
-TARGET_GO386_class-native = "sse2"
-TARGET_GOMIPS_class-native = "hardfloat"
+TARGET_GOARM:class-native = "7"
+TARGET_GO386:class-native = "sse2"
+TARGET_GOMIPS:class-native = "hardfloat"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
BASE_GOARM = ''
-BASE_GOARM_armv7ve = '7'
-BASE_GOARM_armv7a = '7'
-BASE_GOARM_armv6 = '6'
-BASE_GOARM_armv5 = '5'
+BASE_GOARM:armv7ve = '7'
+BASE_GOARM:armv7a = '7'
+BASE_GOARM:armv6 = '6'
+BASE_GOARM:armv5 = '5'
# Go supports dynamic linking on a limited set of architectures.
# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
GO_DYNLINK = ""
-GO_DYNLINK_arm = "1"
-GO_DYNLINK_aarch64 = "1"
-GO_DYNLINK_x86 = "1"
-GO_DYNLINK_x86-64 = "1"
-GO_DYNLINK_powerpc64 = "1"
-GO_DYNLINK_powerpc64le = "1"
-GO_DYNLINK_class-native = ""
-GO_DYNLINK_class-nativesdk = ""
+GO_DYNLINK:arm ?= "1"
+GO_DYNLINK:aarch64 ?= "1"
+GO_DYNLINK:x86 ?= "1"
+GO_DYNLINK:x86-64 ?= "1"
+GO_DYNLINK:powerpc64 ?= "1"
+GO_DYNLINK:powerpc64le ?= "1"
+GO_DYNLINK:class-native ?= ""
+GO_DYNLINK:class-nativesdk = ""
# define here because everybody inherits this class
#
-COMPATIBLE_HOST_linux-gnux32 = "null"
-COMPATIBLE_HOST_linux-muslx32 = "null"
-COMPATIBLE_HOST_powerpc = "null"
-COMPATIBLE_HOST_powerpc64 = "null"
-COMPATIBLE_HOST_mipsarchn32 = "null"
+COMPATIBLE_HOST:linux-gnux32 = "null"
+COMPATIBLE_HOST:linux-muslx32 = "null"
+COMPATIBLE_HOST:powerpc = "null"
+COMPATIBLE_HOST:powerpc64 = "null"
+COMPATIBLE_HOST:mipsarchn32 = "null"
-ARM_INSTRUCTION_SET_armv4 = "arm"
-ARM_INSTRUCTION_SET_armv5 = "arm"
-ARM_INSTRUCTION_SET_armv6 = "arm"
+ARM_INSTRUCTION_SET:armv4 = "arm"
+ARM_INSTRUCTION_SET:armv5 = "arm"
+ARM_INSTRUCTION_SET:armv6 = "arm"
-TUNE_CCARGS_remove = "-march=mips32r2"
+TUNE_CCARGS:remove = "-march=mips32r2"
SECURITY_NOPIE_CFLAGS ??= ""
# go can't be built with ccache:
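Because the per-architecture GO_DYNLINK assignments above become soft assignments (?=), a distro or local.conf can now override them without a bbappend; for example, assuming one wants statically linked Go binaries on arm:

    # local.conf / distro .conf sketch
    GO_DYNLINK:arm = ""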
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
index 504f75e28d..7bf9feb0d6 100644
--- a/meta/classes/gobject-introspection.bbclass
+++ b/meta/classes/gobject-introspection.bbclass
@@ -14,30 +14,32 @@ GIR_MESON_OPTION ?= 'introspection'
GIR_MESON_ENABLE_FLAG ?= 'true'
GIR_MESON_DISABLE_FLAG ?= 'false'
+# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
+GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
+GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
# Auto enable/disable based on GI_DATA_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
-
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
# When building native recipes, disable introspection, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-introspection "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
+EXTRA_OECONF:prepend:class-native = "--disable-introspection "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
+EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
+EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
+DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
# Even though introspection is disabled on -native, gobject-introspection package is still
# needed for m4 macros.
-DEPENDS_append_class-native = " gobject-introspection-native"
-DEPENDS_append_class-nativesdk = " gobject-introspection-native"
+DEPENDS:append:class-native = " gobject-introspection-native"
+DEPENDS:append:class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
-do_configure_prepend_class-target () {
+do_configure:prepend:class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
# have our fixes
mkdir -p ${S}/m4
@@ -46,8 +48,8 @@ do_configure_prepend_class-target () {
# .typelib files are needed at runtime and so they go to the main package (so
# they'll be together with libraries they support).
-FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
+FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
# .gir files go to the dev package, as they're needed for developing (but not
# for running) things that depend on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
+FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
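With GIRMESONTARGET/GIRMESONBUILD guarded on GIR_MESON_OPTION, a recipe whose build system has no introspection option at all can blank the option name instead of patching EXTRA_OEMESON; a hypothetical fragment:

    inherit gobject-introspection
    # project's meson.build exposes no introspection switch
    GIR_MESON_OPTION = ""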
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
index 33afc96a9c..3fa5bd40b3 100644
--- a/meta/classes/gsettings.bbclass
+++ b/meta/classes/gsettings.bbclass
@@ -13,30 +13,30 @@ python __anonymous() {
pkg = d.getVar("GSETTINGS_PACKAGE")
if pkg:
d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
- d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
- d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
+ d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
+ d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
}
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
-python populate_packages_append () {
+python populate_packages:append () {
pkg = d.getVar('GSETTINGS_PACKAGE')
if pkg:
bb.note("adding gsettings postinst scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note("adding gsettings postrm scripts to %s" % pkg)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
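The class keys everything off GSETTINGS_PACKAGE; a hypothetical recipe fragment that ships its schemas in a dedicated package (the package name is illustrative):

    inherit gsettings
    PACKAGES =+ "${PN}-schemas"
    GSETTINGS_PACKAGE = "${PN}-schemas"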
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
index ef99e63faf..07b46ac829 100644
--- a/meta/classes/gtk-doc.bbclass
+++ b/meta/classes/gtk-doc.bbclass
@@ -7,7 +7,7 @@
#
# It should be used in recipes to determine whether gtk-doc based documentation should be built,
# so that qemu use can be avoided when necessary.
-GTKDOC_ENABLED_class-native = "False"
+GTKDOC_ENABLED:class-native = "False"
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
@@ -19,20 +19,20 @@ GTKDOC_MESON_ENABLE_FLAG ?= 'true'
GTKDOC_MESON_DISABLE_FLAG ?= 'false'
# Auto enable/disable based on GTKDOC_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
+EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable gtkdoc, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
-EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
+EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
-DEPENDS_append = " gtk-doc-native"
+DEPENDS:append = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
@@ -41,15 +41,15 @@ GTKDOC_DOCDIR ?= "${S}"
export STAGING_DIR_HOST
inherit python3native pkgconfig qemu
-DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-do_configure_prepend () {
+do_configure:prepend () {
# Need to use ||true as this is only needed if configure.ac both exists
# and uses GTK_DOC_CHECK.
gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
}
-do_compile_prepend_class-target () {
+do_compile:prepend:class-target () {
if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
@@ -63,7 +63,7 @@ export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-# meson sets this wrongly (only to libs in build-dir), qemu-wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
unset LD_LIBRARY_PATH
if [ -d ".libs" ]; then
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index 340a283851..6808339b90 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -1,17 +1,22 @@
-FILES_${PN} += "${datadir}/icons/hicolor"
+FILES:${PN} += "${datadir}/icons/hicolor"
-#gtk+3 reqiure GTK3DISTROFEATURES, DEPENDS on it make all the
+GTKIC_VERSION ??= '3'
+
+GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
+GTKIC_CMD = "${@ 'gtk-update-icon-cache-3.0.0' if d.getVar('GTKIC_VERSION') == '4' else 'gtk4-update-icon-cache' }"
+
+#gtk+3/gtk4 require GTK3DISTROFEATURES; adding DEPENDS on them makes all
#recipes that inherit this class require GTK3DISTROFEATURES
inherit features_check
ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
-DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \
- ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \
- ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \
- gtk+3-native \
+DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
+ ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
+ ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
+ ${GTKPN}-native \
"
-PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
+PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
@@ -25,7 +30,7 @@ else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
+ ${GTKIC_CMD} -fqt $icondir
fi
done
fi
@@ -39,13 +44,13 @@ if [ "x$D" != "x" ]; then
else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -qt $icondir
+ ${GTKIC_CMD} -qt $icondir
fi
done
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -56,29 +61,29 @@ python populate_packages_append () {
bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
- #gtk_icon_cache_postinst depend on gdk-pixbuf and gtk+3
+    #gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4
bb.note("adding gdk-pixbuf dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
- bb.note("adding gtk+3 dependency to %s" % pkg)
- rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
+ bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_icon_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_icon_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
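Opting a GTK4 application into the new machinery only needs the version knob; a hypothetical fragment:

    inherit gtk-icon-cache
    # selects the gtk4 DEPENDS and the gtk4-update-icon-cache postinst command
    GTKIC_VERSION = "4"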
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
index 8e783fb493..2107517540 100644
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -47,23 +47,23 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
for pkg in gtkimmodules_pkgs:
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_immodule_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_immodule_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
python __anonymous() {
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index 80943fcf02..9b912a3083 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -19,22 +19,21 @@
# or the default one provided by icecc-create-env.bb will be used
# (NOTE that this is a modified version of the script and *not* the one that comes with icecc)
#
-# User can specify if specific packages or packages belonging to class should not use icecc to distribute
-# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
-# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages
-# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
+# The user can specify that specific recipes, or recipes belonging to a class, should not use icecc
+# to distribute compile jobs to remote machines but should be handled locally, by defining
+# ICECC_CLASS_DISABLE and ICECC_RECIPE_DISABLE with the appropriate values in local.conf. In addition,
+# the user can force icecc on for recipes which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
#
#########################################################################################
#Error checking is kept to a minimum, so double-check any parameters you pass to the class
###########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
- ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
+ ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
- ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
- ICECC_REMOTE_CPP \
+ ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -47,7 +46,7 @@ HOSTTOOLS_NONFATAL += "icecc patchelf"
#
# A useful thing to do for testing Icecream changes locally is to add a
# subversion in local.conf:
-# ICECC_ENV_VERSION_append = "-my-ver-1"
+# ICECC_ENV_VERSION:append = "-my-ver-1"
ICECC_ENV_VERSION = "2"
# Default to disabling the caret workaround, If set to "1" in local.conf, icecc
@@ -66,7 +65,7 @@ CXXFLAGS += "${ICECC_CFLAGS}"
# Debug flags when generating environments
ICECC_ENV_DEBUG ??= ""
-# "system" recipe blacklist contains a list of packages that can not distribute
+# ICECC_RECIPE_DISABLE contains a list of recipes that cannot distribute
# compile tasks for one reason or another. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later, e.g. when
# there is a new version
@@ -79,25 +78,25 @@ ICECC_ENV_DEBUG ??= ""
# inline assembly
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
-ICECC_SYSTEM_PACKAGE_BL += "\
+ICECC_RECIPE_DISABLE += "\
libgcc-initial \
pixman \
systemtap \
target-sdk-provides-dummy \
"
-# "system" classes that should be blacklisted. When adding new entry, please
+# Classes that should not use icecc. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later
#
# image - Images aren't compiled, but the testing framework for images captures
# PARALLEL_MAKE as part of the test environment. Many tests won't use
# icecream, but leaving the high level of parallelism can cause them to
# consume an unnecessary amount of resources.
-ICECC_SYSTEM_CLASS_BL += "\
+ICECC_CLASS_DISABLE += "\
image \
"
-def icecc_dep_prepend(d):
+def get_icecc_dep(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
@@ -105,7 +104,7 @@ def icecc_dep_prepend(d):
return "icecc-create-env-native"
return ""
-DEPENDS_prepend = "${@icecc_dep_prepend(d)} "
+DEPENDS:prepend = "${@get_icecc_dep(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
@@ -141,32 +140,28 @@ def use_icecc(bb,d):
pn = d.getVar('PN')
bpn = d.getVar('BPN')
- # Blacklist/whitelist checks are made against BPN, because there is a good
+ # Enable/disable checks are made against BPN, because there is a good
# chance that if icecc should be skipped for a recipe, it should be skipped
# for all the variants of that recipe. PN is still checked in case a user
# specified a more specific recipe.
check_pn = set([pn, bpn])
- system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
- package_class_blacklist = system_class_blacklist + user_class_blacklist
+ class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
- for black in package_class_blacklist:
- if bb.data.inherits_class(black, d):
- bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
+ for bbclass in class_disable:
+ if bb.data.inherits_class(bbclass, d):
+ bb.debug(1, "%s: bbclass %s found in disable, disable icecc" % (pn, bbclass))
return "no"
- system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split()
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
- package_blacklist = system_package_blacklist + user_package_blacklist
+ disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
+ enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
- if check_pn & set(package_blacklist):
- bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
+ if check_pn & set(disabled_recipes):
+ bb.debug(1, "%s: found in disable list, disable icecc" % pn)
return "no"
- if check_pn & set(user_package_whitelist):
- bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
+ if check_pn & set(enabled_recipes):
+ bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
@@ -309,7 +304,7 @@ wait_for_file() {
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
- TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
+ TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
@@ -362,8 +357,8 @@ set_icecc_env() {
return
fi
- ICE_VERSION=`$ICECC_CC -dumpversion`
- ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ ICE_VERSION="$($ICECC_CC -dumpversion)"
+ ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
@@ -390,18 +385,18 @@ set_icecc_env() {
chmod 775 $ICE_PATH/$compiler
done
- ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
# and just "as" for native, if it returns "as" in current directory (for whatever reason) use "as" from PATH
- if [ "`dirname "${ICECC_AS}"`" = "." ]
+ if [ "$(dirname "${ICECC_AS}")" = "." ]
then
ICECC_AS="${ICECC_WHICH_AS}"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
- mkdir -p "`dirname "${ICECC_VERSION}"`"
+ mkdir -p "$(dirname "${ICECC_VERSION}")"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
@@ -428,28 +423,32 @@ set_icecc_env() {
bbnote "Using icecc tarball: $ICECC_VERSION"
}
-do_configure_prepend() {
+do_configure[network] = "1"
+do_configure:prepend() {
set_icecc_env
}
-do_compile_prepend() {
+do_compile[network] = "1"
+do_compile:prepend() {
set_icecc_env
}
-do_compile_kernelmodules_prepend() {
+do_compile_kernelmodules[network] = "1"
+do_compile_kernelmodules:prepend() {
set_icecc_env
}
-do_install_prepend() {
+do_install[network] = "1"
+do_install:prepend() {
set_icecc_env
}
# IceCream is not (currently) supported in the extensible SDK
ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
-ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
# Don't include IceCream in uninative tarball
-ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
# Add the toolchain scripts to the SDK
-TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
+TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
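A local.conf sketch using the renamed variables (the recipe names below are placeholders, not recommendations):

    INHERIT += "icecc"
    # never distribute these recipes; always build them locally
    ICECC_RECIPE_DISABLE += "my-fragile-recipe"
    # distribute even though the recipe sets PARALLEL_MAKE = ""
    ICECC_RECIPE_ENABLE += "my-serial-recipe"
    # skip everything inheriting a given class
    ICECC_CLASS_DISABLE += "native"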
diff --git a/meta/classes/image-artifact-names.bbclass b/meta/classes/image-artifact-names.bbclass
index 3ac8dd731a..f5769e520f 100644
--- a/meta/classes/image-artifact-names.bbclass
+++ b/meta/classes/image-artifact-names.bbclass
@@ -4,7 +4,7 @@
IMAGE_BASENAME ?= "${PN}"
IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
-IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME"
+IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
@@ -13,3 +13,10 @@ IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
# by default) followed by additional suffices which describe the format (.ext4,
# .ext4.xz, etc.).
IMAGE_NAME_SUFFIX ??= ".rootfs"
+
+python () {
+ if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
+ import datetime
+ d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
+ d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
+}
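A worked example of the new anonymous python (the image and machine names are illustrative):

    # with SOURCE_DATE_EPOCH = "1609459200" (2021-01-01 00:00:00 UTC):
    #   IMAGE_VERSION_SUFFIX = "-20210101000000"
    #   IMAGE_NAME = "core-image-minimal-qemux86-64-20210101000000"

so deploy artifact names become reproducible instead of tracking the wall-clock DATETIME.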
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
index f4772f7ea1..e5dc61f857 100644
--- a/meta/classes/image-combined-dbg.bbclass
+++ b/meta/classes/image-combined-dbg.bbclass
@@ -1,4 +1,4 @@
-IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
+IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
combine_dbg_image () {
if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
index f002858bd2..3d1993576a 100644
--- a/meta/classes/image-container.bbclass
+++ b/meta/classes/image-container.bbclass
@@ -1,6 +1,6 @@
ROOTFS_BOOTSTRAP_INSTALL = ""
IMAGE_TYPES_MASKED += "container"
-IMAGE_TYPEDEP_container = "tar.bz2"
+IMAGE_TYPEDEP:container = "tar.bz2"
python __anonymous() {
if "container" in d.getVar("IMAGE_FSTYPES") and \
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index fd876ed8e1..2c948190cf 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -42,9 +42,9 @@ INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "live hddimg iso"
python() {
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
deleted file mode 100644
index ebf6e6d7ee..0000000000
--- a/meta/classes/image-prelink.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
-do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
-
-python prelink_setup () {
- oe.utils.write_ld_so_conf(d)
-}
-
-inherit linuxloader
-
-prelink_image () {
-# export PSEUDO_DEBUG=4
-# /bin/env | /bin/grep PSEUDO
-# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-# echo "LD_PRELOAD=$LD_PRELOAD"
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size before prelinking $pre_prelink_size."
-
- # The filesystem may not contain sysconfdir so establish what is present
- # to enable cleanup after temporary creation of sysconfdir if needed
- presentdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${IMAGE_ROOTFS}" != "${presentdir}" ] ; do
- [ ! -d "${presentdir}" ] || break
- presentdir=`dirname "${presentdir}"`
- done
-
- mkdir -p "${IMAGE_ROOTFS}${sysconfdir}"
-
- # We need a prelink conf on the filesystem, add one if it's missing
- if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
- cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
- ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- dummy_prelink_conf=true;
- else
- dummy_prelink_conf=false;
- fi
-
- # We need a ld.so.conf with pathnames in,prelink conf on the filesystem, add one if it's missing
- ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
- if [ -e $ldsoconf ]; then
- cp $ldsoconf $ldsoconf.prelink
- fi
- cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
-
- dynamic_loader=${@get_linuxloader(d)}
-
- # prelink!
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
- else
- export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
- fi
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- else
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- fi
-
- # Remove the prelink.conf if we had to add it.
- if [ "$dummy_prelink_conf" = "true" ]; then
- rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- fi
-
- if [ -e $ldsoconf.prelink ]; then
- mv $ldsoconf.prelink $ldsoconf
- else
- rm $ldsoconf
- fi
-
- # Remove any directories temporarily created for sysconfdir
- cleanupdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${presentdir}" != "${cleanupdir}" ] ; do
- rmdir "${cleanupdir}"
- cleanupdir=`dirname ${cleanupdir}`
- done
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size after prelinking $pre_prelink_size."
-}
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 67603d958d..7f1f6f80a4 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -15,6 +15,7 @@ IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-contain
IMGCLASSES += "image_types_wic"
IMGCLASSES += "rootfs-postcommands"
IMGCLASSES += "image-postinst-intercepts"
+IMGCLASSES += "overlayfs-etc"
inherit ${IMGCLASSES}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
@@ -26,14 +27,14 @@ PACKAGES = ""
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
@@ -53,7 +54,7 @@ FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
# Define some very basic feature package groups
FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
-SPLASH ?= "psplash"
+SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
FEATURE_PACKAGES_splash = "${SPLASH}"
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
@@ -92,7 +93,7 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-musl = ""
+LDCONFIGDEPEND:libc-musl = ""
# This is needed to have depmod data in PKGDATA_DIR,
# but if you're building small initramfs image
@@ -138,7 +139,10 @@ python () {
def extraimage_getdepends(task):
deps = ""
for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
- deps += " %s:%s" % (dep, task)
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
return deps
d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
@@ -249,7 +253,7 @@ fakeroot python do_rootfs () {
progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
+do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask rootfs after do_prepare_recipe_sysroot
@@ -273,7 +277,7 @@ fakeroot python do_image_complete () {
}
do_image_complete[dirs] = "${TOPDIR}"
SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
+SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
@@ -314,7 +318,7 @@ fakeroot python do_image_qa () {
addtask do_image_qa after do_rootfs before do_image
SSTATETASKS += "do_image_qa"
-SSTATE_SKIP_CREATION_task-image-qa = '1'
+SSTATE_SKIP_CREATION:task-image-qa = '1'
do_image_qa[sstate-inputdirs] = ""
do_image_qa[sstate-outputdirs] = ""
python do_image_qa_setscene () {
@@ -382,8 +386,8 @@ python () {
if t.startswith("debugfs_"):
t = t[8:]
debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
- vardeps.add('IMAGE_TYPEDEP_' + t)
+ deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
+ vardeps.add('IMAGE_TYPEDEP:' + t)
if baset not in typedeps:
typedeps[baset] = set()
deps = [debug + dep for dep in deps]
@@ -431,21 +435,22 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ localdata.delVar('IMAGE_VERSION_SUFFIX')
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
image_cmd = localdata.getVar("IMAGE_CMD")
- vardeps.add('IMAGE_CMD_' + realt)
+ vardeps.add('IMAGE_CMD:' + realt)
if image_cmd:
cmds.append("\t" + image_cmd)
else:
bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
- # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
- # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
- d.delVarFlag('IMAGE_CMD_' + realt, 'func')
+ # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
+ # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
+ d.delVarFlag('IMAGE_CMD:' + realt, 'func')
rm_tmp_images = set()
def gen_conversion_cmds(bt):
@@ -457,11 +462,10 @@ python () {
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
+ cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
if cmd not in cmds:
cmds.append(cmd)
- vardeps.add('CONVERSION_CMD_' + ctype)
- vardeps.add('COMPRESS_CMD_' + ctype)
+ vardeps.add('CONVERSION_CMD:' + ctype)
subimage = type + "." + ctype
if subimage not in subimages:
subimages.append(subimage)
@@ -619,20 +623,20 @@ deltask do_package_write_rpm
create_merged_usr_symlinks() {
root="$1"
install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
- lnr $root${base_bindir} $root/bin
- lnr $root${base_sbindir} $root/sbin
- lnr $root${base_libdir} $root/${baselib}
+ ln -rs $root${base_bindir} $root/bin
+ ln -rs $root${base_sbindir} $root/sbin
+ ln -rs $root${base_libdir} $root/${baselib}
if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
install -d $root${nonarch_base_libdir}
- lnr $root${nonarch_base_libdir} $root/lib
+ ln -rs $root${nonarch_base_libdir} $root/lib
fi
# create base links for multilibs
multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
for d in $multi_libdirs; do
install -d $root${exec_prefix}/$d
- lnr $root${exec_prefix}/$d $root/$d
+ ln -rs $root${exec_prefix}/$d $root/$d
done
}
@@ -648,17 +652,15 @@ ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge'
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
reproducible_final_image_task () {
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
- fi
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
fi
- # Set mtime of all files to a reproducible value
- bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
- find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
fi
+ # Set mtime of all files to a reproducible value
+ bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
+ find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
}
systemd_preset_all () {
@@ -667,6 +669,6 @@ systemd_preset_all () {
fi
}
-IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
+IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
CVE_PRODUCT = ""
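The extraimage_getdepends() change above means EXTRA_IMAGEDEPENDS entries may now name an explicit task; a hypothetical configuration fragment:

    # defaults to :do_populate_sysroot, as before
    EXTRA_IMAGEDEPENDS += "opensbi"
    # new form: depend on a specific task instead
    EXTRA_IMAGEDEPENDS += "u-boot:do_deploy"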
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 954d6739ec..f643ed3ce7 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -29,7 +29,7 @@ def imagetypes_getdepends(d):
if d.getVar(var) is not None:
deprecated.add(var)
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
+ for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
base, rest = split_types(typedepends)
resttypes += rest
@@ -56,9 +56,9 @@ ZIP_COMPRESSION_LEVEL ?= "-9"
ZSTD_COMPRESSION_LEVEL ?= "-3"
JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
oe_mkext234fs () {
fstype=$1
@@ -88,12 +88,12 @@ oe_mkext234fs () {
fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
}
-IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
-IMAGE_CMD_btrfs () {
+IMAGE_CMD:btrfs () {
size=${ROOTFS_SIZE}
if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
size=${MIN_BTRFS_SIZE}
@@ -103,22 +103,23 @@ IMAGE_CMD_btrfs () {
mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
-IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
+IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
+IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
-IMAGE_CMD_erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
-IMAGE_CMD_erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
-IMAGE_CMD_erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
-IMAGE_CMD_cpio () {
+IMAGE_CMD:cpio () {
(cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
@@ -138,16 +139,18 @@ IMAGE_CMD_cpio () {
}
UBI_VOLNAME ?= "${MACHINE}-rootfs"
+UBI_VOLTYPE ?= "dynamic"
+UBI_IMGTYPE ?= "ubifs"
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
-
+
# Added prompt error message for ubi and ubifs image creation.
if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
fi
-
+
if [ -z "$3" ]; then
local vname=""
else
@@ -156,9 +159,9 @@ multiubi_mkfs() {
echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_type=${UBI_VOLTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
if [ -n "$vname" ]; then
@@ -184,7 +187,7 @@ multiubi_mkfs() {
fi
}
-IMAGE_CMD_multiubi () {
+IMAGE_CMD:multiubi () {
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
@@ -194,15 +197,15 @@ IMAGE_CMD_multiubi () {
done
}
-IMAGE_CMD_ubi () {
+IMAGE_CMD:ubi () {
multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
-IMAGE_TYPEDEP_ubi = "ubifs"
+IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
MIN_F2FS_SIZE ?= "524288"
-IMAGE_CMD_f2fs () {
+IMAGE_CMD:f2fs () {
# We need to add additional smarts here for devices smaller than 1.5G
# Need to scale appropriately between 40M -> 1.5G as the "overprovision
# ratio" goes down as the device gets bigger (70% -> 4.5%), below about
@@ -224,14 +227,14 @@ inherit siteinfo kernel-arch image-artifact-names
JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
-EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
+EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
# Change these if you want default mkfs behavior (i.e. create minimal inode number)
-EXTRA_IMAGECMD_ext2 ?= "-i 4096"
-EXTRA_IMAGECMD_ext3 ?= "-i 4096"
-EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_f2fs ?= ""
+EXTRA_IMAGECMD:ext2 ?= "-i 4096"
+EXTRA_IMAGECMD:ext3 ?= "-i 4096"
+EXTRA_IMAGECMD:ext4 ?= "-i 4096"
+EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
+EXTRA_IMAGECMD:f2fs ?= ""
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
@@ -244,6 +247,7 @@ do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
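The dependency added for squashfs_zst backs the new zstd-compressed types registered in IMAGE_TYPES below; selecting them is an ordinary fstypes entry, for example in local.conf:

    IMAGE_FSTYPES += "squashfs-zst cpio.zst"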
@@ -262,10 +266,10 @@ IMAGE_TYPES = " \
btrfs \
iso \
hddimg \
- squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
+ squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
ubi ubifs multiubi \
tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
- cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
wic wic.gz wic.bz2 wic.lzma wic.zst \
container \
f2fs \
@@ -279,31 +283,31 @@ IMAGE_TYPES = " \
COMPRESSIONTYPES ?= ""
CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
-CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
-CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
-CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
-CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
-CONVERSION_CMD_vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
-CONVERSION_CMD_vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
-CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
-CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
-CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
-CONVERSION_CMD_zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
+CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
+CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
+CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
+CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
+CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
+CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
+CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
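Conversion types chain onto a base type via dotted suffixes in IMAGE_FSTYPES, so a single entry can both compress an image and checksum the result; a sketch:

    # Produces <image>.ext4.gz and <image>.ext4.gz.sha256sum
    IMAGE_FSTYPES += "ext4.gz.sha256sum"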
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -327,7 +331,7 @@ CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
-DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
+DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index 49be1da77a..e3863c88a9 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -1,11 +1,36 @@
# The WICVARS variable is used to define list of bitbake variables used in wic code
# variables from this list is written to <image>.env file
WICVARS ?= "\
- BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_EFI_BOOT_FILES IMAGE_BOOT_FILES \
- IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS HOSTTOOLS_DIR \
- KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \
- ASSUME_PROVIDED PSEUDO_IGNORE_PATHS"
+ APPEND \
+ ASSUME_PROVIDED \
+ BBLAYERS \
+ DEPLOY_DIR_IMAGE \
+ FAKEROOTCMD \
+ HOSTTOOLS_DIR \
+ IMAGE_BASENAME \
+ IMAGE_BOOT_FILES \
+ IMAGE_EFI_BOOT_FILES \
+ IMAGE_LINK_NAME \
+ IMAGE_ROOTFS \
+ IMGDEPLOYDIR \
+ INITRAMFS_FSTYPES \
+ INITRAMFS_IMAGE \
+ INITRAMFS_IMAGE_BUNDLE \
+ INITRAMFS_LINK_NAME \
+ INITRD \
+ INITRD_LIVE \
+ ISODIR \
+ KERNEL_IMAGETYPE \
+ MACHINE \
+ PSEUDO_IGNORE_PATHS \
+ RECIPE_SYSROOT_NATIVE \
+ ROOTFS_SIZE \
+ STAGING_DATADIR \
+ STAGING_DIR \
+ STAGING_DIR_HOST \
+ STAGING_LIBDIR \
+ TARGET_SYS \
+"
inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
@@ -26,7 +51,7 @@ def wks_search(files, search_path):
WIC_CREATE_EXTRA_ARGS ?= ""
-IMAGE_CMD_wic () {
+IMAGE_CMD:wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
build_wic="${WORKDIR}/build-wic"
tmp_wic="${WORKDIR}/tmp-wic"
@@ -42,7 +67,7 @@ IMAGE_CMD_wic () {
BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
@@ -60,9 +85,9 @@ do_image_wic[deptask] += "do_image_complete"
WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
-WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
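As WKS_FILE_DEPENDS is a weak default, a BSP with a custom .wks can extend it for an extra native tool; a sketch assuming the kickstart file formats a FAT partition:

    WKS_FILE_DEPENDS:append = " dosfstools-native"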
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 763d5f1da2..0deebdb148 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -18,8 +18,6 @@
# files under exec_prefix
# -Check if the package name is upper case
-QA_SANE = "True"
-
# Elect whether a given type of error is a warning or error, they may
# have been set by other files.
WARN_QA ?= " libdir xorg-driver-abi \
@@ -27,7 +25,9 @@ WARN_QA ?= " libdir xorg-driver-abi \
infodir build-deps src-uri-bad symlink-to-sysroot multilib \
invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
mime mime-xdg unlisted-pkg-lics unhandled-features-check \
- missing-update-alternatives native-last \
+ missing-update-alternatives native-last missing-ptest \
+ license-exists license-no-generic license-syntax license-format \
+ license-incompatible license-file-missing obsolete-license \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
@@ -37,10 +37,10 @@ ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
configure-gettext perllocalpod shebang-size \
already-stripped installed-vs-shipped ldflags compile-host-path \
install-host-path pn-overrides unknown-configure-option \
- useless-rpaths rpaths staticdev \
+ useless-rpaths rpaths staticdev empty-dirs \
"
# Add usrmerge QA check based on distro feature
-ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
+ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
@@ -48,7 +48,22 @@ enabled tests are listed here, the do_package_qa task will run under fakeroot."
ALL_QA = "${WARN_QA} ${ERROR_QA}"
-UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+
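The renamed variable behaves exactly like the old whitelist: recipes append options that their configure script legitimately does not recognise. A sketch, where --enable-frobnicate stands in for a hypothetical option:

    UNKNOWN_CONFIGURE_OPT_IGNORE:append = " --enable-frobnicate"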
+# This is a list of directories that are expected to be empty.
+QA_EMPTY_DIRS ?= " \
+ /dev/pts \
+ /media \
+ /proc \
+ /run \
+ /tmp \
+ ${localstatedir}/run \
+ ${localstatedir}/volatile \
+"
+# It is possible to specify why a directory is expected to be empty by defining
+# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
+# message if the directory is not empty. If it is not specified for a directory,
+# then "but it is expected to be empty" will be used.
def package_qa_clean_path(path, d, pkg=None):
"""
@@ -59,32 +74,6 @@ def package_qa_clean_path(path, d, pkg=None):
path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
-def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE')
- if logfile:
- p = d.getVar('P')
- with open(logfile, "a+") as f:
- f.write("%s: %s [%s]\n" % (p, error, type))
-
-def package_qa_handle_error(error_class, error_msg, d):
- if error_class in (d.getVar("ERROR_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
- d.setVar("QA_SANE", False)
- return False
- elif error_class in (d.getVar("WARN_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
- else:
- bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
- return True
-
-def package_qa_add_message(messages, section, new_msg):
- if section not in messages:
- messages[section] = new_msg
- else:
- messages[section] = messages[section] + "\n" + new_msg
-
QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
def package_qa_check_shebang_size(path, name, d, elf, messages):
import stat
@@ -106,7 +95,7 @@ def package_qa_check_shebang_size(path, name, d, elf, messages):
return
if len(stanza) > 129:
- package_qa_add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
+ oe.qa.add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
return
QAPATHTEST[libexec] = "package_qa_check_libexec"
@@ -118,7 +107,7 @@ def package_qa_check_libexec(path,name, d, elf, messages):
return True
if 'libexec' in path.split(os.path.sep):
- package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
+ oe.qa.add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
return False
return True
@@ -146,7 +135,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
rpath = m.group(1)
for dir in bad_dirs:
if dir in rpath:
- package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
+ oe.qa.add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
@@ -176,7 +165,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
- package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
+ oe.qa.add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -185,7 +174,7 @@ def package_qa_check_dev(path, name, d, elf, messages):
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
+ oe.qa.add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
(name, package_qa_clean_path(path, d, name)))
QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
@@ -196,7 +185,7 @@ def package_qa_check_dev_elf(path, name, d, elf, messages):
install link-time .so files that are linker scripts.
"""
if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
- package_qa_add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
+ oe.qa.add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
(name, package_qa_clean_path(path, d, name)))
QAPATHTEST[staticdev] = "package_qa_check_staticdev"
@@ -209,7 +198,7 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
"""
if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
- package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+ oe.qa.add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
(name, package_qa_clean_path(path,d, name)))
QAPATHTEST[mime] = "package_qa_check_mime"
@@ -220,7 +209,7 @@ def package_qa_check_mime(path, name, d, elf, messages):
"""
if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
- package_qa_add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
+ oe.qa.add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
@@ -246,10 +235,10 @@ def package_qa_check_mime_xdg(path, name, d, elf, messages):
pkgname = name
if name == d.getVar('PN'):
pkgname = '${PN}'
- wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP_%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
- package_qa_add_message(messages, "mime-xdg", wstr)
+ wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
+ oe.qa.add_message(messages, "mime-xdg", wstr)
if mime_type_found:
- package_qa_add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
+ oe.qa.add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
def package_qa_check_libdir(d):
@@ -279,7 +268,7 @@ def package_qa_check_libdir(d):
# Skip subdirectories for any packages with libdir in INSANE_SKIP
skippackages = []
for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
+ if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
bb.note("Package %s skipping libdir QA test" % (package))
skippackages.append(package)
elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
@@ -313,7 +302,7 @@ def package_qa_check_libdir(d):
pass
if messages:
- package_qa_handle_error("libdir", "\n".join(messages), d)
+ oe.qa.handle_error("libdir", "\n".join(messages), d)
QAPATHTEST[debug-files] = "package_qa_check_dbg"
def package_qa_check_dbg(path, name, d, elf, messages):
@@ -323,7 +312,7 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
+ oe.qa.add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[arch] = "package_qa_check_arch"
@@ -336,14 +325,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
if not elf:
return
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
provides = d.getVar('PROVIDES')
bpn = d.getVar('BPN')
if target_arch == "allarch":
pn = d.getVar('PN')
- package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
+ oe.qa.add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
return
# FIXME: Cross packages confuse this check, so just skip them
@@ -366,13 +355,13 @@ def package_qa_check_arch(path,name,d, elf, messages):
target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
if not ((machine == elf.machine()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
+ oe.qa.add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
(oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
+ oe.qa.add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
(elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
- package_qa_add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
+ oe.qa.add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
(elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
QAPATHTEST[desktop] = "package_qa_check_desktop"
@@ -385,7 +374,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
- package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
+ oe.qa.add_message(messages, "desktop", "Desktop file issue: " + l.strip())
QAPATHTEST[textrel] = "package_qa_textrel"
def package_qa_textrel(path, name, d, elf, messages):
@@ -411,7 +400,7 @@ def package_qa_textrel(path, name, d, elf, messages):
if not sane:
path = package_qa_clean_path(path, d, name)
- package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
+ oe.qa.add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -440,19 +429,20 @@ def package_qa_hash_style(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if "SYMTAB" in line:
has_syms = True
- if "GNU_HASH" in line or "DT_MIPS_XHASH" in line:
+ if "GNU_HASH" in line or "MIPS_XHASH" in line:
sane = True
if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
sane = True
if has_syms and not sane:
path = package_qa_clean_path(path, d, name)
- package_qa_add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
+ oe.qa.add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
- Check for build paths inside target files and error if not found in the whitelist
+ Check for build paths inside target files and error if paths are not
+ explicitly ignored.
"""
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
@@ -467,7 +457,7 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
file_content = f.read()
if tmpdir in file_content:
trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
+ oe.qa.add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -483,10 +473,10 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
driverdir = d.expand("${libdir}/xorg/modules/drivers/")
if driverdir in path and path.endswith(".so"):
mlprefix = d.getVar('MLPREFIX') or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
- package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
+ oe.qa.add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
QAPATHTEST[infodir] = "package_qa_check_infodir"
def package_qa_check_infodir(path, name, d, elf, messages):
@@ -496,7 +486,7 @@ def package_qa_check_infodir(path, name, d, elf, messages):
infodir = d.expand("${infodir}/dir")
if infodir in path:
- package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
+ oe.qa.add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
@@ -509,7 +499,7 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
tmpdir = d.getVar('TMPDIR')
if target.startswith(tmpdir):
trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
+ oe.qa.add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
# Check license variables
do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
@@ -517,7 +507,6 @@ python populate_lic_qa_checksum() {
"""
Check for changes in the license files.
"""
- sane = True
lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
lic = d.getVar('LICENSE')
@@ -527,7 +516,7 @@ python populate_lic_qa_checksum() {
return
if not lic_files and d.getVar('SRC_URI'):
- sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
+ oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
srcdir = d.getVar('S')
corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
@@ -535,11 +524,11 @@ python populate_lic_qa_checksum() {
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
continue
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
continue
if (srclicfile == corebase_licensefile):
@@ -561,7 +550,7 @@ python populate_lic_qa_checksum() {
import hashlib
lineno = 0
license = []
- m = hashlib.md5()
+ m = hashlib.new('MD5', usedforsecurity=False)
for line in f:
lineno += 1
if (lineno >= beginline):
@@ -607,10 +596,9 @@ python populate_lic_qa_checksum() {
else:
msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- sane &= package_qa_handle_error("license-checksum", msg, d)
+ oe.qa.handle_error("license-checksum", msg, d)
- if not sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
def qa_check_staged(path,d):
@@ -622,7 +610,6 @@ def qa_check_staged(path,d):
responsible for the errors easily even if we look at every .pc and .la file.
"""
- sane = True
tmpdir = d.getVar('TMPDIR')
workdir = os.path.join(tmpdir, "work")
recipesysroot = d.getVar("RECIPE_SYSROOT")
@@ -655,16 +642,14 @@ def qa_check_staged(path,d):
file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("la", error_msg, d)
+ oe.qa.handle_error("la", error_msg, d)
elif file.endswith(".pc") and not skip_pkgconfig:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("pkgconfig", error_msg, d)
-
- return sane
+ oe.qa.handle_error("pkgconfig", error_msg, d)
# Run all package-wide warnfuncs and errorfuncs
def package_qa_package(warnfuncs, errorfuncs, package, d):
@@ -677,9 +662,9 @@ def package_qa_package(warnfuncs, errorfuncs, package, d):
func(package, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
@@ -694,39 +679,55 @@ def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
func(pn, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
+def prepopulate_objdump_p(elf, d):
+ output = elf.run_objdump("-p", d)
+ return (elf.name, output)
+
# Walk over all files in a directory and call func
def package_qa_walk(warnfuncs, errorfuncs, package, d):
- import oe.qa
-
#if this will throw an exception, then fix the dict above
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
warnings = {}
errors = {}
+ elves = {}
for path in pkgfiles[package]:
elf = None
if os.path.isfile(path):
elf = oe.qa.ELFFile(path)
try:
elf.open()
+ elf.close()
except oe.qa.NotELFFileError:
elf = None
+ if elf:
+ elves[path] = elf
+
+ results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
+ for item in results:
+ elves[item[0]].set_objdump("-p", item[1])
+
+ for path in pkgfiles[package]:
+ if path in elves:
+ elves[path].open()
for func in warnfuncs:
- func(path, package, d, elf, warnings)
+ func(path, package, d, elves.get(path), warnings)
for func in errorfuncs:
- func(path, package, d, elf, errors)
+ func(path, package, d, elves.get(path), errors)
+ if path in elves:
+ elves[path].close()
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# Don't do this check for kernel/module recipes, there aren't too many debug/development
@@ -746,10 +747,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
for rdepend in rdepends:
if "-dbg" in rdepend and "debug-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg,rdepend)
- package_qa_handle_error("debug-deps", error_msg, d)
+ oe.qa.handle_error("debug-deps", error_msg, d)
if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg, rdepend)
- package_qa_handle_error("dev-deps", error_msg, d)
+ oe.qa.handle_error("dev-deps", error_msg, d)
if rdepend not in packages:
rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
@@ -770,7 +771,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
else:
error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
- package_qa_handle_error("build-deps", error_msg, d)
+ oe.qa.handle_error("build-deps", error_msg, d)
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
@@ -780,7 +781,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
filerdepends = {}
rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
for key in rdep_data:
- if key.startswith("FILERDEPENDS_"):
+ if key.startswith("FILERDEPENDS:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
if subkey not in ignored_file_rdeps and \
not subkey.startswith('perl('):
@@ -795,7 +796,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# The python is not a package, but python-core provides it, so
# skip checking /usr/bin/python if python is in the rdeps, in
- # case there is a RDEPENDS_pkg = "python" in the recipe.
+ # case there is a RDEPENDS:pkg = "python" in the recipe.
for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
if py in done:
filerdepends.pop("/usr/bin/python",None)
@@ -808,11 +809,11 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
for key in rdep_data:
- if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
+ if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
filerdepends.pop(subkey,None)
# Add the files list to the rprovides
- if key == "FILES_INFO":
+ if key.startswith("FILES_INFO:"):
# Use eval() to turn it into a dict
for subkey in eval(rdep_data[key]):
filerdepends.pop(subkey,None)
@@ -821,9 +822,9 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
break
if filerdepends:
for key in filerdepends:
- error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
- (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
+ error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
+ (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
+ oe.qa.handle_error("file-rdeps", error_msg, d)
package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
def package_qa_check_deps(pkg, pkgdest, d):
@@ -835,12 +836,12 @@ def package_qa_check_deps(pkg, pkgdest, d):
try:
rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
except ValueError as e:
- bb.fatal("%s_%s: %s" % (var, pkg, e))
+ bb.fatal("%s:%s: %s" % (var, pkg, e))
for dep in rvar:
for v in rvar[dep]:
if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
- error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
- package_qa_handle_error("dep-cmp", error_msg, d)
+ error_msg = "%s:%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
+ oe.qa.handle_error("dep-cmp", error_msg, d)
check_valid_deps('RDEPENDS')
check_valid_deps('RRECOMMENDS')
@@ -851,13 +852,14 @@ def package_qa_check_deps(pkg, pkgdest, d):
QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
def package_qa_check_usrmerge(pkg, d, messages):
+
pkgdest = d.getVar('PKGDEST')
pkg_dir = pkgdest + os.sep + pkg + os.sep
merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
for f in merged_dirs:
if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
- package_qa_add_message(messages, "usrmerge", msg)
+ oe.qa.add_message(messages, "usrmerge", msg)
return False
return True
@@ -876,7 +878,7 @@ def package_qa_check_perllocalpod(pkg, d, messages):
if matches:
matches = [package_qa_clean_path(path, d, pkg) for path in matches]
msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
- package_qa_add_message(messages, "perllocalpod", msg)
+ oe.qa.add_message(messages, "perllocalpod", msg)
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
@@ -888,13 +890,13 @@ def package_qa_check_expanded_d(package, d, messages):
expanded_d = d.getVar('D')
for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + package) or ""
+ bbvar = d.getVar(var + ":" + package) or ""
if expanded_d in bbvar:
if var == 'FILES':
- package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
+ oe.qa.add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
sane = False
else:
- package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
+ oe.qa.add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
sane = False
return sane
@@ -903,19 +905,40 @@ def package_qa_check_unlisted_pkg_lics(package, d, messages):
"""
Check that all licenses for a package are among the licenses for the recipe.
"""
- pkg_lics = d.getVar('LICENSE_' + package)
+ pkg_lics = d.getVar('LICENSE:' + package)
if not pkg_lics:
return True
recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
- unlisted = oe.license.list_licenses(pkg_lics) - recipe_lics_set
- if not unlisted:
- return True
+ package_lics = oe.license.list_licenses(pkg_lics)
+ unlisted = package_lics - recipe_lics_set
+ if unlisted:
+ oe.qa.add_message(messages, "unlisted-pkg-lics",
+ "LICENSE:%s includes licenses (%s) that are not "
+ "listed in LICENSE" % (package, ' '.join(unlisted)))
+ return False
+ obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
+ if obsolete:
+ oe.qa.add_message(messages, "obsolete-license",
+ "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)))
+ return False
+ return True
- package_qa_add_message(messages, "unlisted-pkg-lics",
- "LICENSE_%s includes licenses (%s) that are not "
- "listed in LICENSE" % (package, ' '.join(unlisted)))
- return False
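For reference, the situation this test covers is a per-package license that is missing from (or obsolete relative to) the recipe-level LICENSE; a hedged recipe sketch that stays clean because MIT is listed at both levels (the -tools package is hypothetical):

    LICENSE = "GPL-2.0-only & MIT"
    LICENSE:${PN}-tools = "MIT"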
+QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
+def package_qa_check_empty_dirs(pkg, d, messages):
+ """
+ Check for the existence of files in directories that are expected to be
+ empty.
+ """
+
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
+ empty_dir = oe.path.join(pkgd, dir)
+ if os.path.exists(empty_dir) and os.listdir(empty_dir):
+ recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
+ "but it is expected to be empty")
+ msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
+ oe.qa.add_message(messages, "empty-dirs", msg)
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
@@ -927,7 +950,7 @@ def package_qa_check_encoding(keys, encode, d):
except UnicodeDecodeError as e:
error_msg = "%s has non %s characters" % (key,enc)
sane = False
- package_qa_handle_error("invalid-chars", error_msg, d)
+ oe.qa.handle_error("invalid-chars", error_msg, d)
return sane
for key in keys:
@@ -960,44 +983,33 @@ def package_qa_check_host_user(path, name, d, elf, messages):
else:
check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
return False
check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
return False
return True
-QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
-def package_qa_check_src_uri(pn, d, messages):
- import re
-
- if "${PN}" in d.getVar("SRC_URI", False):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
-
- for url in d.getVar("SRC_URI").split():
- if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
-
QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
def package_qa_check_unhandled_features_check(pn, d, messages):
if not bb.data.inherits_class('features_check', d):
var_set = False
for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
- if d.getVar(var) is not None or d.overridedata.get(var) is not None:
+ if d.getVar(var) is not None or d.hasOverrides(var):
var_set = True
if var_set:
- package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
+ oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
def package_qa_check_missing_update_alternatives(pn, d, messages):
# Look at all packages and find out if any of those sets ALTERNATIVE variable
# without inheriting update-alternatives class
for pkg in (d.getVar('PACKAGES') or '').split():
- if d.getVar('ALTERNATIVE_%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
- package_qa_handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE_%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
+ if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
+ oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
# The PACKAGE FUNC to scan each package
python do_package_qa () {
@@ -1006,6 +1018,14 @@ python do_package_qa () {
bb.note("DO PACKAGE QA")
+ main_lic = d.getVar('LICENSE')
+
+ # Check the recipe-level LICENSE for obsolete license names (per-package LICENSE values are checked separately below)
+ main_licenses = oe.license.list_licenses(main_lic)
+ obsolete = set(oe.license.obsolete_license_list()) & main_licenses
+ if obsolete:
+ oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
+
bb.build.exec_func("read_subpackage_metadata", d)
# Check non UTF-8 characters on recipe's metadata
@@ -1066,14 +1086,14 @@ python do_package_qa () {
for package in packages:
skip = set((d.getVar('INSANE_SKIP') or "").split() +
- (d.getVar('INSANE_SKIP_' + package) or "").split())
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
bb.note("Checking Package: %s" % package)
# Check package name
if not pkgname_pattern.match(package):
- package_qa_handle_error("pkgname",
+ oe.qa.handle_error("pkgname",
"%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
@@ -1091,10 +1111,7 @@ python do_package_qa () {
if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("QA run found fatal errors. Please consider fixing them.")
- bb.note("DONE with PACKAGE QA")
+ oe.qa.exit_if_errors(d)
}
# binutils is used for most checks, so need to set as dependency
@@ -1108,7 +1125,7 @@ addtask do_package_qa after do_packagedata do_package before do_build
python() {
pkgs = (d.getVar('PACKAGES') or '').split()
for pkg in pkgs:
- d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
+ d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
}
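With the override-syntax conversion, per-package QA skips in recipes follow the same pattern; a sketch that silences a single test for the main package only:

    INSANE_SKIP:${PN} += "ldflags"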
SSTATETASKS += "do_package_qa"
@@ -1121,8 +1138,8 @@ addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
- if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
- bb.fatal("QA staging was broken by the package built above")
+ qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d)
+ oe.qa.exit_with_message_if_errors("QA staging was broken by the package built above", d)
}
python do_qa_patch() {
@@ -1170,7 +1187,31 @@ python do_qa_patch() {
elif 'patch-fuzz' in d.getVar('WARN_QA'):
bb.warn(msg)
msg = "Patch log indicates that patches do not apply cleanly."
- package_qa_handle_error("patch-fuzz", msg, d)
+ oe.qa.handle_error("patch-fuzz", msg, d)
+
+ # Check if the patch contains a correctly formatted and spelled Upstream-Status
+ import re
+ from oe import patch
+
+ for url in patch.src_patches(d):
+ (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
+
+ # skip patches not in oe-core
+ if '/meta/' not in fullpath:
+ continue
+
+ content = open(fullpath, encoding='utf-8', errors='ignore').read()
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ match_kinda = kinda_status_re.search(content)
+ match_strict = strict_status_re.search(content)
+ guidelines = "https://www.openembedded.org/wiki/Commit_Patch_Message_Guidelines#Patch_Header_Recommendations:_Upstream-Status"
+
+ if not match_strict:
+ if match_kinda:
+ bb.error("Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0)))
+ else:
+ bb.error("Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines))
}
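A patch header accepted by the strict expression above looks like the following sketch; the bracketed reference is a hypothetical placeholder:

    Upstream-Status: Backport [hypothetical upstream commit reference]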
python do_qa_configure() {
@@ -1198,7 +1239,7 @@ python do_qa_configure() {
if subprocess.call(statement, shell=True) == 0:
error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
Rerun configure task after fixing this."""
- package_qa_handle_error("configure-unsafe", error_msg, d)
+ oe.qa.handle_error("configure-unsafe", error_msg, d)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1228,30 +1269,27 @@ Rerun configure task after fixing this."""
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if subprocess.call(gnu, shell=True) == 0:
error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
- package_qa_handle_error("configure-gettext", error_msg, d)
+ oe.qa.handle_error("configure-gettext", error_msg, d)
###########################################################################
# Check unrecognised configure options (with a white list)
###########################################################################
- if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
+ if bb.data.inherits_class("autotools", d):
bb.note("Checking configure output for unrecognised options")
try:
if bb.data.inherits_class("autotools", d):
flag = "WARNING: unrecognized options:"
log = os.path.join(d.getVar('B'), 'config.log')
- if bb.data.inherits_class("meson", d):
- flag = "WARNING: Unknown options:"
- log = os.path.join(d.getVar('T'), 'log.do_configure')
output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
- options -= whitelist
+ ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
+ options -= ignore_opts
if options:
pn = d.getVar('PN')
error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
- package_qa_handle_error("unknown-configure-option", error_msg, d)
+ oe.qa.handle_error("unknown-configure-option", error_msg, d)
except subprocess.CalledProcessError:
pass
@@ -1263,18 +1301,33 @@ Rerun configure task after fixing this."""
if pconfig not in pkgconfigflags:
pn = d.getVar('PN')
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
- package_qa_handle_error("invalid-packageconfig", error_msg, d)
+ oe.qa.handle_error("invalid-packageconfig", error_msg, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
+def unpack_check_src_uri(pn, d):
+ import re
+
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ if 'src-uri-bad' in skip:
+ bb.note("Recipe %s skipping qa checking: src-uri-bad" % d.getVar('PN'))
+ return
+
+ if "${PN}" in d.getVar("SRC_URI", False):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
+
+ for url in d.getVar("SRC_URI").split():
+ if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
+
python do_qa_unpack() {
src_uri = d.getVar('SRC_URI')
s_dir = d.getVar('S')
if src_uri and not os.path.exists(s_dir):
bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
+
+ unpack_check_src_uri(d.getVar('PN'), d)
}
# The Staging Func, to check all staging
@@ -1306,11 +1359,11 @@ python () {
# Checking ${FILESEXTRAPATHS}
extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
if '__default' not in extrapaths.split(":"):
- msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
+ msg = "FILESEXTRAPATHS-variable, must always use :prepend (or :append)\n"
msg += "type of assignment, and don't forget the colon.\n"
msg += "Please assign it with the format of:\n"
- msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
- msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
+ msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
+ msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
msg += "in your bbappend file\n\n"
msg += "Your incorrect assignment is:\n"
msg += "%s\n" % extrapaths
@@ -1320,15 +1373,15 @@ python () {
pn = d.getVar('PN')
if pn in overrides:
msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
- package_qa_handle_error("pn-overrides", msg, d)
+ oe.qa.handle_error("pn-overrides", msg, d)
prog = re.compile(r'[A-Z]')
if prog.search(pn):
- package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+ oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
- # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
+ # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
# why it doesn't work.
- if (d.getVar(d.expand('DEPENDS_${PN}'))):
- package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
+ if (d.getVar(d.expand('DEPENDS:${PN}'))):
+ oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
issues = []
if (d.getVar('PACKAGES') or "").split():
@@ -1345,7 +1398,7 @@ python () {
else:
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+ oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
for native_class in ['native', 'nativesdk']:
@@ -1373,11 +1426,8 @@ python () {
else:
break
if broken_order:
- package_qa_handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
+ oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
"Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
-
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
index a65cdddb3e..e77107c893 100644
--- a/meta/classes/kernel-artifact-names.bbclass
+++ b/meta/classes/kernel-artifact-names.bbclass
@@ -8,15 +8,20 @@ inherit image-artifact-names
KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
+KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
+KERNEL_IMAGETYPE_SYMLINK ?= "1"
KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
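Together these knobs let a BSP drop the unversioned deploy-directory symlinks and pick a different bundle suffix; a machine .conf sketch:

    KERNEL_IMAGETYPE_SYMLINK = "0"
    KERNEL_ARTIFACT_BIN_EXT = ".img"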
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 27a4905ac6..b4338da1b1 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -6,8 +6,8 @@ python () {
d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
}
-FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
-FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
@@ -33,7 +33,7 @@ get_real_dtb_path_in_kernel () {
echo "$dtb_path"
}
-do_configure_append() {
+do_configure:append() {
if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
case "${ARCH}" in
@@ -55,7 +55,7 @@ do_configure_append() {
fi
}
-do_compile_append() {
+do_compile:append() {
if [ -n "${KERNEL_DTC_FLAGS}" ]; then
export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
fi
@@ -66,7 +66,7 @@ do_compile_append() {
done
}
-do_install_append() {
+do_install:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
@@ -76,28 +76,36 @@ do_install_append() {
done
}
-do_deploy_append() {
+do_deploy:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
install -d $deployDir
install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ fi
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ fi
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
$deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
$deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
fi
fi
done
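With the guards added above, the versionless DTB symlinks can be suppressed entirely by clearing the link name, e.g. in a machine configuration (a sketch, not a required setting):

    # Skip the <dtb>-<MACHINE>.<ext> convenience symlinks during do_deploy
    KERNEL_DTB_LINK_NAME = ""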
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index e363eeb64c..8a9b195d6e 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -36,6 +36,10 @@ python __anonymous () {
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ ubootenv = d.getVar('UBOOT_ENV')
+ if ubootenv:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
+
#check if there are any dtb providers
providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
if providerdtb:
@@ -60,12 +64,23 @@ FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
# Sign individual images as well
FIT_SIGN_INDIVIDUAL ?= "0"
+FIT_CONF_PREFIX ?= "conf-"
+FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
+
+# Keys used to sign individual image nodes.
+# The keys used to sign image nodes must be different from those used to sign
+# configuration nodes, otherwise the "required" property, from
+# UBOOT_DTB_BINARY, will be set to "conf", because "conf" takes precedence
+# over "image". Image signature checking would then no longer be mandatory,
+# and no error would be raised on verification failure.
+# UBOOT_SIGN_IMG_KEYNAME = "dev2" # key name in keydir (e.g. "dev2.crt", "dev2.key")
+
#
# Emit the fitImage ITS header
#
# $1 ... .its filename
fitimage_emit_fit_header() {
- cat << EOF >> ${1}
+ cat << EOF >> $1
/dts-v1/;
/ {
@@ -86,24 +101,24 @@ EOF
fitimage_emit_section_maint() {
case $2 in
imagestart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
images {
EOF
;;
confstart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
configurations {
EOF
;;
sectend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
fitend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
@@ -121,7 +136,7 @@ fitimage_emit_section_kernel() {
kernel_csum="${FIT_HASH_ALG}"
kernel_sign_algo="${FIT_SIGN_ALG}"
- kernel_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+ kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ENTRYPOINT="${UBOOT_ENTRYPOINT}"
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
@@ -129,28 +144,28 @@ fitimage_emit_section_kernel() {
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
- cat << EOF >> ${1}
- kernel-${2} {
+ cat << EOF >> $1
+ kernel-$2 {
description = "Linux kernel";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "kernel";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${4}";
+ compression = "$4";
load = <${UBOOT_LOADADDRESS}>;
- entry = <${ENTRYPOINT}>;
+ entry = <$ENTRYPOINT>;
hash-1 {
- algo = "${kernel_csum}";
+ algo = "$kernel_csum";
};
};
EOF
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${kernel_sign_keyname}" ] ; then
- sed -i '$ d' ${1}
- cat << EOF >> ${1}
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
signature-1 {
- algo = "${kernel_csum},${kernel_sign_algo}";
- key-name-hint = "${kernel_sign_keyname}";
+ algo = "$kernel_csum,$kernel_sign_algo";
+ key-name-hint = "$kernel_sign_keyname";
};
};
EOF
@@ -167,7 +182,7 @@ fitimage_emit_section_dtb() {
dtb_csum="${FIT_HASH_ALG}"
dtb_sign_algo="${FIT_SIGN_ALG}"
- dtb_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+ dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
dtb_loadline=""
dtb_ext=${DTB##*.}
@@ -178,26 +193,26 @@ fitimage_emit_section_dtb() {
elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
fi
- cat << EOF >> ${1}
- fdt-${2} {
+ cat << EOF >> $1
+ fdt-$2 {
description = "Flattened Device Tree blob";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
- ${dtb_loadline}
+ $dtb_loadline
hash-1 {
- algo = "${dtb_csum}";
+ algo = "$dtb_csum";
};
};
EOF
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${dtb_sign_keyname}" ] ; then
- sed -i '$ d' ${1}
- cat << EOF >> ${1}
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
signature-1 {
- algo = "${dtb_csum},${dtb_sign_algo}";
- key-name-hint = "${dtb_sign_keyname}";
+ algo = "$dtb_csum,$dtb_sign_algo";
+ key-name-hint = "$dtb_sign_keyname";
};
};
EOF
@@ -212,29 +227,29 @@ EOF
# $3 ... Path to boot script image
fitimage_emit_section_boot_script() {
- bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_csum="${FIT_HASH_ALG}"
bootscr_sign_algo="${FIT_SIGN_ALG}"
- bootscr_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
- cat << EOF >> ${1}
- bootscr-${2} {
+ cat << EOF >> $1
+ bootscr-$2 {
description = "U-boot script";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "script";
arch = "${UBOOT_ARCH}";
compression = "none";
hash-1 {
- algo = "${bootscr_csum}";
+ algo = "$bootscr_csum";
};
};
EOF
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${bootscr_sign_keyname}" ] ; then
- sed -i '$ d' ${1}
- cat << EOF >> ${1}
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
signature-1 {
- algo = "${bootscr_csum},${bootscr_sign_algo}";
- key-name-hint = "${bootscr_sign_keyname}";
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
};
};
EOF
@@ -251,10 +266,10 @@ fitimage_emit_section_setup() {
setup_csum="${FIT_HASH_ALG}"
- cat << EOF >> ${1}
- setup-${2} {
+ cat << EOF >> $1
+ setup-$2 {
description = "Linux setup.bin";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "x86_setup";
arch = "${UBOOT_ARCH}";
os = "linux";
@@ -262,7 +277,7 @@ fitimage_emit_section_setup() {
load = <0x00090000>;
entry = <0x00090000>;
hash-1 {
- algo = "${setup_csum}";
+ algo = "$setup_csum";
};
};
EOF
@@ -278,7 +293,7 @@ fitimage_emit_section_ramdisk() {
ramdisk_csum="${FIT_HASH_ALG}"
ramdisk_sign_algo="${FIT_SIGN_ALG}"
- ramdisk_sign_keyname="${UBOOT_SIGN_KEYNAME}"
+ ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ramdisk_loadline=""
ramdisk_entryline=""
@@ -289,28 +304,28 @@ fitimage_emit_section_ramdisk() {
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
fi
- cat << EOF >> ${1}
- ramdisk-${2} {
+ cat << EOF >> $1
+ ramdisk-$2 {
description = "${INITRAMFS_IMAGE}";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "none";
- ${ramdisk_loadline}
- ${ramdisk_entryline}
+ $ramdisk_loadline
+ $ramdisk_entryline
hash-1 {
- algo = "${ramdisk_csum}";
+ algo = "$ramdisk_csum";
};
};
EOF
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "${ramdisk_sign_keyname}" ] ; then
- sed -i '$ d' ${1}
- cat << EOF >> ${1}
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
signature-1 {
- algo = "${ramdisk_csum},${ramdisk_sign_algo}";
- key-name-hint = "${ramdisk_sign_keyname}";
+ algo = "$ramdisk_csum,$ramdisk_sign_algo";
+ key-name-hint = "$ramdisk_sign_keyname";
};
};
EOF
@@ -335,18 +350,18 @@ fitimage_emit_section_config() {
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
- its_file="${1}"
- kernel_id="${2}"
- dtb_image="${3}"
- ramdisk_id="${4}"
- bootscr_id="${5}"
- config_id="${6}"
- default_flag="${7}"
+ its_file="$1"
+ kernel_id="$2"
+ dtb_image="$3"
+ ramdisk_id="$4"
+ bootscr_id="$5"
+ config_id="$6"
+ default_flag="$7"
# Test if we have any DTBs at all
sep=""
conf_desc=""
- conf_node="conf-"
+ conf_node="${FIT_CONF_PREFIX}"
kernel_line=""
fdt_line=""
ramdisk_line=""
@@ -356,106 +371,106 @@ fitimage_emit_section_config() {
# conf node name is selected based on dtb ID if it is present,
# otherwise it is selected based on kernel ID
- if [ -n "${dtb_image}" ]; then
- conf_node=$conf_node${dtb_image}
+ if [ -n "$dtb_image" ]; then
+ conf_node=$conf_node$dtb_image
else
- conf_node=$conf_node${kernel_id}
+ conf_node=$conf_node$kernel_id
fi
- if [ -n "${kernel_id}" ]; then
+ if [ -n "$kernel_id" ]; then
conf_desc="Linux kernel"
sep=", "
- kernel_line="kernel = \"kernel-${kernel_id}\";"
+ kernel_line="kernel = \"kernel-$kernel_id\";"
fi
- if [ -n "${dtb_image}" ]; then
- conf_desc="${conf_desc}${sep}FDT blob"
+ if [ -n "$dtb_image" ]; then
+ conf_desc="$conf_desc${sep}FDT blob"
sep=", "
- fdt_line="fdt = \"fdt-${dtb_image}\";"
+ fdt_line="fdt = \"fdt-$dtb_image\";"
fi
- if [ -n "${ramdisk_id}" ]; then
- conf_desc="${conf_desc}${sep}ramdisk"
+ if [ -n "$ramdisk_id" ]; then
+ conf_desc="$conf_desc${sep}ramdisk"
sep=", "
- ramdisk_line="ramdisk = \"ramdisk-${ramdisk_id}\";"
+ ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
fi
- if [ -n "${bootscr_id}" ]; then
- conf_desc="${conf_desc}${sep}u-boot script"
+ if [ -n "$bootscr_id" ]; then
+ conf_desc="$conf_desc${sep}u-boot script"
sep=", "
- bootscr_line="bootscr = \"bootscr-${bootscr_id}\";"
+ bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
fi
- if [ -n "${config_id}" ]; then
- conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup-${config_id}\";"
+ if [ -n "$config_id" ]; then
+ conf_desc="$conf_desc${sep}setup"
+ setup_line="setup = \"setup-$config_id\";"
fi
- if [ "${default_flag}" = "1" ]; then
+ if [ "$default_flag" = "1" ]; then
# default node is selected based on dtb ID if it is present,
# otherwise it is selected based on kernel ID
- if [ -n "${dtb_image}" ]; then
- default_line="default = \"conf-${dtb_image}\";"
+ if [ -n "$dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
else
- default_line="default = \"conf-${kernel_id}\";"
+ default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
fi
fi
- cat << EOF >> ${its_file}
- ${default_line}
+ cat << EOF >> $its_file
+ $default_line
$conf_node {
- description = "${default_flag} ${conf_desc}";
- ${kernel_line}
- ${fdt_line}
- ${ramdisk_line}
- ${bootscr_line}
- ${setup_line}
+ description = "$default_flag $conf_desc";
+ $kernel_line
+ $fdt_line
+ $ramdisk_line
+ $bootscr_line
+ $setup_line
hash-1 {
- algo = "${conf_csum}";
+ algo = "$conf_csum";
};
EOF
- if [ ! -z "${conf_sign_keyname}" ] ; then
+ if [ -n "$conf_sign_keyname" ] ; then
sign_line="sign-images = "
sep=""
- if [ -n "${kernel_id}" ]; then
- sign_line="${sign_line}${sep}\"kernel\""
+ if [ -n "$kernel_id" ]; then
+ sign_line="$sign_line${sep}\"kernel\""
sep=", "
fi
- if [ -n "${dtb_image}" ]; then
- sign_line="${sign_line}${sep}\"fdt\""
+ if [ -n "$dtb_image" ]; then
+ sign_line="$sign_line${sep}\"fdt\""
sep=", "
fi
- if [ -n "${ramdisk_id}" ]; then
- sign_line="${sign_line}${sep}\"ramdisk\""
+ if [ -n "$ramdisk_id" ]; then
+ sign_line="$sign_line${sep}\"ramdisk\""
sep=", "
fi
- if [ -n "${bootscr_id}" ]; then
- sign_line="${sign_line}${sep}\"bootscr\""
+ if [ -n "$bootscr_id" ]; then
+ sign_line="$sign_line${sep}\"bootscr\""
sep=", "
fi
- if [ -n "${config_id}" ]; then
- sign_line="${sign_line}${sep}\"setup\""
+ if [ -n "$config_id" ]; then
+ sign_line="$sign_line${sep}\"setup\""
fi
- sign_line="${sign_line};"
+ sign_line="$sign_line;"
- cat << EOF >> ${its_file}
+ cat << EOF >> $its_file
signature-1 {
- algo = "${conf_csum},${conf_sign_algo}";
- key-name-hint = "${conf_sign_keyname}";
- ${sign_line}
+ algo = "$conf_csum,$conf_sign_algo";
+ key-name-hint = "$conf_sign_keyname";
+ $sign_line
};
EOF
fi
- cat << EOF >> ${its_file}
+ cat << EOF >> $its_file
};
EOF
}
@@ -470,35 +485,24 @@ fitimage_assemble() {
kernelcount=1
dtbcount=""
DTBS=""
- ramdiskcount=${3}
+ ramdiskcount=$3
setupcount=""
bootscr_id=""
- rm -f ${1} arch/${ARCH}/boot/${2}
+ rm -f $1 arch/${ARCH}/boot/$2
- fitimage_emit_fit_header ${1}
+ if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
+ bbfatal "Keys used to sign images and configuration nodes must be different."
+ fi
+
+ fitimage_emit_fit_header $1
#
# Step 1: Prepare a kernel image section.
#
- fitimage_emit_section_maint ${1} imagestart
+ fitimage_emit_section_maint $1 imagestart
uboot_prep_kimage
-
- if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
- initramfs_bundle_path="arch/"${UBOOT_ARCH}"/boot/"${KERNEL_IMAGETYPE_REPLACEMENT}".initramfs"
- if [ -e "${initramfs_bundle_path}" ]; then
-
- #
- # Include the kernel/rootfs bundle.
- #
-
- fitimage_emit_section_kernel ${1} "${kernelcount}" "${initramfs_bundle_path}" "${linux_comp}"
- else
- bbwarn "${initramfs_bundle_path} not found."
- fi
- else
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
- fi
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
@@ -507,9 +511,9 @@ fitimage_assemble() {
if [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
- if echo ${DTB} | grep -q '/dts/'; then
- bbwarn "${DTB} contains the full path to the the dts file, but only the dtb name should be used."
- DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+ if echo $DTB | grep -q '/dts/'; then
+ bbwarn "$DTB contains the full path to the dts file, but only the dtb name should be used."
+ DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
fi
# Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
@@ -517,23 +521,23 @@ fitimage_assemble() {
continue
fi
- DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
- if [ ! -e "${DTB_PATH}" ]; then
- DTB_PATH="arch/${ARCH}/boot/${DTB}"
+ DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
+ if [ ! -e "$DTB_PATH" ]; then
+ DTB_PATH="arch/${ARCH}/boot/$DTB"
fi
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
fi
if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} "${EXTERNAL_KERNEL_DEVICETREE}/${DTB}"
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
done
fi
@@ -545,7 +549,7 @@ fitimage_assemble() {
if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
bootscr_id="${UBOOT_ENV_BINARY}"
- fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
+ fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
else
bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
fi
@@ -556,7 +560,7 @@ fitimage_assemble() {
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
- fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
+ fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
fi
#
@@ -564,28 +568,31 @@ fitimage_assemble() {
#
if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
- for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
- initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
- echo "Using $initramfs_path"
- if [ -e "${initramfs_path}" ]; then
- fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
+ for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio; do
+ initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
+ echo -n "Searching for $initramfs_path..."
+ if [ -e "$initramfs_path" ]; then
+ echo "found"
+ fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
break
+ else
+ echo "not found"
fi
done
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
# Force the first Kernel and DTB in the default config
kernelcount=1
- if [ -n "${dtbcount}" ]; then
+ if [ -n "$dtbcount" ]; then
dtbcount=1
fi
#
# Step 6: Prepare a configurations section
#
- fitimage_emit_section_maint ${1} confstart
+ fitimage_emit_section_maint $1 confstart
# kernel-fitimage.bbclass currently only supports a single kernel (no more,
# no less) to be added to the FIT image along with 0 or more device trees and
@@ -596,33 +603,33 @@ fitimage_assemble() {
# the default configuration to be used is based on the dtbcount. If there is
# no dtb present then select the default configuration to be based on
# the kernelcount.
- if [ -n "${DTBS}" ]; then
+ if [ -n "$DTBS" ]; then
i=1
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
- if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
+ if [ "$dtb_ext" = "dtbo" ]; then
+ fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
fi
- i=`expr ${i} + 1`
+ i=`expr $i + 1`
done
else
defaultconfigcount=1
- fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "${defaultconfigcount}"
+ fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
- fitimage_emit_section_maint ${1} fitend
+ fitimage_emit_section_maint $1 fitend
#
# Step 7: Assemble the image
#
${UBOOT_MKIMAGE} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
- -f ${1} \
- arch/${ARCH}/boot/${2}
+ -f $1 \
+ arch/${ARCH}/boot/$2
#
# Step 8: Sign the image and add public key to U-Boot dtb
@@ -639,7 +646,7 @@ fitimage_assemble() {
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2} \
+ -r arch/${ARCH}/boot/$2 \
${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
@@ -647,7 +654,7 @@ fitimage_assemble() {
do_assemble_fitimage() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
cd ${B}
- fitimage_assemble fit-image.its fitImage
+ fitimage_assemble fit-image.its fitImage ""
fi
}
@@ -674,7 +681,7 @@ do_kernel_generate_rsa_keys() {
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
- # Generate keys only if they don't already exist
+ # Generate keys to sign configuration nodes, only if they don't already exist
if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
[ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
@@ -691,35 +698,61 @@ do_kernel_generate_rsa_keys() {
-key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
-out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
fi
+
+ # Generate keys to sign image nodes, only if they don't already exist
+ if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
+ [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${UBOOT_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing fitImage"
+ openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
+ "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ "${FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
+ -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
+ fi
fi
}
addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
kernel_do_deploy[vardepsexclude] = "DATETIME"
-kernel_do_deploy_append() {
+kernel_do_deploy:append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
echo "Copying fit-image.its source file..."
install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
fi
fi
fi
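Putting the signing changes together, a configuration enabling FIT signing with separate configuration- and image-node keys might look like the following sketch (the key directory and key names are illustrative; the class now enforces that the two key names differ):

    UBOOT_SIGN_ENABLE = "1"
    FIT_SIGN_INDIVIDUAL = "1"
    FIT_GENERATE_KEYS = "1"
    UBOOT_SIGN_KEYDIR = "/path/to/keys"
    # Key used for configuration nodes ("required" = "conf" in the U-Boot DTB)
    UBOOT_SIGN_KEYNAME = "dev"
    # Key used for image nodes; must differ from UBOOT_SIGN_KEYNAME
    UBOOT_SIGN_IMG_KEYNAME = "dev2"
    # Optional: match a bootloader that expects "config-*" node names
    FIT_CONF_PREFIX = "config-"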
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
index 5d92f3b636..44b2015468 100644
--- a/meta/classes/kernel-grub.bbclass
+++ b/meta/classes/kernel-grub.bbclass
@@ -99,7 +99,7 @@ python __anonymous () {
typelower = type.lower()
preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
- d.setVar('pkg_preinst_kernel-image-' + typelower + '_append', preinst_append)
- d.setVar('pkg_postinst_kernel-image-' + typelower + '_prepend', postinst_prepend)
+ d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
+ d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
}
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index baa32e0a90..a29c294810 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -1,4 +1,4 @@
-pkg_postinst_modules () {
+pkg_postinst:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -8,7 +8,7 @@ else
fi
}
-pkg_postrm_modules () {
+pkg_postrm:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -24,11 +24,12 @@ fi
PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
-do_install_append() {
+do_install:append() {
install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
+KERNEL_SPLIT_MODULES ?= "1"
+PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
@@ -44,17 +45,20 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
tempfile.tempdir = d.getVar("WORKDIR")
- compressed = re.match( r'.*\.([xg])z$', file)
+ compressed = re.match( r'.*\.(gz|xz|zst)$', file)
tf = tempfile.mkstemp()
tmpfile = tf[1]
if compressed:
tmpkofile = tmpfile + ".ko"
- if compressed.group(1) == 'g':
+ if compressed.group(1) == 'gz':
cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
subprocess.check_call(cmd, shell=True)
- elif compressed.group(1) == 'x':
+ elif compressed.group(1) == 'xz':
cmd = "xz -dc %s > %s" % (file, tmpkofile)
subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'zst':
+ cmd = "zstd -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
else:
msg = "Cannot decompress '%s'" % file
raise ValueError(msg)
@@ -100,11 +104,11 @@ python split_kernel_module_packages () {
else:
f.write('%s\n' % basename)
f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
- bb.fatal("pkg_postinst_%s not defined" % pkg)
+ bb.fatal("pkg_postinst:%s not defined" % pkg)
postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
# Write out any modconf fragment
modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
@@ -117,19 +121,19 @@ python split_kernel_module_packages () {
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES_%s' % pkg)
+ files = d.getVar('FILES:%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
- d.setVar('FILES_%s' % pkg, files)
+ d.setVar('FILES:%s' % pkg, files)
- conffiles = d.getVar('CONFFILES_%s' % pkg)
+ conffiles = d.getVar('CONFFILES:%s' % pkg)
conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
- d.setVar('CONFFILES_%s' % pkg, conffiles)
+ d.setVar('CONFFILES:%s' % pkg, conffiles)
if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
- d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
+ old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
+ d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
modinfo_deps = []
if "depends" in vals and vals["depends"] != "":
for dep in vals["depends"].split(","):
@@ -139,33 +143,41 @@ python split_kernel_module_packages () {
for dep in modinfo_deps:
if not dep in rdepends:
rdepends[dep] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
# Avoid automatic -dev recommendations for modules ending with -dev.
- d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+ d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
# Provide virtual package without postfix
providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
if providevirt == "1":
postfix = format.split('%s')[1]
- d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+ d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
kernel_version = d.getVar("KERNEL_VERSION")
- module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
+ splitmods = d.getVar('KERNEL_SPLIT_MODULES')
+ postinst = d.getVar('pkg_postinst:modules')
+ postrm = d.getVar('pkg_postrm:modules')
+
+ if splitmods != '1':
+ etcdir = d.getVar('sysconfdir')
+ d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
+ d.appendVar('pkg_postinst:%s' % metapkg, postinst)
+ d.prependVar('pkg_postrm:%s' % metapkg, postrm)
+ return
+
+ module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
- postinst = d.getVar('pkg_postinst_modules')
- postrm = d.getVar('pkg_postrm_modules')
-
modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
- d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
+ d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
# If modules-load.d and modprobe.d are empty at this point, remove them to
# avoid warnings. removedirs only raises an OSError if an empty
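The new KERNEL_SPLIT_MODULES switch allows shipping all modules in the single meta package instead of one package per .ko; a minimal sketch for a kernel recipe or bbappend:

    # Package every kernel module into ${KERNEL_PACKAGE_NAME}-modules rather
    # than generating kernel-module-<name> packages individually
    KERNEL_SPLIT_MODULES = "0"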
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index b1e7ac05c2..2daa068298 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -22,7 +22,11 @@ uboot_prep_kimage() {
[ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
- gzip -9 linux.bin
+ if [ "${linux_comp}" = "gzip" ] ; then
+ gzip -9 linux.bin
+ elif [ "${linux_comp}" = "lzo" ] ; then
+ lzop -9 linux.bin
+ fi
mv -f "linux.bin${linux_suffix}" linux.bin
fi
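The lzop branch above is exercised when the FIT kernel compression is set to lzo; assuming this tree also carries the companion kernel-fitimage knobs for selecting the algorithm (FIT_KERNEL_COMP_ALG and its extension, present in oe-core of this era), a sketch would be:

    # Compress the FIT kernel node with lzo instead of the gzip default
    FIT_KERNEL_COMP_ALG = "lzo"
    FIT_KERNEL_COMP_ALG_EXTENSION = ".lzo"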
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 0df61cdef0..1d5a8cdf29 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -36,7 +36,10 @@ def find_patches(d,subdir):
if subdir == patchdir:
patch_list.append(local)
else:
- patch_list.append(local)
+ # skip the patch if a patchdir was supplied; it won't be handled
+ # properly
+ if not patchdir:
+ patch_list.append(local)
return patch_list
@@ -358,6 +361,21 @@ do_kernel_checkout() {
fi
fi
cd ${S}
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
else
# case: we have no git repository at all.
# To support low bandwidth options for building the kernel, we'll just
@@ -379,21 +397,6 @@ do_kernel_checkout() {
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
fi
-
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
}
do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
@@ -474,7 +477,7 @@ python do_config_analysis() {
env['srctree'] = s
# read specific symbols from the kernel recipe or from local.conf
- # i.e.: CONFIG_ANALYSIS_pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
+ # i.e.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
config = d.getVar( 'CONFIG_ANALYSIS' )
if not config:
config = [ "" ]
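As the updated comment shows, specific symbols can be analysed per recipe and the task run on demand; for example (recipe name illustrative):

    # in local.conf
    CONFIG_ANALYSIS:pn-linux-yocto-dev = "NF_CONNTRACK LOCALVERSION"

    $ bitbake linux-yocto-dev -c config_analysis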
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 846b19663b..4f304eb9c7 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -5,10 +5,11 @@ COMPATIBLE_HOST = ".*-linux"
KERNEL_PACKAGE_NAME ??= "kernel"
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
-PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
+PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
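The new zstd dependency is pulled in automatically when a zstd-compressed initramfs is requested; a minimal sketch for an initramfs image recipe or local.conf:

    # Build the initramfs as a zstd-compressed cpio; kernel.bbclass then adds
    # zstd-native to DEPENDS for decompression during bundling
    INITRAMFS_FSTYPES = "cpio.zst"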
@@ -29,6 +30,8 @@ INITRAMFS_IMAGE ?= ""
INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
+INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
+INITRAMFS_MULTICONFIG ?= ""
# KERNEL_VERSION is extracted from source code. It is evaluated as
# None for the first parsing, since the code has not been fetched.
@@ -46,7 +49,7 @@ python __anonymous () {
kpn = d.getVar("KERNEL_PACKAGE_NAME")
# XXX Remove this after bug 11905 is resolved
- # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
+ # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
if kpn == pn:
bb.warn("Some packages (E.g. *-dev) might be missing due to "
"bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
@@ -76,7 +79,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -96,11 +99,18 @@ python __anonymous () {
continue
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
- d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
- d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
- d.setVar('pkg_postinst_%s-image-%s' % (kname,typelower), """set +e
+ d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
+ splitmods = d.getVar("KERNEL_SPLIT_MODULES")
+ if splitmods != '1':
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
+ d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
+ d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+ d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+
+ d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
+ d.setVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
if [ -n "$D" ]; then
ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
else
@@ -112,7 +122,7 @@ else
fi
set -e
""" % (type, type, type, type, type, type, type))
- d.setVar('pkg_postrm_%s-image-%s' % (kname,typelower), """set +e
+ d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
fi
@@ -125,7 +135,12 @@ set -e
# the do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
# standalone for use by wic and other tools.
if image:
- d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if d.getVar('INITRAMFS_MULTICONFIG'):
+ d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
+ else:
+ d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
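The new INITRAMFS_MULTICONFIG hook lets the initramfs be built in a separate multiconfig; a hedged sketch where the "initramfscfg" multiconfig name and deploy path are illustrative:

    BBMULTICONFIG = "initramfscfg"
    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"
    INITRAMFS_MULTICONFIG = "initramfscfg"
    # Point the kernel at the deploy directory of that multiconfig
    INITRAMFS_DEPLOY_DIR_IMAGE = "${TOPDIR}/tmp-initramfscfg/deploy/images/${MACHINE}"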
@@ -218,8 +233,8 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
-EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}""
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}""
KERNEL_ALT_IMAGETYPE ??= ""
@@ -229,9 +244,9 @@ copy_initramfs() {
mkdir -p ${B}/usr
# Find and use the first initramfs image archive type we find
rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
+ for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
+ if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
+ cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
case $img in
*gz)
echo "gzip decompressing image"
@@ -258,12 +273,17 @@ copy_initramfs() {
xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
+ *zst)
+ echo "zst decompressing image"
+ zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ break
+ ;;
esac
break
fi
done
# Verify that the above loop found an initramfs, fail otherwise
- [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz) for bundling; INITRAMFS_IMAGE_NAME might be wrong."
+ [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
do_bundle_initramfs () {
@@ -303,24 +323,32 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
-python do_devshell_prepend () {
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
+python do_devshell:prepend () {
os.environ["LDFLAGS"] = ''
}
addtask bundle_initramfs after do_install before do_deploy
-get_cc_option () {
- # Check if KERNEL_CC supports the option "file-prefix-map".
- # This option allows us to build images with __FILE__ values that do not
- # contain the host build path.
- if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
- echo "-ffile-prefix-map=${S}=/kernel-source/"
- fi
-}
+KERNEL_DEBUG_TIMESTAMPS ??= "0"
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+
+ # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
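KERNEL_DEBUG_TIMESTAMPS replaces the old BUILD_REPRODUCIBLE_BINARIES check here: reproducible timestamps are now the default and real build times must be opted into. A minimal sketch:

    # Embed the real build time instead of the SOURCE_DATE_EPOCH-derived one
    KERNEL_DEBUG_TIMESTAMPS = "1"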
@@ -348,20 +376,24 @@ kernel_do_compile() {
copy_initramfs
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
- cc_extra=$(get_cc_option)
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
@@ -377,8 +409,7 @@ do_compile_kernelmodules() {
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- cc_extra=$(get_cc_option)
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
# Module.symvers gets updated during the
# building of the kernel modules. We need to
@@ -591,7 +622,7 @@ kernel_do_configure() {
fi
# Copy defconfig to .config if .config does not exist. This allows
- # recipes to manage the .config themselves in do_configure_prepend().
+ # recipes to manage the .config themselves in do_configure:prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
cp "${WORKDIR}/defconfig" "${B}/.config"
fi
@@ -608,34 +639,34 @@ addtask savedefconfig after do_configure
inherit cml1
-KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+KCONFIG_CONFIG_COMMAND:append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
-FILES_${PN} = ""
-FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
-FILES_${KERNEL_PACKAGE_NAME}-image = ""
-FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
-FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
-FILES_${KERNEL_PACKAGE_NAME}-modules = ""
-RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
+PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
+FILES:${PN} = ""
+FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
+FILES:${KERNEL_PACKAGE_NAME}-image = ""
+FILES:${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
+FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
-RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
-PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
-PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
-DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
-
-pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
+RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
+PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
+PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
+DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
+
+pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
@@ -646,7 +677,7 @@ pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
fi
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
+PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
python split_kernel_packages () {
do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
@@ -674,30 +705,19 @@ do_kernel_link_images() {
}
addtask kernel_link_images after do_compile before do_strip
-do_strip() {
- if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
- if ! (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux"); then
- bbwarn "image type(s) will not be stripped (not supported): ${KERNEL_IMAGETYPES}"
- return
- fi
+python do_strip() {
+ import shutil
- cd ${B}
- headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT_DIR}/vmlinux | \
- grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
- sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
- gawk '{print $1}'`
+ strip = d.getVar('STRIP')
+ extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
+ kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
- for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if ! (echo "$headers" | grep -q "^$str$"); then
- bbwarn "Section not found: $str";
- fi
-
- "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT_DIR}/vmlinux
- }; done
-
- bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
- "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
- fi;
+ if (extra_sections and kernel_image.find('boot/vmlinux') != -1):
+ kernel_image_stripped = kernel_image + ".stripped"
+ shutil.copy2(kernel_image, kernel_image_stripped)
+ oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
+ bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
+ extra_sections)
}
do_strip[dirs] = "${B}"
@@ -742,9 +762,18 @@ kernel_do_deploy() {
for imageType in ${KERNEL_IMAGETYPES} ; do
baseName=$imageType-${KERNEL_IMAGE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName.bin
- ln -sf $baseName.bin $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}.bin
- ln -sf $baseName.bin $deployDir/$imageType
+
+ if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ else
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
+ fi
done
if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
@@ -757,17 +786,21 @@ kernel_do_deploy() {
TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
- ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
+ ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ fi
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
+ for imageType in ${KERNEL_IMAGETYPES} ; do
if [ "$imageType" = "fitImage" ] ; then
continue
fi
initramfsBaseName=$imageType-${INITRAMFS_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName.bin
- ln -sf $initramfsBaseName.bin $deployDir/$imageType-${INITRAMFS_LINK_NAME}.bin
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
+ if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
+ ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
done
fi
}
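The rewritten python do_strip() still keys off the same variable; stripping extra ELF sections from a deployed vmlinux can be requested with, for example (section list illustrative):

    # Sections to strip from vmlinux before deployment
    KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment .note"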
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index de3b4250c7..13ef8cdc0d 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -42,7 +42,7 @@ python __anonymous () {
# try to fix disable charsets/locales/locale-code compile fail
PACKAGE_NO_GCONV ?= "0"
-OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
@@ -129,9 +129,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
@@ -151,9 +151,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
@@ -172,9 +172,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
@@ -210,11 +210,11 @@ python package_do_split_gconvs () {
supported[locale] = charset
def output_locale_source(name, pkgname, locale, encoding):
- d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
+ d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
(mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
+ d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
% (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
+ d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
@@ -222,8 +222,8 @@ python package_do_split_gconvs () {
lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
if lcsplit and int(lcsplit):
d.appendVar('PACKAGES', ' ' + dep)
- d.setVar('ALLOW_EMPTY_%s' % dep, '1')
- d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
+ d.setVar('ALLOW_EMPTY:%s' % dep, '1')
+ d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)
commands = {}
@@ -293,13 +293,13 @@ python package_do_split_gconvs () {
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
- d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
m = re.match(r"(.*)_(.*)", name)
if m:
rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
- d.setVar('RPROVIDES_%s' % pkgname, rprovides)
+ d.setVar('RPROVIDES:%s' % pkgname, rprovides)
if use_bin == "compile":
output_locale_binary_rdepends(name, pkgname, locale, encoding)
@@ -343,7 +343,7 @@ python package_do_split_gconvs () {
def metapkg_hook(file, pkg, pattern, format, basename):
name = basename.split('/', 1)[0]
metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
- d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
+ d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)
if use_bin == "compile":
makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
@@ -355,7 +355,7 @@ python package_do_split_gconvs () {
m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
- d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
+ d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
@@ -379,6 +379,6 @@ python package_do_split_gconvs () {
# We want to do this indirection so that we can safely 'return'
# from the called function even though we're prepending
-python populate_packages_prepend () {
+python populate_packages:prepend () {
bb.build.exec_func('package_do_split_gconvs', d)
}
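
The libc-package changes above follow the series-wide migration to BitBake's colon-based override syntax: per-package variables and task append/prepend hooks switch from '_' to ':' as the separator. A minimal before/after sketch (the recipe content is hypothetical):

    # old (underscore) syntax
    RDEPENDS_${PN} += "bash"
    do_install_append() {
        install -d ${D}${sysconfdir}
    }

    # new (colon) syntax
    RDEPENDS:${PN} += "bash"
    do_install:append() {
        install -d ${D}${sysconfdir}
    }
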
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index c87473cbb8..813e1ea4f5 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -6,7 +6,7 @@
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
-# Create extra package with license texts and add it to RRECOMMENDS_${PN}
+# Create extra package with license texts and add it to RRECOMMENDS:${PN}
LICENSE_CREATE_PACKAGE[type] = "boolean"
LICENSE_CREATE_PACKAGE ??= "0"
LICENSE_PACKAGE_SUFFIX ??= "-lic"
@@ -29,11 +29,12 @@ python do_populate_lic() {
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
for key in sorted(info.keys()):
f.write("%s: %s\n" % (key, info[key]))
+ oe.qa.exit_if_errors(d)
}
-PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '')).split())}"
-# it would be better to copy them in do_install_append, but find_license_filesa is python
-python perform_packagecopy_prepend () {
+PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
+# it would be better to copy them in do_install:append, but find_license_files is python
+python perform_packagecopy:prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
lic_files_paths = find_license_files(d)
@@ -62,7 +63,7 @@ def add_package_and_files(d):
else:
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
- d.setVar('FILES_' + pn_lic, files)
+ d.setVar('FILES:' + pn_lic, files)
def copy_license_files(lic_files_paths, destdir):
import shutil
@@ -145,6 +146,10 @@ def find_license_files(d):
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
+ def visit_Constant(self, node):
+ find_license(node.value.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
def find_license(license_type):
try:
bb.utils.mkdirhier(gen_lic_dest)
@@ -178,7 +183,8 @@ def find_license_files(d):
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
# and should not be allowed, warn the user in this case.
if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
- bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
+ oe.qa.handle_error("license-no-generic",
+ "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
@@ -187,10 +193,11 @@ def find_license_files(d):
os.path.join(srcdir, non_generic_lic), None, None))
non_generic_lics[non_generic_lic] = license_type
else:
- # Add explicity avoid of CLOSED license because this isn't generic
+ # Explicitly avoid the CLOSED license because this isn't generic
if license_type != 'CLOSED':
# And here is where we warn people that their licenses are lousy
- bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
+ oe.qa.handle_error("license-exists",
+ "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
pass
if not generic_directory:
@@ -215,7 +222,8 @@ def find_license_files(d):
except oe.license.InvalidLicense as exc:
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
- bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF')))
+ oe.qa.handle_error("license-syntax",
+ "%s: Failed to parse it's LICENSE field." % (d.getVar('PF')), d)
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
for path, data in sorted(lic_chksums.items()):
@@ -244,7 +252,7 @@ def return_spdx(d, license):
def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
- becomes GPL-3.0) or the passed license if there is no canonical form.
+ becomes GPL-3.0-only) or the passed license if there is no canonical form.
"""
return d.getVarFlag('SPDXLICENSEMAP', license) or license
@@ -263,33 +271,30 @@ def available_licenses(d):
licenses = sorted(licenses)
return licenses
-# Only determine the list of all available licenses once. This assumes that any
-# additions to LICENSE_PATH have been done before this file is parsed.
-AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
-
def expand_wildcard_licenses(d, wildcard_licenses):
"""
- Return actual spdx format license names if wildcards are used. We expand
- wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
+ There are some common wildcard values users may want to use. Support them
+ here.
"""
- import fnmatch
-
- licenses = wildcard_licenses[:]
- spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
- for wld_lic in wildcard_licenses:
- spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
- # Assume if we're passed "GPLv3" or "*GPLv3" it means -or-later as well
- if not wld_lic.endswith(("-or-later", "-only", "*", "+")):
- spdxflags = fnmatch.filter(spdxmapkeys, wld_lic + "+")
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
-
- spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
- for wld_lic in wildcard_licenses:
- licenses += fnmatch.filter(spdx_lics, wld_lic)
-
- licenses = list(set(licenses))
- return licenses
+ licenses = set(wildcard_licenses)
+ mapping = {
+ "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
+ "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
+ "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
+ }
+ for k in mapping:
+ if k in wildcard_licenses:
+ licenses.remove(k)
+ for item in mapping[k]:
+ licenses.add(item)
+
+ for l in licenses:
+ if l in oe.license.obsolete_license_list():
+ bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % l)
+ if "*" in l:
+ bb.fatal("Error, %s is an invalid license wildcard entry" % l)
+
+ return list(licenses)
def incompatible_license_contains(license, truevalue, falsevalue, d):
license = canonical_license(d, license)
@@ -324,7 +329,7 @@ def incompatible_license(d, dont_want_licenses, package=None):
as canonical (SPDX) names.
"""
import oe.license
- license = d.getVar("LICENSE_%s" % package) if package else None
+ license = d.getVar("LICENSE:%s" % package) if package else None
if not license:
license = d.getVar('LICENSE')
@@ -333,30 +338,31 @@ def incompatible_license(d, dont_want_licenses, package=None):
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
- aren't whitelisted.
+ aren't acceptable.
- If it does, it returns the all LICENSE_FLAGS missing from the whitelist, or
- all of the LICENSE_FLAGS if there is no whitelist.
+ If it does, it returns all LICENSE_FLAGS missing from the list
+ of acceptable license flags, or all of the LICENSE_FLAGS if there
+ is no list of acceptable flags.
- If everything is is properly whitelisted, it returns None.
+ If everything is acceptable, it returns None.
"""
- def license_flag_matches(flag, whitelist, pn):
+ def license_flag_matches(flag, acceptlist, pn):
"""
- Return True if flag matches something in whitelist, None if not.
+ Return True if flag matches something in acceptlist, False if not.
- Before we test a flag against the whitelist, we append _${PN}
+ Before we test a flag against the acceptlist, we append _${PN}
to it. We then try to match that string against the
- whitelist. This covers the normal case, where we expect
+ acceptlist. This covers the normal case, where we expect
LICENSE_FLAGS to be a simple string like 'commercial', which
- the user typically matches exactly in the whitelist by
+ the user typically matches exactly in the acceptlist by
explicitly appending the package name e.g 'commercial_foo'.
If we fail the match however, we then split the flag across
'_' and append each fragment and test until we either match or
run out of fragments.
"""
flag_pn = ("%s_%s" % (flag, pn))
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_pn == candidate:
return True
@@ -367,27 +373,27 @@ def check_license_flags(d):
if flag_cur:
flag_cur += "_"
flag_cur += flagment
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_cur == candidate:
return True
return False
- def all_license_flags_match(license_flags, whitelist):
+ def all_license_flags_match(license_flags, acceptlist):
""" Return all unmatched flags, None if all flags match """
pn = d.getVar('PN')
- split_whitelist = whitelist.split()
+ split_acceptlist = acceptlist.split()
flags = []
for flag in license_flags.split():
- if not license_flag_matches(flag, split_whitelist, pn):
+ if not license_flag_matches(flag, split_acceptlist, pn):
flags.append(flag)
return flags if flags else None
license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
- if not whitelist:
+ acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
+ if not acceptlist:
return license_flags.split()
- unmatched_flags = all_license_flags_match(license_flags, whitelist)
+ unmatched_flags = all_license_flags_match(license_flags, acceptlist)
if unmatched_flags:
return unmatched_flags
return None
@@ -406,20 +412,22 @@ def check_license_format(d):
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos - 1]):
- bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid format - license names ' \
'must be separated by the following characters to indicate ' \
'the license selection: %s' %
- (pn, licenses, license_operator_chars))
+ (pn, licenses, license_operator_chars), d)
elif not license_operator.match(element):
- bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
'in the valid list of separators (%s)' %
- (pn, licenses, element, license_operator_chars))
+ (pn, licenses, element, license_operator_chars), d)
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
-IMAGE_CLASSES_append = " license_image"
+IMAGE_CLASSES:append = " license_image"
python do_populate_lic_setscene () {
sstate_setscene(d)
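
Note that expand_wildcard_licenses() no longer fnmatches against AVAILABLE_LICENSES; only the three wildcard forms in the mapping are recognised, and any other entry containing '*', or an obsolete non-SPDX name, is now fatal. For example, in a distro configuration:

    # expands to "GPL-3.0-only GPL-3.0-or-later"
    INCOMPATIBLE_LICENSE = "GPL-3.0*"

    # both of these now fail with bb.fatal
    #INCOMPATIBLE_LICENSE = "GPLv3"
    #INCOMPATIBLE_LICENSE = "*GPL*"
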
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 5dbec288a4..0a5ea0a2fb 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -39,7 +39,7 @@ python license_create_manifest() {
pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
if not "LICENSE" in pkg_dic[pkg_name].keys():
- pkg_lic_name = "LICENSE_" + pkg_name
+ pkg_lic_name = "LICENSE:" + pkg_name
pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
@@ -54,28 +54,23 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- whitelist = []
- for lic in bad_licenses:
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
-
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
with open(license_manifest, "w") as license_file:
for pkg in sorted(pkg_dic):
- if bad_licenses and pkg not in whitelist:
- try:
- licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
- if licenses:
- bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
- (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
- oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
- bad_licenses, canonical_license, d)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+ incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+ bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
else:
- pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
- pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
- pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
- if pkg in whitelist:
- bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
+ incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+ oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by the exception list." %(pkg, ' '.join(incompatible_licenses)), d)
+ try:
+ (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+ oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+ remaining_bad_licenses, canonical_license, d)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
# Rootfs manifest
@@ -87,7 +82,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
# If the package doesn't contain any file, that is, its size is 0, the license
# isn't relevant as far as the final image is concerned. So doing license check
# doesn't make much sense, skip it.
- if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+ if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
continue
else:
# Image manifest
@@ -105,10 +100,10 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
continue
if not os.path.exists(lic_file):
- bb.warn("The license listed %s was not in the "\
- "licenses collected for recipe %s"
- % (lic, pkg_dic[pkg]["PN"]))
-
+ oe.qa.handle_error('license-file-missing',
+ "The license listed %s was not in the "\
+ "licenses collected for recipe %s"
+ % (lic, pkg_dic[pkg]["PN"]), d)
# Two options here:
# - Just copy the manifest
# - Copy the manifest and the license directories
@@ -269,11 +264,12 @@ def get_deployed_files(man_file):
dep_files.append(os.path.basename(f))
return dep_files
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
do_rootfs[recrdeptask] += "do_populate_lic"
python do_populate_lic_deploy() {
license_deployed_manifest(d)
+ oe.qa.exit_if_errors(d)
}
addtask populate_lic_deploy before do_build after do_image_complete
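
The per-license WHITELIST_<license> variables are gone; exceptions are now listed centrally in INCOMPATIBLE_LICENSE_EXCEPTIONS as package:license pairs, and excepted packages are reported through the 'license-incompatible' QA message instead of a bare warning. A sketch of the new form (the package name is hypothetical):

    INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
    # permit one specific package despite the incompatible list
    INCOMPATIBLE_LICENSE_EXCEPTIONS = "my-gplv3-tool:GPL-3.0-only"
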
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass
index cd8791557d..9a06a509dd 100644
--- a/meta/classes/linux-dummy.bbclass
+++ b/meta/classes/linux-dummy.bbclass
@@ -6,7 +6,7 @@ python __anonymous () {
# set an empty package of kernel-devicetree
d.appendVar('PACKAGES', ' %s-devicetree' % kname)
- d.setVar('ALLOW_EMPTY_%s-devicetree' % kname, '1')
+ d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')
# Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
type = d.getVar('KERNEL_IMAGETYPE') or ""
@@ -21,6 +21,6 @@ python __anonymous () {
for type in types.split():
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
}
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
index 1e66780646..5e09c77fe6 100644
--- a/meta/classes/manpages.bbclass
+++ b/meta/classes/manpages.bbclass
@@ -2,7 +2,7 @@
# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
# tends to pull in the entire XML stack and other tools, so it's not enabled
# by default.
-PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
+PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
inherit qemu
@@ -10,15 +10,16 @@ inherit qemu
MAN_PKG ?= "${PN}-doc"
# only add man-db to RDEPENDS when manual files are built and installed
-RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
+RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
-pkg_postinst_append_${MAN_PKG} () {
+pkg_postinst:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
if test -n "$D"; then
- if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
+ if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
chown -R root:root $D${mandir}
+
mkdir -p $D${localstatedir}/cache/man
cd $D${mandir}
find . -name index.db | while read index; do
@@ -36,7 +37,7 @@ pkg_postinst_append_${MAN_PKG} () {
fi
}
-pkg_postrm_append_${MAN_PKG} () {
+pkg_postrm:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
mandb -q
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index 8ae0285f72..0bfe945811 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -1,6 +1,11 @@
-inherit python3native meson-routines
+inherit python3native meson-routines qemu
-DEPENDS_append = " meson-native ninja-native"
+DEPENDS:append = " meson-native ninja-native"
+
+EXEWRAPPER_ENABLED:class-native = "False"
+EXEWRAPPER_ENABLED:class-nativesdk = "False"
+EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
# As Meson enforces out-of-tree builds we can just use cleandirs
B = "${WORKDIR}/build"
@@ -12,7 +17,8 @@ MESON_SOURCEPATH = "${S}"
def noprefix(var, d):
return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
-MESON_BUILDTYPE ?= "plain"
+MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
+MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
MESONOPTS = " --prefix ${prefix} \
--buildtype ${MESON_BUILDTYPE} \
--bindir ${@noprefix('bindir', d)} \
@@ -29,14 +35,24 @@ MESONOPTS = " --prefix ${prefix} \
--wrap-mode nodownload \
--native-file ${WORKDIR}/meson.native"
-EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
MESON_CROSS_FILE = ""
-MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
-MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+
+# Needed to set up qemu wrapper below
+export STAGING_DIR_HOST
+
+def rust_tool(d, target_var):
+ rustc = d.getVar('RUSTC')
+ if not rustc:
+ return ""
+ cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
+ return "rust = %s" % repr(cmd)
addtask write_config before do_configure
-do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
+do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
do_write_config() {
# This needs to be Py to split the args into single-element lists
cat >${WORKDIR}/meson.cross <<EOF
@@ -47,11 +63,14 @@ ar = ${@meson_array('AR', d)}
nm = ${@meson_array('NM', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
+objcopy = ${@meson_array('OBJCOPY', d)}
pkgconfig = 'pkg-config'
llvm-config = 'llvm-config${LLVMVERSION}'
cups-config = 'cups-config'
g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
+${@rust_tool(d, "HOST_SYS")}
+${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
[built-in options]
c_args = ${@meson_array('CFLAGS', d)}
@@ -61,7 +80,6 @@ cpp_link_args = ${@meson_array('LDFLAGS', d)}
[properties]
needs_exe_wrapper = true
-gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
[host_machine]
system = '${@meson_operating_system('HOST_OS', d)}'
@@ -84,7 +102,9 @@ ar = ${@meson_array('BUILD_AR', d)}
nm = ${@meson_array('BUILD_NM', d)}
strip = ${@meson_array('BUILD_STRIP', d)}
readelf = ${@meson_array('BUILD_READELF', d)}
+objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
pkgconfig = 'pkg-config-native'
+${@rust_tool(d, "BUILD_SYS")}
[built-in options]
c_args = ${@meson_array('BUILD_CFLAGS', d)}
@@ -94,6 +114,24 @@ cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
EOF
}
+do_write_config:append:class-target() {
+ # Write out a qemu wrapper that will be used as exe_wrapper so that meson
+ # can run target helper binaries through that.
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ cat > ${WORKDIR}/meson-qemuwrapper << EOF
+#!/bin/sh
+# Use a modules directory which doesn't exist so we don't load random things
+# which may then get deleted (or their dependencies) and potentially segfault
+export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+
+# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
+$qemu_binary "\$@"
+EOF
+ chmod +x ${WORKDIR}/meson-qemuwrapper
+}
+
# Tell externalsrc that changes to this file require a reconfigure
CONFIGURE_FILES = "meson.build"
@@ -102,6 +140,16 @@ meson_do_configure() {
# https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
unset LD
+ # sstate.bbclass no longer removes empty directories to avoid a race (see
+ # commit 4f94d929 "sstate/staging: Handle directory creation race issue").
+ # Unfortunately Python apparently treats an empty egg-info directory as if
+ # the version it previously contained still exists and fails if a newer
+ # version is required, which Meson does. To avoid this, make sure there are
+ # no empty egg-info directories from previous versions left behind. Ignore
+ # all errors from rmdir since the egg-info may be a file rather than a
+ # directory.
+ rmdir ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/*.egg-info 2>/dev/null || :
+
# Work around "Meson fails if /tmp is mounted with noexec #2972"
mkdir -p "${B}/meson-private/tmp"
export TMPDIR="${B}/meson-private/tmp"
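
With the qemu wrapper and rust_tool() additions above, the generated meson.cross can now carry entries along these lines (values illustrative, paths left unexpanded):

    [binaries]
    objcopy = ['aarch64-poky-linux-objcopy']
    rust = ['rustc', '--target', 'aarch64-poky-linux', '<RUSTFLAGS...>']
    exe_wrapper = '${WORKDIR}/meson-qemuwrapper'

letting Meson drive rustc for mixed-language projects and run target helper binaries under QEMU user-mode emulation when MACHINE_FEATURES includes qemu-usermode.
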
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
deleted file mode 100644
index 5e6890238b..0000000000
--- a/meta/classes/meta.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-
-PACKAGES = ""
-
-do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
index 642a5b7595..271f48dd72 100644
--- a/meta/classes/mime-xdg.bbclass
+++ b/meta/classes/mime-xdg.bbclass
@@ -34,7 +34,7 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
desktop_base = d.getVar('DESKTOPDIR')
@@ -59,16 +59,16 @@ python populate_packages_append () {
break
if desktops_with_mime_found:
bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_xdg_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_xdg_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
bb.note("adding desktop-file-utils dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
}
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
index bb99bc35cb..8d176a884e 100644
--- a/meta/classes/mime.bbclass
+++ b/meta/classes/mime.bbclass
@@ -39,7 +39,7 @@ fi
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
mimedir = d.getVar('MIMEDIR')
@@ -54,17 +54,17 @@ python populate_packages_append () {
break
if mimes_types_found:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
if pkg != 'shared-mime-info-data':
bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 87bba41472..8e7b35d900 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -1,76 +1,76 @@
MIRRORS += "\
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
-${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
-${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
-${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
-ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
-ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
-ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
-http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
-${APACHE_MIRROR} http://www.us.apache.org/dist \n \
-${APACHE_MIRROR} http://archive.apache.org/dist \n \
-http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
-${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
-${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
-ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
-ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
-ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
-cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-cvs://.*/.* http://sources.openembedded.org/ \n \
-svn://.*/.* http://sources.openembedded.org/ \n \
-git://.*/.* http://sources.openembedded.org/ \n \
-hg://.*/.* http://sources.openembedded.org/ \n \
-bzr://.*/.* http://sources.openembedded.org/ \n \
-p4://.*/.* http://sources.openembedded.org/ \n \
-osc://.*/.* http://sources.openembedded.org/ \n \
-https?$://.*/.* http://sources.openembedded.org/ \n \
-ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/?.* http://sources.openembedded.org/ \n \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
+${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
+${GNU_MIRROR} https://mirrors.kernel.org/gnu \
+${KERNELORG_MIRROR} http://www.kernel.org/pub \
+${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
+ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
+ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
+ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
+http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
+http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
+${APACHE_MIRROR} http://www.us.apache.org/dist \
+${APACHE_MIRROR} http://archive.apache.org/dist \
+http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
+${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
+${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
+ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
+ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
+ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
+cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
+cvs://.*/.* http://sources.openembedded.org/ \
+svn://.*/.* http://sources.openembedded.org/ \
+git://.*/.* http://sources.openembedded.org/ \
+hg://.*/.* http://sources.openembedded.org/ \
+bzr://.*/.* http://sources.openembedded.org/ \
+p4://.*/.* http://sources.openembedded.org/ \
+osc://.*/.* http://sources.openembedded.org/ \
+https?://.*/.* http://sources.openembedded.org/ \
+ftp://.*/.* http://sources.openembedded.org/ \
+npm://.*/?.* http://sources.openembedded.org/ \
+${CPAN_MIRROR} http://cpan.metacpan.org/ \
+${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
+https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
+https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
"
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
# where git native protocol fetches may fail due to local firewall rules, etc.
MIRRORS += "\
-git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
-git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
-git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
-git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
-git://.*/.* git://HOST/PATH;protocol=https \n \
+git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
+git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
+git://.*/.* git://HOST/PATH;protocol=https \
+git://.*/.* git://HOST/git/PATH;protocol=https \
"
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index c0dfa35061..a09ec3ed1e 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -14,7 +14,7 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
-python do_devshell_prepend () {
+python do_devshell:prepend () {
os.environ['CFLAGS'] = ''
os.environ['CPPFLAGS'] = ''
os.environ['CXXFLAGS'] = ''
@@ -70,5 +70,5 @@ EXPORT_FUNCTIONS do_compile do_install
# add all splitted modules to PN RDEPENDS, PN can be empty now
KERNEL_MODULES_META_PACKAGE = "${PN}"
-FILES_${PN} = ""
-ALLOW_EMPTY_${PN} = "1"
+FILES:${PN} = ""
+ALLOW_EMPTY:${PN} = "1"
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 2ef75c0d16..5859ca8d21 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -35,7 +35,7 @@ python multilib_virtclass_handler () {
e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ target_vendor = e.data.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
return
@@ -65,24 +65,25 @@ python multilib_virtclass_handler () {
override = ":virtclass-multilib-" + variant
- blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
- if blacklist:
+ skip_msg = e.data.getVarFlag('SKIP_RECIPE', e.data.getVar('PN'))
+ if skip_msg:
pn_new = variant + "-" + e.data.getVar('PN')
- if not e.data.getVarFlag('PNBLACKLIST', pn_new):
- e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+ if not e.data.getVarFlag('SKIP_RECIPE', pn_new):
+ e.data.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- # Expand WHITELIST_GPL-3.0 with multilib prefix
- pkgs = e.data.getVar("WHITELIST_GPL-3.0")
- for pkg in pkgs.split():
- pkgs += " " + variant + "-" + pkg
- e.data.setVar("WHITELIST_GPL-3.0", pkgs)
+ # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
+ pkgs = e.data.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
+ if pkgs:
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
- newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
+ newtune = e.data.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
}
@@ -92,6 +93,10 @@ multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
python __anonymous () {
if bb.data.inherits_class('image', d):
+ # set rpm preferred file color for 32-bit multilib image
+ if d.getVar("SITEINFO_BITS") == "32":
+ d.setVar("RPM_PREFER_ELF_ARCH", "1")
+
variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
@@ -176,7 +181,7 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
# ALTERNATIVE_PRIORITY_pkg[tool] = priority
alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
# ALTERNATIVE_PRIORITY[tool] = priority
@@ -191,12 +196,12 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
-PACKAGEFUNCS_append = " do_package_qa_multilib"
+PACKAGEFUNCS:append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
+ values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
@@ -210,7 +215,7 @@ python do_package_qa_multilib() {
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
- package_qa_handle_error("multilib", msg, d)
+ oe.qa.handle_error("multilib", msg, d)
ml = d.getVar('MLPREFIX')
if not ml:
@@ -228,4 +233,5 @@ python do_package_qa_multilib() {
check_mlprefix(pkg, 'RSUGGESTS', ml)
check_mlprefix(pkg, 'RREPLACES', ml)
check_mlprefix(pkg, 'RCONFLICTS', ml)
+ oe.qa.exit_if_errors(d)
}
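
PNBLACKLIST is renamed to SKIP_RECIPE here, with the multilib handler copying the skip message to the variant-prefixed recipe name as before. Usage is unchanged apart from the variable name, e.g. in local.conf:

    SKIP_RECIPE[busybox] = "does not fit this product"
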
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 96257de5ca..e06307d057 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -39,6 +39,9 @@ def preferred_ml_updates(d):
override = ":virtclass-multilib-" + p
localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
if "-canadian-" in pkg:
+ newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
+ if newtune:
+ localdata.setVar("DEFAULTTUNE", newtune)
newname = localdata.expand(v)
else:
newname = localdata.expand(v).replace(version_str, version_str + p + '-')
@@ -137,14 +140,14 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
for pref in prefixes:
extramp.append(translate_provide(pref, p))
- d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+ d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
@@ -164,8 +167,8 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS").split():
- if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
- e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ if e.data.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
+ e.data.setVar("TARGET_VENDOR:virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
}
addhandler multilib_virtclass_handler_vendor
@@ -207,13 +210,13 @@ python multilib_virtclass_handler_global () {
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
+ # Process RPROVIDES:${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
- origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
for clsextend in clsextends:
- rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
- e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+ e.data.setVar("RPROVIDES:%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
index e03f5b13b2..efbc24f59b 100644
--- a/meta/classes/multilib_header.bbclass
+++ b/meta/classes/multilib_header.bbclass
@@ -42,11 +42,11 @@ oe_multilib_header() {
# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
# We don't need multilib headers for native builds so brute force things.
-oe_multilib_header_class-native () {
+oe_multilib_header:class-native () {
return
}
# Nor do we need multilib headers for nativesdk builds.
-oe_multilib_header_class-nativesdk () {
+oe_multilib_header:class-nativesdk () {
return
}
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
index b11efc1ec5..41597341cd 100644
--- a/meta/classes/multilib_script.bbclass
+++ b/meta/classes/multilib_script.bbclass
@@ -26,9 +26,9 @@ python () {
pkg, script = entry.split(":")
epkg = d.expand(pkg)
scriptname = os.path.basename(script)
- d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
+ d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 561cc23f68..fc7422c5d7 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -9,7 +9,7 @@ PACKAGE_ARCH = "${BUILD_ARCH}"
# used by cmake class
OECMAKE_RPATH = "${libdir}"
-OECMAKE_RPATH_class-native = "${libdir}"
+OECMAKE_RPATH:class-native = "${libdir}"
TARGET_ARCH = "${BUILD_ARCH}"
TARGET_OS = "${BUILD_OS}"
@@ -106,7 +106,7 @@ CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
MACHINE_FEATURES = ""
-PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+PATH:prepend = "${COREBASE}/scripts/native-intercept:"
# This class encodes staging paths into its scripts data so can only be
# reused if we manipulate the paths.
@@ -133,7 +133,7 @@ python native_virtclass_handler () {
def map_dependencies(varname, d, suffix = "", selfref=True):
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
deps = d.getVar(varname)
if not deps:
return
@@ -195,3 +195,34 @@ USE_NLS = "no"
RECIPERDEPTASK = "do_populate_sysroot"
do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
+
+#
+# Native task outputs are directly run on the target (host) system after being
+# built. Even if the output of this recipe doesn't change, a change in one of
+# its dependencies may cause a change in the output it generates (e.g. rpm
+# output depends on the output of its dependent zstd library).
+#
+# This can cause poor interactions with hash equivalence, since this recipe's
+# output-changing dependency is "hidden" and downstream tasks only see that this
+# recipe has the same outhash and therefore is equivalent. This can result in
+# different output in different cases.
+#
+# To resolve this, unhide the output-changing dependency by adding its unihash
+# to this task's outhash calculation. Unfortunately, we don't specifically
+# know which dependencies are output-changing, so we have to add all of them.
+#
+python native_add_do_populate_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task != "do_populate_sysroot":
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN")
+ deps = {
+ dep[0]:dep[6] for dep in taskdepdata.values() if
+ dep[1] == current_task and dep[0] != pn
+ }
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps"
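
The function above mixes the unihash of every dependent do_populate_sysroot task into this task's outhash input via HASHEQUIV_EXTRA_SIGDATA. For a native recipe depending on, say, xz-native and zstd-native, the extra sigdata would look something like (hashes shortened, illustrative only):

    xz-native: 3f7b9a...
    zstd-native: 91c2e0...

so a dependency rebuilt with a different unihash changes this task's outhash, and hash equivalence can no longer treat the two builds as interchangeable.
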
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7f2692c51a..f8e9607513 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -28,10 +28,10 @@ PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -113,3 +113,5 @@ do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index 8f8712a024..ba50fcac20 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -19,11 +19,15 @@
inherit python3native
-DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_append_class-target = " nodejs"
+DEPENDS:prepend = "nodejs-native "
+RDEPENDS:${PN}:append:class-target = " nodejs"
+
+EXTRA_OENPM = ""
NPM_INSTALL_DEV ?= "0"
+NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
+
def npm_target_arch_map(target_arch):
"""Maps arch names to npm arch names"""
import re
@@ -57,8 +61,8 @@ def npm_pack(env, srcdir, workdir):
"""Run 'npm pack' on a specified directory"""
import shlex
cmd = "npm pack %s" % shlex.quote(srcdir)
- configs = [("ignore-scripts", "true")]
- tarball = env.run(cmd, configs=configs, workdir=workdir).strip("\n")
+ args = [("ignore-scripts", "true")]
+ tarball = env.run(cmd, args=args, workdir=workdir).strip("\n")
return os.path.join(workdir, tarball)
python npm_do_configure() {
@@ -224,15 +228,11 @@ python npm_do_compile() {
bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
- env = NpmEnvironment(d, configs=npm_global_configs(d))
-
- dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
-
with tempfile.TemporaryDirectory() as tmpdir:
args = []
- configs = []
+ configs = npm_global_configs(d)
- if dev:
+ if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False):
configs.append(("also", "development"))
else:
configs.append(("only", "production"))
@@ -247,20 +247,19 @@ python npm_do_compile() {
# Add node-gyp configuration
configs.append(("arch", d.getVar("NPM_ARCH")))
configs.append(("release", "true"))
- nodedir = d.getVar("NPM_NODEDIR")
- if not nodedir:
- sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
- nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
- configs.append(("nodedir", nodedir))
+ configs.append(("nodedir", d.getVar("NPM_NODEDIR")))
configs.append(("python", d.getVar("PYTHON")))
+ env = NpmEnvironment(d, configs)
+
# Add node-pre-gyp configuration
args.append(("target_arch", d.getVar("NPM_ARCH")))
args.append(("build-from-source", "true"))
# Pack and install the main package
tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
- env.run("npm install %s" % shlex.quote(tarball), args=args, configs=configs)
+ cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
+ env.run(cmd, args=args)
}
npm_do_install() {
@@ -312,7 +311,7 @@ npm_do_install() {
ln -fs node_modules ${D}/${nonarch_libdir}/node
}
-FILES_${PN} += " \
+FILES:${PN} += " \
${bindir} \
${nonarch_libdir} \
"
diff --git a/meta/classes/overlayfs-etc.bbclass b/meta/classes/overlayfs-etc.bbclass
new file mode 100644
index 0000000000..91afee695c
--- /dev/null
+++ b/meta/classes/overlayfs-etc.bbclass
@@ -0,0 +1,76 @@
+# Class for setting up /etc in overlayfs
+#
+# In order to have the /etc directory in overlayfs, special handling is required at an early boot stage.
+# The idea is to supply a custom init script that mounts /etc before launching the actual init program,
+# because the latter already requires /etc to be mounted
+#
+# The configuration must be machine specific. You should at least set these three variables:
+# OVERLAYFS_ETC_MOUNT_POINT ?= "/data"
+# OVERLAYFS_ETC_FSTYPE ?= "ext4"
+# OVERLAYFS_ETC_DEVICE ?= "/dev/mmcblk0p2"
+#
+# To control the mount options further, you can also set:
+# OVERLAYFS_ETC_MOUNT_OPTIONS ?= "defaults"
+#
+# The class provides two options for /sbin/init generation:
+# 1. The default option is to rename the original /sbin/init to /sbin/init.orig and place the
+# generated init under the original name, i.e. /sbin/init. The advantage is that you won't need
+# to change any kernel parameters to make it work, but it imposes the restriction that
+# package-management can't be used, because updating the init manager would remove the generated script
+# 2. If you would like to keep the original init as-is, you can set
+# OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "0"
+# Then the generated init will be named /sbin/preinit and you will need to extend your kernel
+# parameters manually in your bootloader configuration.
+#
+# Regardless of which mode you choose, the update and migration strategy for configuration files
+# under the /etc overlay is out of the scope of this class
+
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "overlayfs-etc", "create_overlayfs_etc_preinit;", "", d)}'
+IMAGE_FEATURES_CONFLICTS_overlayfs-etc = "${@ 'package-management' if bb.utils.to_boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'), True) else ''}"
+
+OVERLAYFS_ETC_MOUNT_POINT ??= ""
+OVERLAYFS_ETC_FSTYPE ??= ""
+OVERLAYFS_ETC_DEVICE ??= ""
+OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
+OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
+OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+
+python create_overlayfs_etc_preinit() {
+ overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
+ overlayEtcFsType = d.getVar("OVERLAYFS_ETC_FSTYPE")
+ overlayEtcDevice = d.getVar("OVERLAYFS_ETC_DEVICE")
+
+ if not overlayEtcMountPoint:
+ bb.fatal("OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration")
+ if not overlayEtcDevice:
+ bb.fatal("OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration")
+ if not overlayEtcFsType:
+ bb.fatal("OVERLAYFS_ETC_FSTYPE should contain a valid file system type on {0}".format(overlayEtcDevice))
+
+ with open(d.getVar("OVERLAYFS_ETC_INIT_TEMPLATE"), "r") as f:
+ PreinitTemplate = f.read()
+
+ useOrigInit = oe.types.boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'))
+ preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
+ initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
+ origInitNameSuffix = ".orig"
+
+ args = {
+ 'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
+ 'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
+ 'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
+ 'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName
+ }
+
+ if useOrigInit:
+ # rename original /sbin/init
+ origInit = oe.path.join(d.getVar("IMAGE_ROOTFS"), initBaseName)
+ bb.debug(1, "rootfs path %s, init path %s, test %s" % (d.getVar('IMAGE_ROOTFS'), origInit, d.getVar("IMAGE_ROOTFS")))
+ bb.utils.rename(origInit, origInit + origInitNameSuffix)
+ preinitPath = origInit
+
+ with open(preinitPath, 'w') as f:
+ f.write(PreinitTemplate.format(**args))
+ os.chmod(preinitPath, 0o755)
+}
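
Putting the pieces together, enabling the class could look like this in a machine configuration (device, file system type and mount point are the example values from the header comment):

    IMAGE_FEATURES += "overlayfs-etc"
    OVERLAYFS_ETC_MOUNT_POINT = "/data"
    OVERLAYFS_ETC_FSTYPE = "ext4"
    OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p2"
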
diff --git a/meta/classes/overlayfs.bbclass b/meta/classes/overlayfs.bbclass
new file mode 100644
index 0000000000..29fced2ca7
--- /dev/null
+++ b/meta/classes/overlayfs.bbclass
@@ -0,0 +1,119 @@
+# Class for generation of overlayfs mount units
+#
+# It's often desired in Embedded System design to have a read-only rootfs.
+# But a lot of different applications might want to have read-write access to
+# some parts of a filesystem. It can be especially useful when your update mechanism
+# overwrites the whole rootfs, but you want your application data to be preserved
+# between updates. This class provides a way to achieve that by means
+# of overlayfs and at the same time keeping the base rootfs read-only.
+#
+# Usage example.
+#
+# Set a mount point for the partition overlayfs is going to use as its upper layer
+# in your machine configuration. The underlying file system can be anything that
+# is supported by overlayfs. This has to be done in your machine configuration:
+# the QA check fails to catch file existence if you redefine this variable in your recipe!
+#
+# OVERLAYFS_MOUNT_POINT[data] ?= "/data"
+#
+# The class assumes you have a data.mount systemd unit defined in your
+# systemd-machine-units recipe and installed to the image.
+#
+# Then you can specify writable directories on a per-recipe basis:
+#
+# OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"
+#
+# To support several mount points, use a different variable flag. Assume we
+# want a writable location on the file system but do not need the data to
+# survive a reboot. Then we could have a mnt-overlay.mount unit for a tmpfs
+# file system:
+#
+# OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
+# OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application"
+#
+# Note: the class does not support the /etc directory itself, because systemd
+# depends on it. For /etc, use the overlayfs-etc class.
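+#
+# For reference, a minimal data.mount unit matching the first example might
+# look like the sketch below; What= is a placeholder for your partition, and
+# systemd requires the unit file name to match the Where= path:
+#
+#   [Unit]
+#   Before=local-fs.target
+#
+#   [Mount]
+#   What=/dev/mmcblk0p4
+#   Where=/data
+#   Type=ext4
+#
+#   [Install]
+#   WantedBy=local-fs.target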
+
+REQUIRED_DISTRO_FEATURES += "systemd overlayfs"
+
+inherit systemd features_check
+
+OVERLAYFS_CREATE_DIRS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-create-dirs.service.in"
+OVERLAYFS_MOUNT_UNIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-unit.mount.in"
+OVERLAYFS_ALL_OVERLAYS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-all-overlays.service.in"
+
+python do_create_overlayfs_units() {
+ from oe.overlayfs import mountUnitName
+
+ with open(d.getVar("OVERLAYFS_CREATE_DIRS_TEMPLATE"), "r") as f:
+ CreateDirsUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_MOUNT_UNIT_TEMPLATE"), "r") as f:
+ MountUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_ALL_OVERLAYS_TEMPLATE"), "r") as f:
+ AllOverlaysTemplate = f.read()
+
+ def prepareUnits(data, lower):
+ from oe.overlayfs import helperUnitName
+
+ args = {
+ 'DATA_MOUNT_POINT': data,
+ 'DATA_MOUNT_UNIT': mountUnitName(data),
+ 'CREATE_DIRS_SERVICE': helperUnitName(lower),
+ 'LOWERDIR': lower,
+ }
+
+ bb.debug(1, "Generate systemd unit %s" % mountUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), mountUnitName(lower)), 'w') as f:
+ f.write(MountUnitTemplate.format(**args))
+
+ bb.debug(1, "Generate helper systemd unit %s" % helperUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), helperUnitName(lower)), 'w') as f:
+ f.write(CreateDirsUnitTemplate.format(**args))
+
+ def prepareGlobalUnit(dependentUnits):
+ from oe.overlayfs import allOverlaysUnitName
+ args = {
+ 'ALL_OVERLAYFS_UNITS': " ".join(dependentUnits),
+ 'PN': d.getVar('PN')
+ }
+
+ bb.debug(1, "Generate systemd unit with all overlays %s" % allOverlaysUnitName(d))
+ with open(os.path.join(d.getVar('WORKDIR'), allOverlaysUnitName(d)), 'w') as f:
+ f.write(AllOverlaysTemplate.format(**args))
+
+ mountUnitList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+ for mountPoint in overlayMountPoints:
+ bb.debug(1, "Process variable flag %s" % mountPoint)
+ for lower in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
+ (lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
+ prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
+ mountUnitList.append(mountUnitName(lower))
+
+ # Set up one unit that depends on all mount units, so users can declare a
+ # single dependency in their own units to make sure their software starts
+ # only after all overlays are mounted.
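+ # For example, a consumer's service unit could declare (the unit name here
+ # is illustrative; the actual name is produced by allOverlaysUnitName()):
+ #
+ #   [Unit]
+ #   Requires=my-recipe-overlays.service
+ #   After=my-recipe-overlays.service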
+ prepareGlobalUnit(mountUnitList)
+}
+
+# We need to generate the unit file names early, during the parsing stage
+python () {
+ from oe.overlayfs import strForBash, unitFileList
+
+ unitList = unitFileList(d)
+ for unit in unitList:
+ d.appendVar('SYSTEMD_SERVICE:' + d.getVar('PN'), ' ' + unit)
+ d.appendVar('FILES:' + d.getVar('PN'), ' ' +
+ d.getVar('systemd_system_unitdir') + '/' + strForBash(unit))
+
+ d.setVar('OVERLAYFS_UNIT_LIST', ' '.join([strForBash(s) for s in unitList]))
+}
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ for unit in ${OVERLAYFS_UNIT_LIST}; do
+ install -m 0444 ${WORKDIR}/${unit} ${D}${systemd_system_unitdir}
+ done
+}
+
+addtask create_overlayfs_units before do_install
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index d58a61fe0a..ef972740ce 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,14 +1,14 @@
-PREMIRRORS_prepend = " \
-cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
-svn://.*/.* ${SOURCE_MIRROR_URL} \n \
-git://.*/.* ${SOURCE_MIRROR_URL} \n \
-gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
-hg://.*/.* ${SOURCE_MIRROR_URL} \n \
-bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
-p4://.*/.* ${SOURCE_MIRROR_URL} \n \
-osc://.*/.* ${SOURCE_MIRROR_URL} \n \
-https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
-ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
-npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
-s3://.*/.* ${SOURCE_MIRROR_URL} \n \
+PREMIRRORS:prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \
+svn://.*/.* ${SOURCE_MIRROR_URL} \
+git://.*/.* ${SOURCE_MIRROR_URL} \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \
+hg://.*/.* ${SOURCE_MIRROR_URL} \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \
+p4://.*/.* ${SOURCE_MIRROR_URL} \
+osc://.*/.* ${SOURCE_MIRROR_URL} \
+https?://.*/.* ${SOURCE_MIRROR_URL} \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \
+s3://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index cf30f33f3d..e71daafe94 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -41,8 +41,6 @@
inherit packagedata
inherit chrpath
inherit package_pkgdata
-
-# Need the package_qa_handle_error() in insane.bbclass
inherit insane
PKGD = "${WORKDIR}/package"
@@ -199,7 +197,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg)
+ oldfiles = d.getVar('FILES:' + pkg)
newfile = os.path.join(root, o)
# These names will be passed through glob() so if the filename actually
# contains * or ? (rare, but possible) we need to handle that specially
@@ -219,19 +217,19 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
the_files.append(fp % m.group(1))
else:
the_files.append(aux_files_pattern_verbatim % m.group(1))
- d.setVar('FILES_' + pkg, " ".join(the_files))
+ d.setVar('FILES:' + pkg, " ".join(the_files))
else:
- d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
+ d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
if extra_depends != '':
- d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg):
- d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg):
- d.setVar('SUMMARY_' + pkg, summary % on)
+ d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
+ if not d.getVar('DESCRIPTION:' + pkg):
+ d.setVar('DESCRIPTION:' + pkg, description % on)
+ if not d.getVar('SUMMARY:' + pkg):
+ d.setVar('SUMMARY:' + pkg, summary % on)
if postinst:
- d.setVar('pkg_postinst_' + pkg, postinst)
+ d.setVar('pkg_postinst:' + pkg, postinst)
if postrm:
- d.setVar('pkg_postrm_' + pkg, postrm)
+ d.setVar('pkg_postrm:' + pkg, postrm)
if callable(hook):
hook(f, pkg, file_regex, output_pattern, m.group(1))
@@ -303,7 +301,7 @@ def get_conffiles(pkg, d):
cwd = os.getcwd()
os.chdir(root)
- conffiles = d.getVar('CONFFILES_%s' % pkg);
+ conffiles = d.getVar('CONFFILES:%s' % pkg);
if conffiles == None:
conffiles = d.getVar('CONFFILES')
if conffiles == None:
@@ -369,7 +367,7 @@ def source_info(file, d, fatal=True):
return list(debugsources)
-def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
+def splitdebuginfo(file, dvar, dv, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
# two files are linked to reference each other.
@@ -380,7 +378,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
import subprocess
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
debugfile = dvar + dest
sources = []
@@ -392,10 +390,6 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
dvar = d.getVar('PKGD')
objcopy = d.getVar("OBJCOPY")
- # We ignore kernel modules, we don't generate debug info files.
- if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return (file, sources)
-
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
origmode = os.stat(file)[stat.ST_MODE]
@@ -403,7 +397,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
os.chmod(file, newmode)
# We need to extract the debug src information here...
- if debugsrcdir:
+ if dv["srcdir"]:
sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -418,7 +412,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
return (file, sources)
-def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d):
+def splitstaticdebuginfo(file, dvar, dv, d):
# Unlike the function above, there is no way to split a static library into
# two components. So to get similar results we will copy the unmodified
# static library (containing the debug symbols) into a new directory.
@@ -431,7 +425,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
import shutil
src = file[len(dvar):]
- dest = debugstaticlibdir + os.path.dirname(src) + debugstaticdir + "/" + os.path.basename(src) + debugstaticappend
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
debugfile = dvar + dest
sources = []
@@ -448,7 +442,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
os.chmod(file, newmode)
# We need to extract the debug src information here...
- if debugsrcdir:
+ if dv["srcdir"]:
sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -461,7 +455,7 @@ def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugsta
return (file, sources)
-def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
+def inject_minidebuginfo(file, dvar, dv, d):
# Extract just the symbols from debuginfo into minidebuginfo,
# compress it with xz and inject it back into the binary in a .gnu_debugdata section.
# https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
@@ -475,7 +469,7 @@ def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsr
minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
debugfile = dvar + dest
minidebugfile = minidebuginfodir + src + '.minidebug'
bb.utils.mkdirhier(os.path.dirname(minidebugfile))
@@ -618,16 +612,18 @@ def get_package_mapping (pkg, basepkg, d, depversions=None):
import oe.packagedata
data = oe.packagedata.read_subpkgdata(pkg, d)
- key = "PKG_%s" % pkg
+ key = "PKG:%s" % pkg
if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
# Have to avoid undoing the write_extra_pkgs(global_variants...)
if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
and data[key] == basepkg:
return pkg
if depversions == []:
# Avoid returning a mapping if the renamed package rprovides its original name
- rprovkey = "RPROVIDES_%s" % pkg
+ rprovkey = "RPROVIDES:%s" % pkg
if rprovkey in data:
if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
@@ -714,9 +710,7 @@ python package_get_auto_pr() {
return
try:
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = oe.prservice.prserv_make_conn(d)
+ conn = oe.prservice.prserv_make_conn(d)
if conn is not None:
if "AUTOINC" in pkgv:
srcpv = bb.fetch2.get_srcrev(d)
@@ -725,6 +719,7 @@ python package_get_auto_pr() {
d.setVar("PRSERV_PV_AUTOINC", str(value))
auto_pr = conn.getPR(version, pkgarch, checksum)
+ conn.close()
except Exception as e:
bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
if auto_pr is None:
@@ -784,13 +779,13 @@ python package_do_split_locales() {
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
packages.append(pkg)
- d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
- d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
- d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
- d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
- d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ d.setVar('FILES:' + pkg, os.path.join(datadir, 'locale', l))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
if locale_section:
- d.setVar('SECTION_' + pkg, locale_section)
+ d.setVar('SECTION:' + pkg, locale_section)
d.setVar('PACKAGES', ' '.join(packages))
@@ -800,9 +795,9 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
#rdep.append('%s-locale*' % pn)
- #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
}
python perform_packagecopy () {
@@ -866,7 +861,7 @@ python fixup_perms () {
self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
else:
msg = "Fixup Perms: invalid config line %s" % line
- package_qa_handle_error("perm-config", msg, d)
+ oe.qa.handle_error("perm-config", msg, d)
self.path = None
self.link = None
@@ -1006,7 +1001,7 @@ python fixup_perms () {
continue
if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
msg = "Fixup perms: %s invalid line: %s" % (conf, line)
- package_qa_handle_error("perm-line", msg, d)
+ oe.qa.handle_error("perm-line", msg, d)
continue
entry = fs_perms_entry(d.expand(line))
if entry and entry.path:
@@ -1043,7 +1038,7 @@ python fixup_perms () {
ptarget = os.path.join(os.path.dirname(dir), link)
if os.path.exists(target):
msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
- package_qa_handle_error("perm-link", msg, d)
+ oe.qa.handle_error("perm-link", msg, d)
continue
# Create path to move directory to, move it, and then setup the symlink
@@ -1070,6 +1065,54 @@ python fixup_perms () {
fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
}
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
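+# PACKAGE_DEBUG_SPLIT_STYLE selects one of the layouts above; for example, a
+# distro or local configuration may set:
+#
+#   PACKAGE_DEBUG_SPLIT_STYLE = "debug-file-directory"
+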
python split_and_strip_files () {
import stat, errno
import subprocess
@@ -1081,49 +1124,13 @@ python split_and_strip_files () {
oldcwd = os.getcwd()
os.chdir(dvar)
- # We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
- # Single debug-file-directory style debug info
- debugappend = ".debug"
- debugstaticappend = ""
- debugdir = ""
- debugstaticdir = ""
- debuglibdir = "/usr/lib/debug"
- debugstaticlibdir = "/usr/lib/debug-static"
- debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
- # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = ""
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
- else:
- # Original OE-core, a.k.a. ".debug", style debug info
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
+ dv = package_debug_vars(d)
#
# First lets figure out all of the files we may have to process ... do this only once!
#
elffiles = {}
symlinks = {}
- kernmods = []
staticlibs = []
inodes = {}
libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
@@ -1138,17 +1145,14 @@ python split_and_strip_files () {
file = os.path.join(root, f)
# Skip debug files
- if debugappend and file.endswith(debugappend):
+ if dv["append"] and file.endswith(dv["append"]):
continue
- if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
continue
if file in skipfiles:
continue
- if file.endswith(".ko") and file.find("/lib/modules/") != -1:
- kernmods.append(file)
- continue
if oe.package.is_static_lib(file):
staticlibs.append(file)
continue
@@ -1165,8 +1169,11 @@ python split_and_strip_files () {
if not s:
continue
# Check it's an executable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
if cpath.islink(file):
checkelflinks[file] = ltarget
@@ -1199,11 +1206,11 @@ python split_and_strip_files () {
# ...but is it ELF, and is it already stripped?
if elf_file & 1:
if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
else:
msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
- package_qa_handle_error("already-stripped", msg, d)
+ oe.qa.handle_error("already-stripped", msg, d)
continue
# At this point we have an unstripped elf file. We need to:
@@ -1225,19 +1232,29 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
# First lets process debug splitting
#
if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
- if debugsrcdir and not hostos.startswith("mingw"):
+ if dv["srcdir"] and not hostos.startswith("mingw"):
if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
- results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
else:
for file in staticlibs:
results.append( (file,source_info(file, d)) )
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
sources = set()
for r in results:
sources.update(r[1])
@@ -1250,9 +1267,9 @@ python split_and_strip_files () {
target = inodes[ref][0][len(dvar):]
for file in inodes[ref][1:]:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
fpath = dvar + dest
- ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
bb.utils.mkdirhier(os.path.dirname(fpath))
# Only one hardlink of separated debug info file in each directory
if not os.access(fpath, os.R_OK):
@@ -1262,7 +1279,7 @@ python split_and_strip_files () {
# Create symlinks for all cases we were able to split symbols
for file in symlinks:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
fpath = dvar + dest
# Skip it if the target doesn't exist
try:
@@ -1278,17 +1295,17 @@ python split_and_strip_files () {
lbase = os.path.basename(ltarget)
ftarget = ""
if lpath and lpath != ".":
- ftarget += lpath + debugdir + "/"
- ftarget += lbase + debugappend
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
if lpath.startswith(".."):
ftarget = os.path.join("..", ftarget)
bb.utils.mkdirhier(os.path.dirname(fpath))
#bb.note("Symlink %s -> %s" % (fpath, ftarget))
os.symlink(ftarget, fpath)
- # Process the debugsrcdir if requested...
+ # Process the dv["srcdir"] if requested...
# This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, sources, d)
+ copydebugsources(dv["srcdir"], sources, d)
#
# End of debug splitting
#
@@ -1303,8 +1320,6 @@ python split_and_strip_files () {
elf_file = int(elffiles[file])
#bb.note("Strip %s" % file)
sfiles.append((file, elf_file, strip))
- for f in kernmods:
- sfiles.append((f, 16, strip))
if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
for f in staticlibs:
sfiles.append((f, 16, strip))
@@ -1314,7 +1329,7 @@ python split_and_strip_files () {
# Build "minidebuginfo" and reinject it back into the stripped binaries
if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
- extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
+ extraargs=(dvar, dv, d))
#
# End of strip
@@ -1344,7 +1359,7 @@ python populate_packages () {
src_package_name = ('%s-src' % d.getVar('PN'))
if not src_package_name in packages:
packages.append(src_package_name)
- d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
@@ -1353,7 +1368,7 @@ python populate_packages () {
for i, pkg in enumerate(packages):
if pkg in package_dict:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
- package_qa_handle_error("packages-list", msg, d)
+ oe.qa.handle_error("packages-list", msg, d)
# Ensure the source package gets the chance to pick up the source files
# before the debug package by ordering it first in PACKAGES. Whether it
# actually picks up any source files is controlled by
@@ -1387,10 +1402,10 @@ python populate_packages () {
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
- filesvar = d.getVar('FILES_%s' % pkg) or ""
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
if "//" in filesvar:
msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
- package_qa_handle_error("files-invalid", msg, d)
+ oe.qa.handle_error("files-invalid", msg, d)
filesvar.replace("//", "/")
origfiles = filesvar.split()
@@ -1453,13 +1468,13 @@ python populate_packages () {
os.umask(oldumask)
os.chdir(workdir)
- # Handle LICENSE_EXCLUSION
+ # Handle excluding packages with incompatible licenses
package_list = []
for pkg in packages:
- licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
if licenses:
msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
- package_qa_handle_error("incompatible-license", msg, d)
+ oe.qa.handle_error("incompatible-license", msg, d)
else:
package_list.append(pkg)
d.setVar('PACKAGES', ' '.join(package_list))
@@ -1476,14 +1491,14 @@ python populate_packages () {
if unshipped != []:
msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
else:
for f in unshipped:
msg = msg + "\n " + f
msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
- package_qa_handle_error("installed-vs-shipped", msg, d)
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
}
populate_packages[dirs] = "${D}"
@@ -1524,11 +1539,11 @@ python package_fixsymlinks () {
bb.note("%s contains dangling symlink to %s" % (pkg, l))
for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for p in newrdepends[pkg]:
if p not in rdepends:
rdepends[p] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
@@ -1550,9 +1565,10 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS
python emit_pkgdata() {
from glob import glob
import json
+ import bb.compress.zstd
def process_postinst_on_target(pkg, mlprefix):
- pkgval = d.getVar('PKG_%s' % pkg)
+ pkgval = d.getVar('PKG:%s' % pkg)
if pkgval is None:
pkgval = pkg
@@ -1563,8 +1579,8 @@ if [ -n "$D" ]; then
fi
""" % (pkgval, mlprefix)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
if postinst_ontarget:
bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
@@ -1572,18 +1588,18 @@ fi
postinst = '#!/bin/sh\n'
postinst += defer_fragment
postinst += postinst_ontarget
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
def add_set_e_to_scriptlets(pkg):
for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
- scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
if scriptlet:
scriptlet_split = scriptlet.split('\n')
if scriptlet_split[0].startswith("#!"):
scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
else:
scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
- d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
def write_if_exists(f, pkg, var):
def encode(str):
@@ -1591,9 +1607,9 @@ fi
c = codecs.getencoder("unicode_escape")
return c(str)[0].decode("latin1")
- val = d.getVar('%s_%s' % (var, pkg))
+ val = d.getVar('%s:%s' % (var, pkg))
if val:
- f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
return val
val = d.getVar('%s' % (var))
if val:
@@ -1612,7 +1628,7 @@ fi
ml_pkg = "%s-%s" % (variant, pkg)
subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
with open(subdata_file, 'w') as fd:
- fd.write("PKG_%s: %s" % (ml_pkg, pkg))
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
packages = d.getVar('PACKAGES')
pkgdest = d.getVar('PKGDEST')
@@ -1622,6 +1638,8 @@ fi
with open(data_file, 'w') as fd:
fd.write("PACKAGES: %s\n" % packages)
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
@@ -1636,23 +1654,38 @@ fi
workdir = d.getVar('WORKDIR')
for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg)
+ pkgval = d.getVar('PKG:%s' % pkg)
if pkgval is None:
pkgval = pkg
- d.setVar('PKG_%s' % pkg, pkg)
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+ d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
add_set_e_to_scriptlets(pkg)
@@ -1663,24 +1696,29 @@ fi
val = write_if_exists(sf, pkg, var)
write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
+
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
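+ # The resulting extended/<pkg>.json.zstd is zstd-compressed JSON of the form
+ # {"files_info": {"<path>": {"size": ..., "debugsrc": [...]}}}, where the
+ # "debugsrc" key is only present for files that had debug sources split out.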
# Symlinks needed for rprovides lookup
- rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
for p in bb.utils.explode_deps(rprov):
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
if not allow_empty:
allow_empty = d.getVar('ALLOW_EMPTY')
root = "%s/%s" % (pkgdest, pkg)
@@ -1702,7 +1740,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1714,11 +1753,11 @@ RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provi
# Collect perfile run-time dependency metadata
# Output:
-# FILERPROVIDESFLIST_pkg - list of all files w/ deps
-# FILERPROVIDES_filepath_pkg - per file dep
+# FILERPROVIDESFLIST:pkg - list of all files w/ deps
+# FILERPROVIDES:filepath:pkg - per file dep
#
-# FILERDEPENDSFLIST_pkg - list of all files w/ deps
-# FILERDEPENDS_filepath_pkg - per file dep
+# FILERDEPENDSFLIST:pkg - list of all files w/ deps
+# FILERDEPENDS:filepath:pkg - per file dep
python package_do_filedeps() {
if d.getVar('SKIP_FILEDEPS') == '1':
@@ -1733,7 +1772,7 @@ python package_do_filedeps() {
pkglist = []
for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
continue
if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
continue
@@ -1755,18 +1794,18 @@ python package_do_filedeps() {
for file in sorted(provides):
provides_files[pkg].append(file)
- key = "FILERPROVIDES_" + file + "_" + pkg
+ key = "FILERPROVIDES:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(provides[file]))
for file in sorted(requires):
requires_files[pkg].append(file)
- key = "FILERDEPENDS_" + file + "_" + pkg
+ key = "FILERDEPENDS:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(requires[file]))
for pkg in requires_files:
- d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
for pkg in provides_files:
- d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
}
SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
@@ -1805,7 +1844,7 @@ python package_do_shlibs() {
ver = d.getVar('PKGV')
if not ver:
msg = "PKGV not defined"
- package_qa_handle_error("pkgv-undefined", msg, d)
+ oe.qa.handle_error("pkgv-undefined", msg, d)
return
pkgdest = d.getVar('PKGDEST')
@@ -1845,7 +1884,7 @@ python package_do_shlibs() {
sonames.add(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
- if snap_symlinks and (os.path.basename(file) != this_soname):
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
return (needs_ldconfig, needed, sonames, renames)
@@ -1930,12 +1969,12 @@ python package_do_shlibs() {
shlib_provider = oe.package.read_shlib_providers(d)
for pkg in shlib_pkgs:
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
- pkgver = d.getVar('PKGV_' + pkg)
+ pkgver = d.getVar('PKGV:' + pkg)
if not pkgver:
pkgver = d.getVar('PV_' + pkg)
if not pkgver:
@@ -1985,11 +2024,11 @@ python package_do_shlibs() {
shlib_provider[s[0]][s[1]] = (pkg, pkgver)
if needs_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('ldconfig_postinst_fragment')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
assumed_libs = d.getVar('ASSUME_SHLIBS')
@@ -2011,7 +2050,7 @@ python package_do_shlibs() {
for pkg in shlib_pkgs:
bb.debug(2, "calculating shlib requirements for %s" % pkg)
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
deps = list()
@@ -2079,12 +2118,12 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgconfig_provided[pkg] = []
pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
+ for file in sorted(pkgfiles[pkg]):
m = pc_re.match(file)
if m:
pd = bb.data.init()
name = m.group(1)
- pkgconfig_provided[pkg].append(name)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
if not os.access(file, os.R_OK):
continue
with open(file, 'r') as f:
@@ -2107,7 +2146,7 @@ python package_do_pkgconfig () {
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
with open(pkgs_file, 'w') as f:
- for p in pkgconfig_provided[pkg]:
+ for p in sorted(pkgconfig_provided[pkg]):
f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
@@ -2165,7 +2204,7 @@ python read_shlibdeps () {
packages = d.getVar('PACKAGES').split()
for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for dep in sorted(pkglibdeps[pkg]):
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
@@ -2173,7 +2212,7 @@ python read_shlibdeps () {
for v in pkglibdeps[pkg][dep]:
if v not in rdepends[dep]:
rdepends[dep].append(v)
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
python package_depchains() {
@@ -2197,7 +2236,7 @@ python package_depchains() {
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(depends):
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
@@ -2212,13 +2251,13 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(rdepends):
if depend.find('virtual-locale-') != -1:
@@ -2233,8 +2272,8 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def add_dep(list, dep):
if dep not in list:
@@ -2246,7 +2285,7 @@ python package_depchains() {
rdepends = []
for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -2280,7 +2319,7 @@ python package_depchains() {
for suffix in pkgs:
for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
@@ -2293,7 +2332,7 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
@@ -2310,11 +2349,11 @@ def gen_packagevar(d, pkgvars="PACKAGEVARS"):
ret.append(v)
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
# Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
# affected recipes.
- ret.append('LICENSE_EXCLUSION-%s' % p)
+ ret.append('_exclude_incompatible-%s' % p)
return " ".join(ret)
PACKAGE_PREPROCESS_FUNCS ?= ""
@@ -2369,7 +2408,7 @@ python do_package () {
if not workdir or not outdir or not dest or not dvar or not pn:
msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
- package_qa_handle_error("var-undefined", msg, d)
+ oe.qa.handle_error("var-undefined", msg, d)
return
bb.build.exec_func("package_convert_pr_autoinc", d)
@@ -2422,12 +2461,10 @@ python do_package () {
for f in (d.getVar('PACKAGEFUNCS') or '').split():
bb.build.exec_func(f, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
-do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
+do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index b3d8ce330e..2e75e222bc 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -81,7 +81,7 @@ def deb_write_pkg(pkg, d):
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -315,10 +315,8 @@ python do_package_write_deb () {
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
+addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_deb"
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 600b3ac90c..f67cb0e5c9 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -65,7 +65,7 @@ def ipk_write_pkg(pkg, d):
try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -230,8 +230,8 @@ def ipk_write_pkg(pkg, d):
shell=True)
if d.getVar('IPK_SIGN_PACKAGES') == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
+ ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
finally:
@@ -274,9 +274,8 @@ python do_package_write_ipk () {
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
+addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_ipk"
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_ipk"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 86706da842..e9ff1f7e65 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -40,10 +40,10 @@ def write_rpm_perfiledata(srcname, d):
outfile.write("# Dependency table\n")
outfile.write('deps = {\n')
for pkg in packages.split():
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
depends_dict = bb.utils.explode_dep_versions(deps)
file = dfile.replace("@underscore@", "_")
@@ -249,10 +249,10 @@ python write_specfile () {
def get_perfile(varname, pkg, d):
deps = []
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
depends = d.getVar(key)
if depends:
deps.append(depends)
@@ -332,7 +332,7 @@ python write_specfile () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -684,8 +684,8 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS"))
- cmd = cmd + " --define '_source_payload w6T%d.xzdio'" % int(d.getVar("XZ_THREADS"))
+ cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
+ cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
@@ -748,9 +748,8 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
+addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_rpm"
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_rpm"
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
index a903e5cfd2..c2760e2bf0 100644
--- a/meta/classes/packagedata.bbclass
+++ b/meta/classes/packagedata.bbclass
@@ -24,10 +24,10 @@ python read_subpackage_metadata () {
continue
#
# If we set unsuffixed variables here there is a chance they could clobber override versions
- # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>
+ # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
# We therefore don't clobber for the unsuffixed variable versions
#
- if key.endswith("_" + pkg):
+ if key.endswith(":" + pkg):
d.setVar(key, sdata[key])
else:
d.setVar(key, sdata[key], parsing=True)
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 1541c8fbff..557b1b6382 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -32,7 +32,7 @@ python () {
for suffix in types]
d.setVar('PACKAGES', ' '.join(packages))
for pkg in packages:
- d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkg, '1')
}
# We don't want to look at shared library dependencies for the
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index cd491a563d..8de7025491 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -10,7 +10,7 @@ PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
# This leaks into debug sources in particular. Add the dependency
# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
-PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
+PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
@@ -131,6 +131,9 @@ python patch_do_patch() {
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
+ if not os.path.isdir(patchdir):
+ bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
+ (patchdir, parm["patchdir"], parm['patchname']))
else:
patchdir = s
@@ -147,12 +150,12 @@ python patch_do_patch() {
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
+ bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
+ bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
bb.utils.remove(process_tmpdir, True)
del os.environ['TMPDIR']
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index b07f51ed56..886bf195b3 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS_append_class-target = " qemu-native"
+DEPENDS:append:class-target = " qemu-native"
inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
@@ -29,30 +29,30 @@ else
fi
}
-python populate_packages_append() {
+python populate_packages:append() {
pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
for pkg in pixbuf_pkgs:
bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('pixbufcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('pixbufcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
gdkpixbuf_complete() {
GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
-DEPENDS_append_class-native = " gdk-pixbuf-native"
-SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
+DEPENDS:append:class-native = " gdk-pixbuf-native"
+SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst"
pixbufcache_sstate_postinst() {
mkdir -p ${SYSROOT_DESTDIR}${bindir}
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
index ad1f84f506..fa94527ce9 100644
--- a/meta/classes/pkgconfig.bbclass
+++ b/meta/classes/pkgconfig.bbclass
@@ -1,2 +1,2 @@
-DEPENDS_prepend = "pkgconfig-native "
+DEPENDS:prepend = "pkgconfig-native "
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index f8072a9d37..16f929bf59 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -1,4 +1,6 @@
-inherit meta image-postinst-intercepts image-artifact-names
+PACKAGES = ""
+
+inherit image-postinst-intercepts image-artifact-names
# Wildcards specifying complementary packages to install for every package that has been explicitly
# installed into the rootfs
@@ -23,7 +25,7 @@ SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_F
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
-PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
+PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target"
SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
# List of locales to install, or "all" for all of them, or unset for none.
@@ -37,7 +39,7 @@ SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
-B_task-populate-sdk = "${SDK_DIR}"
+B:task-populate-sdk = "${SDK_DIR}"
SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
@@ -66,7 +68,7 @@ python () {
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -90,6 +92,8 @@ SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+SDK_PRUNE_SYSROOT_DIRS ?= "/dev"
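+# (for example, a distro could extend this with
+# SDK_PRUNE_SYSROOT_DIRS:append = " /var/volatile")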
+
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
@@ -101,6 +105,12 @@ python write_target_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
+sdk_prune_dirs () {
+ for d in ${SDK_PRUNE_SYSROOT_DIRS}; do
+ rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d
+ done
+}
+
python write_sdk_test_data() {
from oe.data import export2json
testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
@@ -119,9 +129,10 @@ python write_host_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
-POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
-POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
+
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
@@ -172,11 +183,15 @@ fakeroot python do_populate_sdk() {
populate_sdk_common(d)
}
SSTATETASKS += "do_populate_sdk"
-SSTATE_SKIP_CREATION_task-populate-sdk = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk = '1'
do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
+python do_populate_sdk_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_sdk_setscene
PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
@@ -280,6 +295,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 4aabafa079..e2019f9bbf 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -2,19 +2,15 @@
inherit populate_sdk_base
-# NOTE: normally you cannot use task overrides for this kind of thing - this
-# only works because of get_sdk_ext_rdepends()
-
-TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+# Used to override TOOLCHAIN_HOST_TASK in the eSDK case
+TOOLCHAIN_HOST_TASK_ESDK = " \
meta-environment-extsdk-${MACHINE} \
"
-TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-
-SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0"
SDK_EXT = ""
-SDK_EXT_task-populate-sdk-ext = "-ext"
+SDK_EXT:task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
@@ -26,8 +22,8 @@ SDK_INCLUDE_BUILDTOOLS ?= '1'
SDK_RECRDEP_TASKS ?= ""
SDK_CUSTOM_TEMPLATECONF ?= "0"
-SDK_LOCAL_CONF_WHITELIST ?= ""
-SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
+ESDK_LOCALCONF_ALLOW ?= ""
+ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
BB_NUMBER_THREADS \
BB_NUMBER_PARSE_THREADS \
PARALLEL_MAKE \
@@ -38,7 +34,7 @@ SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
TMPDIR \
BB_SERVER_TIMEOUT \
"
-SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
+ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
SDK_TARGETS ?= "${PN}"
@@ -78,10 +74,10 @@ COREBASE_FILES ?= " \
.templateconf \
"
-SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
-B_task-populate-sdk-ext = "${SDK_DIR}"
+SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B:task-populate-sdk-ext = "${SDK_DIR}"
TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
-TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
+TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
@@ -118,9 +114,9 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
+SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
@@ -147,15 +143,15 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
try:
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
- f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
+ f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
+ f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
- f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
- f.write('TCLIBCAPPEND_forcevariable = ""\n')
+ f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND:forcevariable = ""\n')
# Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
# be different and we won't be able to find our native sstate)
if not bb.data.inherits_class('uninative', d):
- f.write('INHERIT_remove = "uninative"\n')
+ f.write('INHERIT:remove = "uninative"\n')
# Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
# will not allow in its COREBASE path, so we need to rename the directory temporarily
@@ -286,8 +282,8 @@ python copy_buildsystem () {
bb.utils.mkdirhier(uninative_outdir)
shutil.copy(uninative_file, uninative_outdir)
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
- env_whitelist_values = {}
+ env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
+ env_passthrough_values = {}
# Create local.conf
builddir = d.getVar('TOPDIR')
@@ -298,15 +294,15 @@ python copy_buildsystem () {
if derivative:
shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
+ local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split()
+ local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split()
def handle_var(varname, origvalue, op, newlines):
- if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
+ if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed):
newlines.append('# Removed original setting of %s\n' % varname)
return None, op, 0, True
else:
- if varname in env_whitelist:
- env_whitelist_values[varname] = origvalue
+ if varname in env_passthrough:
+ env_passthrough_values[varname] = origvalue
return origvalue, op, 0, True
varlist = ['[^#=+ ]*']
oldlines = []
@@ -342,7 +338,7 @@ python copy_buildsystem () {
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
- f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
+ f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False))
# Bypass the default connectivity check if any
f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
@@ -358,10 +354,10 @@ python copy_buildsystem () {
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
# We want to be able to set this without a full reparse
- f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
+ f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
- # Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
+ # Set up which tasks are ignored for run on install
+ f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
@@ -440,9 +436,9 @@ python copy_buildsystem () {
f.write('meta/conf\n')
# Ensure any variables set from the external environment (by way of
- # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
+ # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
extralines = []
- for name, value in env_whitelist_values.items():
+ for name, value in env_passthrough_values.items():
actualvalue = d.getVar(name) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
@@ -554,7 +550,7 @@ python copy_buildsystem () {
# We don't need sstate do_package files
for root, dirs, files in os.walk(sstate_out):
for name in files:
- if name.endswith("_package.tgz"):
+ if name.endswith("_package.tar.zst"):
f = os.path.join(root, name)
os.remove(f)
@@ -630,7 +626,7 @@ install_tools() {
for script in $scripts; do
for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
- test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
+ test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
done
done
# We can't use the same method as above because files in the sysroot won't exist at this point
@@ -638,7 +634,7 @@ install_tools() {
unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
- lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
+ ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
@@ -688,7 +684,7 @@ sdk_ext_preinst() {
EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
fi
}
-SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
+SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}"
# FIXME this preparation should be done as part of the SDK construction
sdk_ext_postinst() {
@@ -739,9 +735,9 @@ sdk_ext_postinst() {
echo done
}
-SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
-SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
@@ -755,6 +751,10 @@ fakeroot python do_populate_sdk_ext() {
if d.getVar('BB_CURRENT_MC') != 'default':
bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
+ # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
+ d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}")
+ d.setVar("TOOLCHAIN_TARGET_TASK", "")
+
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
buildtools_fn = get_current_buildtools(d)
@@ -800,12 +800,7 @@ do_sdk_depends[dirs] = "${WORKDIR}"
do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
-do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
-
-def get_sdk_ext_rdepends(d):
- localdata = d.createCopy()
- localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- return localdata.getVarFlag('do_populate_sdk', 'rdepends')
+do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}"
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
@@ -833,7 +828,7 @@ do_populate_sdk_ext[nostamp] = "1"
SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
SSTATETASKS += "do_populate_sdk_ext"
-SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1'
do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
index 478a33474d..18bd3dbff9 100644
--- a/meta/classes/ptest-gnome.bbclass
+++ b/meta/classes/ptest-gnome.bbclass
@@ -1,8 +1,8 @@
inherit ptest
-EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
-FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
+FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \
${datadir}/installed-tests/"
-RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
+RDEPENDS:${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
index a4bc40b51a..5dd72c9dad 100644
--- a/meta/classes/ptest-perl.bbclass
+++ b/meta/classes/ptest-perl.bbclass
@@ -1,6 +1,6 @@
inherit ptest
-FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
+FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:"
SRC_URI += "file://ptest-perl/run-ptest"
@@ -13,9 +13,9 @@ do_install_ptest_perl() {
chown -R root:root ${D}${PTEST_PATH}
}
-FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
+FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
-RDEPENDS_${PN}-ptest_prepend = "perl "
+RDEPENDS:${PN}-ptest:prepend = "perl "
addtask install_ptest_perl after do_install_ptest_base before do_package
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
index 466916299a..1ec23c0923 100644
--- a/meta/classes/ptest.bbclass
+++ b/meta/classes/ptest.bbclass
@@ -1,22 +1,22 @@
-SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
-DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
+SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files"
+DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
PTEST_PATH ?= "${libdir}/${BPN}/ptest"
PTEST_BUILD_HOST_FILES ?= "Makefile"
PTEST_BUILD_HOST_PATTERN ?= ""
-FILES_${PN}-ptest += "${PTEST_PATH}"
-SECTION_${PN}-ptest = "devel"
-ALLOW_EMPTY_${PN}-ptest = "1"
+FILES:${PN}-ptest += "${PTEST_PATH}"
+SECTION:${PN}-ptest = "devel"
+ALLOW_EMPTY:${PN}-ptest = "1"
PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
-PTEST_ENABLED_class-native = ""
-PTEST_ENABLED_class-nativesdk = ""
-PTEST_ENABLED_class-cross-canadian = ""
-RDEPENDS_${PN}-ptest += "${PN}"
-RDEPENDS_${PN}-ptest_class-native = ""
-RDEPENDS_${PN}-ptest_class-nativesdk = ""
-RRECOMMENDS_${PN}-ptest += "ptest-runner"
+PTEST_ENABLED:class-native = ""
+PTEST_ENABLED:class-nativesdk = ""
+PTEST_ENABLED:class-cross-canadian = ""
+RDEPENDS:${PN}-ptest += "${PN}"
+RDEPENDS:${PN}-ptest:class-native = ""
+RDEPENDS:${PN}-ptest:class-nativesdk = ""
+RRECOMMENDS:${PN}-ptest += "ptest-runner"
PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
@@ -73,7 +73,7 @@ PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
# This function needs to run after apply_update_alternative_renames because the
# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
# used here to make this function run as late as possible.
-PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
+PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
python ptest_update_alternatives() {
@@ -118,13 +118,15 @@ python () {
if not(d.getVar('PTEST_ENABLED') == "1"):
for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
bb.build.deltask(i, d)
+}
+QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
+def package_qa_check_missing_ptest(pn, d, messages):
# This checks that the ptest package is actually included
# in standard oe-core ptest images - only for oe-core recipes
if not 'meta/recipes' in d.getVar('FILE') or not(d.getVar('PTEST_ENABLED') == "1"):
return
- enabled_ptests = " ".join([d.getVar('PTESTS_FAST'),d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
- if (d.getVar('PN') + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
- bb.error("Recipe %s supports ptests but is not included in oe-core's conf/distro/include/ptest-packagelists.inc" % d.getVar("PN"))
-}
+ enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
+ if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
+ oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index 384a209874..9405d58601 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -8,18 +8,18 @@ def pypi_package(d):
PYPI_PACKAGE ?= "${@pypi_package(d)}"
PYPI_PACKAGE_EXT ?= "tar.gz"
+PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
def pypi_src_uri(d):
package = d.getVar('PYPI_PACKAGE')
- package_ext = d.getVar('PYPI_PACKAGE_EXT')
- pv = d.getVar('PV')
- return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
+ archive_name = d.getVar('PYPI_ARCHIVE_NAME')
+ return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
SECTION = "devel/python"
-SRC_URI_prepend = "${PYPI_SRC_URI} "
+SRC_URI:prepend = "${PYPI_SRC_URI} "
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
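As a worked example of the refactored URI logic (package name chosen only for illustration): with PYPI_PACKAGE = "requests" and PV = "2.27.1", the defaults expand to roughly:

    PYPI_ARCHIVE_NAME = "requests-2.27.1.tar.gz"
    PYPI_SRC_URI = "https://files.pythonhosted.org/packages/source/r/requests/requests-2.27.1.tar.gz"

A recipe whose upstream archive deviates from the usual ${PYPI_PACKAGE}-${PV} naming can now override just PYPI_ARCHIVE_NAME instead of replacing the whole PYPI_SRC_URI.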
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
index f51f971fc5..ff03e584d4 100644
--- a/meta/classes/python3-dir.bbclass
+++ b/meta/classes/python3-dir.bbclass
@@ -1,4 +1,4 @@
-PYTHON_BASEVERSION = "3.9"
+PYTHON_BASEVERSION = "3.10"
PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
index 2e3a88c126..3783c0c47e 100644
--- a/meta/classes/python3native.bbclass
+++ b/meta/classes/python3native.bbclass
@@ -2,9 +2,9 @@ inherit python3-dir
PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
EXTRANATIVEPATH += "python3-native"
-DEPENDS_append = " python3-native "
+DEPENDS:append = " python3-native "
-# python-config and other scripts are using distutils modules
+# python-config and other scripts are using sysconfig modules
# which we patch to access these variables
export STAGING_INCDIR
export STAGING_LIBDIR
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
index fc1025c207..2476858cae 100644
--- a/meta/classes/python3targetconfig.bbclass
+++ b/meta/classes/python3targetconfig.bbclass
@@ -1,17 +1,29 @@
inherit python3native
EXTRA_PYTHON_DEPENDS ?= ""
-EXTRA_PYTHON_DEPENDS_class-target = "python3"
-DEPENDS_append = " ${EXTRA_PYTHON_DEPENDS}"
+EXTRA_PYTHON_DEPENDS:class-target = "python3"
+DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
-do_configure_prepend_class-target() {
+do_configure:prepend:class-target() {
export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
}
-do_compile_prepend_class-target() {
+do_compile:prepend:class-target() {
export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
}
-do_install_prepend_class-target() {
+do_install:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_configure:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install:prepend:class-nativesdk() {
export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
}
diff --git a/meta/classes/python_flit_core.bbclass b/meta/classes/python_flit_core.bbclass
new file mode 100644
index 0000000000..96652aa204
--- /dev/null
+++ b/meta/classes/python_flit_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native python3-dir setuptools3-base
+
+DEPENDS += "python3 python3-flit-core-native"
+
+PEP517_BUILD_API = "flit_core.buildapi"
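A minimal recipe sketch using the new class (recipe name, license data and package are hypothetical):

    SUMMARY = "Example pure-Python module built with the flit_core backend"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://LICENSE;md5=<checksum>"

    inherit pypi python_flit_core

    PYPI_PACKAGE = "example"

The wheel is then built by python_pep517_do_compile via flit_core.buildapi and installed with pypa/installer, with no setup.py involved.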
diff --git a/meta/classes/python_pep517.bbclass b/meta/classes/python_pep517.bbclass
new file mode 100644
index 0000000000..34ffdc9c0d
--- /dev/null
+++ b/meta/classes/python_pep517.bbclass
@@ -0,0 +1,56 @@
+# Common infrastructure for Python packages that use PEP-517 compliant packaging.
+# https://www.python.org/dev/peps/pep-0517/
+#
+# This class will build a wheel in do_compile, and use pypa/installer to install
+# it in do_install.
+
+DEPENDS:append = " python3-installer-native"
+
+# Where to execute the build process from
+PEP517_SOURCE_PATH ?= "${S}"
+
+# The PEP517 build API entry point
+PEP517_BUILD_API ?= "unset"
+
+# The directory where wheels will be written
+PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
+
+# The interpreter to use for installed scripts
+PEP517_INSTALL_PYTHON = "python3"
+PEP517_INSTALL_PYTHON:class-native = "nativepython3"
+
+# pypa/installer option to control the bytecode compilation
+INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0"
+
+# PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid
+# running base_do_configure.
+python_pep517_do_configure () {
+ :
+}
+
+# Once we have Python 3.11 we can parse pyproject.toml to determine the build
+# API entry point directly
+python_pep517_do_compile () {
+ cd ${PEP517_SOURCE_PATH}
+ nativepython3 -c "import ${PEP517_BUILD_API} as api; api.build_wheel('${PEP517_WHEEL_PATH}')"
+}
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+python_pep517_do_install () {
+ COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
+ if test $COUNT -eq 0; then
+ bbfatal No wheels found in ${PEP517_WHEEL_PATH}
+ elif test $COUNT -gt 1; then
+ bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen
+ fi
+
+ nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
+python_pep517_do_bootstrap_install () {
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
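What python_pep517_do_compile executes boils down to the following Python fragment (the backend module shown is the flit case; PEP 517 guarantees every backend exposes the same build_wheel entry point):

    # Minimal sketch of the PEP 517 frontend role this class plays
    import flit_core.buildapi as api  # selected via PEP517_BUILD_API

    # Build a wheel into the target directory; per PEP 517 the call
    # returns the basename of the wheel it created
    wheel = api.build_wheel("dist")
    print(wheel)  # e.g. example-1.0-py3-none-any.whl

do_install then hands the resulting wheel to pypa/installer with --destdir=${D}, so packaging follows the same path a modern pip would take.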
diff --git a/meta/classes/python_poetry_core.bbclass b/meta/classes/python_poetry_core.bbclass
new file mode 100644
index 0000000000..577663b8f1
--- /dev/null
+++ b/meta/classes/python_poetry_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native setuptools3-base
+
+DEPENDS += "python3-poetry-core-native"
+
+PEP517_BUILD_API = "poetry.core.masonry.api"
diff --git a/meta/classes/python_pyo3.bbclass b/meta/classes/python_pyo3.bbclass
new file mode 100644
index 0000000000..10cc3a0645
--- /dev/null
+++ b/meta/classes/python_pyo3.bbclass
@@ -0,0 +1,30 @@
+#
+# This class helps make sure that Python extensions built with PyO3
+# and setuptools_rust properly set up the environment for cross compilation
+#
+
+inherit cargo python3-dir siteinfo
+
+export PYO3_CROSS="1"
+export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}"
+export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}"
+export CARGO_BUILD_TARGET="${HOST_SYS}"
+export RUSTFLAGS
+export PYO3_PYTHON="${PYTHON}"
+export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config"
+
+python_pyo3_do_configure () {
+ cat > ${WORKDIR}/pyo3.config << EOF
+implementation=CPython
+version=${PYTHON_BASEVERSION}
+shared=true
+abi3=false
+lib_name=${PYTHON_DIR}
+lib_dir=${STAGING_LIBDIR}
+pointer_width=${SITEINFO_BITS}
+build_flags=WITH_THREAD
+suppress_build_script_link_lines=false
+EOF
+}
+
+EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/python_setuptools3_rust.bbclass b/meta/classes/python_setuptools3_rust.bbclass
new file mode 100644
index 0000000000..f12e5d0cbd
--- /dev/null
+++ b/meta/classes/python_setuptools3_rust.bbclass
@@ -0,0 +1,11 @@
+inherit python_pyo3 setuptools3
+
+DEPENDS += "python3-setuptools-rust-native"
+
+python_setuptools3_rust_do_configure() {
+ python_pyo3_do_configure
+ cargo_common_do_configure
+ setuptools3_do_configure
+}
+
+EXPORT_FUNCTIONS do_configure
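A consuming recipe then only needs something along these lines (crate and version invented for the sketch):

    inherit python_setuptools3_rust

    # setuptools_rust drives cargo underneath, so crate dependencies
    # are fetched with the usual crate:// URIs from the cargo classes
    SRC_URI += "crate://crates.io/pyo3/0.15.1"

The chained configure runs python_pyo3_do_configure (writing pyo3.config for the cross build), then cargo_common_do_configure, then the regular setuptools3 configure.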
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index 55bdff816b..01a7b86ae1 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -64,4 +64,4 @@ QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
-QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER8"
+QEMU_EXTRAOPTIONS:powerpc64le = " -cpu POWER8"
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 2b50ddaa22..ad8489902a 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -29,13 +29,15 @@
#
# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio support is needed
#
-# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
+# QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used
# when QB_AUDIO_DRV is set.
#
# QB_RNG: Pass-through for the host random number generator; it can speed up boot
# in system mode when the system is experiencing entropy starvation
#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
+# By default "/dev/vda rw" gets passed to the kernel.
+# To mount the rootfs read-only, QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro".
#
# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
# it needs to work together with QB_TAP_OPT and QB_SLIRP_OPT.
@@ -43,7 +45,7 @@
# a custom one, but that may cause conflicts when multiple qemus are
# running on the same host.
# Note: If more than one interface of type -device virtio-net-device gets added,
-# QB_NETWORK_DEVICE_prepend might be used, since Qemu enumerates the eth*
+# QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth*
# devices in reverse order to -device arguments.
#
# QB_TAP_OPT: network option for 'tap' mode, e.g.,
@@ -91,7 +93,7 @@ QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-p
QB_OPT_APPEND ?= ""
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
-QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
+QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
QB_ROOTFS_EXTRA_OPT ?= ""
QB_GRAPHICS ?= ""
@@ -107,7 +109,7 @@ def qemuboot_vars(d):
build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
- 'STAGING_DIR_HOST', 'SERIAL_CONSOLES']
+ 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
return build_vars + [k for k in d.keys() if k.startswith('QB_')]
do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
@@ -116,12 +118,17 @@ python do_write_qemuboot_conf() {
import configparser
qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ if d.getVar('IMAGE_LINK_NAME'):
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ else:
+ qemuboot_link = ""
finalpath = d.getVar("DEPLOY_DIR_IMAGE")
topdir = d.getVar('TOPDIR')
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
for k in sorted(qemuboot_vars(d)):
+ if ":" in k:
+ continue
# qemu-helper-native sysroot is not removed by rm_work and
# contains all tools required by runqemu
if k == 'STAGING_BINDIR_NATIVE':
@@ -129,6 +136,8 @@ python do_write_qemuboot_conf() {
'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
else:
val = d.getVar(k)
+ if val is None:
+ continue
# we only want to write out relative paths so that we can relocate images
# and still run them
if val.startswith(topdir):
@@ -149,7 +158,7 @@ python do_write_qemuboot_conf() {
with open(qemuboot, 'w') as f:
cf.write(f)
- if qemuboot_link != qemuboot:
+ if qemuboot_link and qemuboot_link != qemuboot:
if os.path.lexists(qemuboot_link):
os.remove(qemuboot_link)
os.symlink(os.path.basename(qemuboot), qemuboot_link)
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
deleted file mode 100644
index 278eeedc74..0000000000
--- a/meta/classes/reproducible_build.bbclass
+++ /dev/null
@@ -1,127 +0,0 @@
-# reproducible_build.bbclass
-#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
-# Upstream components (generally) respect this environment variable,
-# using it in place of the "current" date and time.
-# See https://reproducible-builds.org/specs/source-date-epoch/
-#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
-#
-# There are 4 ways we determine SOURCE_DATE_EPOCH:
-#
-# 1. Use the value from __source_date_epoch.txt file if this file exists.
-# This file was most likely created in the previous build by one of the following methods 2,3,4.
-# Alternatively, it can be provided by a recipe via SRC_URI.
-#
-# If the file does not exist:
-#
-# 2. If there is a git checkout, use the last git commit timestamp.
-# Git does not preserve file timestamps on checkout.
-#
-# 3. Use the mtime of "known" files such as NEWS, CHANGLELOG, ...
-# This works for well-kept repositories distributed via tarball.
-#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
-# This will be the newest file from the distribution tarball, if any.
-#
-# 5. Fall back to a fixed timestamp.
-#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
-
-BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
-
-SDE_DIR = "${WORKDIR}/source-date-epoch"
-SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
-SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
-
-# Enable compiler warning when the __TIME__, __DATE__ and __TIMESTAMP__ macros are used.
-TARGET_CC_ARCH_append_class-target = " -Wdate-time"
-
-# A SOURCE_DATE_EPOCH of '0' might be misinterpreted as no SDE
-export SOURCE_DATE_EPOCH_FALLBACK ??= "1302044400"
-
-SSTATETASKS += "do_deploy_source_date_epoch"
-
-do_deploy_source_date_epoch () {
- mkdir -p ${SDE_DEPLOYDIR}
- if [ -e ${SDE_FILE} ]; then
- echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
- cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
- else
- echo "${SDE_FILE} not found!"
- fi
-}
-
-python do_deploy_source_date_epoch_setscene () {
- sstate_setscene(d)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
- if os.path.exists(sde_file):
- target = d.getVar('SDE_FILE')
- bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
- bb.utils.rename(sde_file, target)
- else:
- bb.debug(1, "%s not found!" % sde_file)
-}
-
-do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
-addtask do_deploy_source_date_epoch_setscene
-addtask do_deploy_source_date_epoch before do_configure after do_patch
-
-python create_source_date_epoch_stamp() {
- import oe.reproducible
-
- epochfile = d.getVar('SDE_FILE')
- tmp_file = "%s.new" % epochfile
-
- source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
-
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(tmp_file, 'w') as f:
- f.write(str(source_date_epoch))
-
- os.rename(tmp_file, epochfile)
-}
-
-def get_source_date_epoch_value(d):
- cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
- if cached:
- return cached
-
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
- try:
- with open(epochfile, 'r') as f:
- s = f.read()
- try:
- source_date_epoch = int(s)
- # workaround for old sstate with SDE_FILE content being 0 - use SOURCE_DATE_EPOCH_FALLBACK
- if source_date_epoch == 0 :
- source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
- bb.warn("SOURCE_DATE_EPOCH value from sstate '%s' is deprecated/invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK '%s'" % (s, source_date_epoch))
- except ValueError:
- bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
- source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- except FileNotFoundError:
- bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
-
- d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
- return str(source_date_epoch)
-
-export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
-BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
-}
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
deleted file mode 100644
index 393372993d..0000000000
--- a/meta/classes/reproducible_build_simple.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
-# Setup default environment for reproducible builds.
-
-BUILD_REPRODUCIBLE_BINARIES = "1"
-
-export PYTHONHASHSEED = "0"
-export PERL_HASH_SEED = "0"
-export SOURCE_DATE_EPOCH ??= "1520598896"
-
-REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..5f12d5aaeb 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -13,7 +13,7 @@
# Recipes can also configure which entries in their ${WORKDIR}
# are preserved besides temp, which already gets excluded by default
# because it contains logs:
-# do_install_append () {
+# do_install:append () {
# echo "bar" >${WORKDIR}/foo
# }
# RM_WORK_EXCLUDE_ITEMS += "foo"
@@ -24,7 +24,7 @@ RM_WORK_EXCLUDE_ITEMS = "temp"
BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
-BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
+BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
@@ -73,7 +73,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f $i;
+ rm -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +90,7 @@ do_rm_work () {
;;
esac
done
- rm -f $i
+ rm -f -- $i
esac
done
@@ -100,9 +100,9 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on NFS2/3 servers.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ rm -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ rm -rf -- $dir
fi
done
}
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
index 7c00bea597..15e6091b9d 100644
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -28,6 +28,6 @@ inherit rm_work
# Instead go up one level and remove ourself.
DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
-do_rm_work_append () {
+do_rm_work:append () {
rm -rf ${DL_DIR}
}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index e66ed5938b..7b92df69c5 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -21,9 +21,9 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
# otherwise kernel or initramfs end up mounting the rootfs read/write
# (the default) if supported by the underlying storage.
#
-# We do this with _append because the default value might get set later with ?=
+# We do this with :append because the default value might get set later with ?=
# and we don't want to disable such a default by setting a value here.
-APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
+APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
@@ -39,6 +39,8 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd"
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check;", "", d)}'
+
inherit image-artifact-names
# Sort the user and group entries in /etc by ID in order to make the content
@@ -50,7 +52,7 @@ inherit image-artifact-names
# the numeric IDs of dynamically created entries remain stable.
#
# We want this to run as late as possible, in particular after
-# systemd_sysusers_create and set_user_group. Using _append is not
+# systemd_sysusers_create and set_user_group. Using :append is not
# enough for that, set_user_group is added that way and would end
# up running after us.
SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
@@ -60,7 +62,7 @@ python () {
}
systemd_create_users () {
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
[ -e $conffile ] || continue
grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
if [ "$type" = "u" ]; then
@@ -76,12 +78,8 @@ systemd_create_users () {
eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
elif [ "$type" = "m" ]; then
group=$id
- if [ ! `grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group` ]; then
- eval groupadd --root ${IMAGE_ROOTFS} --system $group
- fi
- if [ ! `grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd` ]; then
- eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
- fi
+ eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
+ eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
fi
done
@@ -216,8 +214,8 @@ postinst_enable_logging () {
# Modify systemd default target
#
set_systemd_default_target () {
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
- ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then
+ ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
fi
}
@@ -373,3 +371,46 @@ rootfs_reproducible () {
fi
fi
}
+
+# Perform a dumb check for unit existence, not its validity
+python overlayfs_qa_check() {
+ from oe.overlayfs import mountUnitName
+
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {}
+ imagepath = d.getVar("IMAGE_ROOTFS")
+ sysconfdir = d.getVar("sysconfdir")
+ searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"),
+ oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))]
+ fstabpath = oe.path.join(imagepath, sysconfdir, "fstab")
+
+ if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]):
+ return
+
+ fstabDevices = []
+ if os.path.isfile(fstabpath):
+ with open(fstabpath, 'r') as f:
+ for line in f:
+ if line[0] == '#':
+ continue
+ path = line.split(maxsplit=2)
+ if len(path) > 2:
+ fstabDevices.append(path[1])
+
+ allUnitExist = True
+ for mountPoint in overlayMountPoints:
+ mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
+ if mountPath in fstabDevices:
+ continue
+
+ mountUnit = mountUnitName(mountPath)
+ if any(os.path.isfile(oe.path.join(dirpath, mountUnit))
+ for dirpath in searchpaths):
+ continue
+
+ bb.warn('Mount path %s not found in fstab and unit %s not found '
+ 'in systemd unit directories' % (mountPath, mountUnit))
+ allUnitExist = False
+
+ if not allUnitExist:
+ bb.fatal('Not all mount paths and units are installed in the image')
+}
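The check corresponds to configurations of the following shape (the mount point value is illustrative; the varflag syntax matches what oe.overlayfs expects):

    # In machine or distro configuration
    OVERLAYFS_MOUNT_POINT[data] = "/data"

For every declared mount point the image must contain either a matching fstab entry or a systemd mount unit derived from the path (here data.mount); if any are missing, rootfs postprocessing fails rather than producing an image that cannot mount its overlays.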
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 0af7d65b1a..bec4d63ed6 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -4,12 +4,12 @@
ROOTFS_PKGMANAGE = "rpm dnf"
-# dnf is using our custom distutils, and so will fail without these
+# dnf is using our custom sysconfig module, and so will fail without these
export STAGING_INCDIR
export STAGING_LIBDIR
# Add 100Meg of extra space for dnf
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
+IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
# Dnf is python based, so be sure python3-native is available to us.
EXTRANATIVEPATH += "python3-native"
diff --git a/meta/classes/rust-bin.bbclass b/meta/classes/rust-bin.bbclass
new file mode 100644
index 0000000000..c87343b3cf
--- /dev/null
+++ b/meta/classes/rust-bin.bbclass
@@ -0,0 +1,149 @@
+inherit rust
+
+RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
+
+RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
+EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
+
+# Some libraries alias with the standard library but libstd is configured to
+# make it difficult or imposisble to use its version. Unfortunately libstd
+# must be explicitly overridden using extern.
+OVERLAP_LIBS = "\
+ libc \
+ log \
+ getopts \
+ rand \
+"
+def get_overlap_deps(d):
+ deps = d.getVar("DEPENDS").split()
+ overlap_deps = []
+ for o in d.getVar("OVERLAP_LIBS").split():
+ l = len([o for dep in deps if (o + '-rs' in dep)])
+ if l > 0:
+ overlap_deps.append(o)
+ return " ".join(overlap_deps)
+OVERLAP_DEPS = "${@get_overlap_deps(d)}"
+
+# Prevents multiple static copies of standard library modules
+# See https://github.com/rust-lang/rust/issues/19680
+RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
+RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
+
+CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
+BINNAME ?= "${BPN}"
+LIBNAME ?= "lib${CRATE_NAME}-rs"
+CRATE_TYPE ?= "dylib"
+BIN_SRC ?= "${S}/src/main.rs"
+LIB_SRC ?= "${S}/src/lib.rs"
+
+rustbindest ?= "${bindir}"
+rustlibdest ?= "${rustlibdir}"
+RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
+
+def relative_rpaths(paths, base):
+ relpaths = set()
+ for p in paths.split(':'):
+ if p == base:
+ relpaths.add('$ORIGIN')
+ continue
+ relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
+ return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
+
+RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
+RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
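To illustrate relative_rpaths (paths invented): with RUST_RPATH_ABS = "/usr/lib/rust:/usr/lib/rustlib" and a binary destined for rustbindest = "/usr/bin", the helper emits something like

    -rpath=$ORIGIN/../lib/rust:$ORIGIN/../lib/rustlib

(the entries are collected in a set, so their order may vary). Using $ORIGIN-relative rpaths keeps binaries working after the SDK or image is relocated.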
+
+def libfilename(d):
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ return d.getVar('LIBNAME', True) + '.so'
+ else:
+ return d.getVar('LIBNAME', True) + '.rlib'
+
+def link_args(d, bin):
+ linkargs = []
+ if bin:
+ rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
+ else:
+ rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ linkargs.append('-soname')
+ linkargs.append(libfilename(d))
+ if len(rpaths):
+ linkargs.append(rpaths)
+ if len(linkargs):
+ return ' '.join(['-Wl,' + arg for arg in linkargs])
+ else:
+ return ''
+
+get_overlap_externs () {
+ externs=
+ for dep in ${OVERLAP_DEPS}; do
+ extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
+ | awk '{print $1}');
+ if [ -n "$extern" ]; then
+ externs="$externs --extern $dep=$extern"
+ else
+ echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
+ exit 1
+ fi
+ done
+ echo "$externs"
+}
+
+do_configure () {
+}
+
+oe_runrustc () {
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+ "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+}
+
+oe_compile_rust_lib () {
+ rm -rf ${LIBNAME}.{rlib,so}
+ local -a link_args
+ if [ -n '${@link_args(d, False)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, False)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${LIB_SRC} \
+ -o ${@libfilename(d)} \
+ --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
+ "$@"
+}
+oe_compile_rust_lib[vardeps] += "get_overlap_externs"
+
+oe_compile_rust_bin () {
+ rm -rf ${BINNAME}
+ local -a link_args
+ if [ -n '${@link_args(d, True)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, True)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${BIN_SRC} -o ${BINNAME} "$@"
+}
+oe_compile_rust_bin[vardeps] += "get_overlap_externs"
+
+oe_install_rust_lib () {
+ for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
+ echo Installing $lib
+ install -D -m 755 $lib ${D}/${rustlibdest}/$lib
+ done
+}
+
+oe_install_rust_bin () {
+ echo Installing ${BINNAME}
+ install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
+}
+
+do_rust_bin_fixups() {
+ for f in `find ${PKGD} -name '*.so*'`; do
+ echo "Strip rust note: $f"
+ ${OBJCOPY} -R .note.rustc $f $f
+ done
+}
+PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
+
diff --git a/meta/classes/rust-common.bbclass b/meta/classes/rust-common.bbclass
new file mode 100644
index 0000000000..65ad677499
--- /dev/null
+++ b/meta/classes/rust-common.bbclass
@@ -0,0 +1,185 @@
+inherit python3native
+
+# Common variables used by all Rust builds
+export rustlibdir = "${libdir}/rust"
+FILES:${PN} += "${rustlibdir}/*.so"
+FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
+FILES:${PN}-dbg += "${rustlibdir}/.debug"
+
+RUSTLIB = "-L ${STAGING_LIBDIR}/rust"
+RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
+RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
+RUSTLIB_DEP ?= "libstd-rs"
+export RUST_TARGET_PATH = "${STAGING_LIBDIR_NATIVE}/rustlib"
+RUST_PANIC_STRATEGY ?= "unwind"
+
+# Native builds are not affected by TCLIBC. Without this, rust-native
+# thinks its "target" (i.e. x86_64-linux) is a musl target.
+RUST_LIBC = "${TCLIBC}"
+RUST_LIBC:class-native = "glibc"
+
+def determine_libc(d, thing):
+ '''Determine which libc something should target'''
+
+ # BUILD is never musl, TARGET may be musl or glibc,
+ # HOST could be musl, but only if a compiler is built to be run on
+ # target in which case HOST_SYS != BUILD_SYS.
+ if thing == 'TARGET':
+ libc = d.getVar('RUST_LIBC')
+ elif thing == 'BUILD' and (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ libc = d.getVar('RUST_LIBC')
+ else:
+ libc = d.getVar('RUST_LIBC:class-native')
+
+ return libc
+
+def target_is_armv7(d):
+ '''Determine if target is armv7'''
+ # TUNE_FEATURES may include arm* even if the target is not arm
+ # in the case of *-native packages
+ if d.getVar('TARGET_ARCH') != 'arm':
+ return False
+
+ feat = d.getVar('TUNE_FEATURES')
+ feat = frozenset(feat.split())
+ mach_overrides = d.getVar('MACHINEOVERRIDES')
+ mach_overrides = frozenset(mach_overrides.split(':'))
+
+ v7=frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve'])
+ if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7):
+ return False
+ else:
+ return True
+target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}"
+
+# Responsible for taking Yocto triples and converting them to Rust triples
+def rust_base_triple(d, thing):
+ '''
+ Mangle bitbake's *_SYS into something that rust might support (see
+ rust/mk/cfg/* for a list)
+
+ Note that os is assumed to be some linux form
+ '''
+
+ # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf
+ if thing == "TARGET" and target_is_armv7(d):
+ arch = "armv7"
+ else:
+ arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
+
+ # All the Yocto targets are Linux and are 'unknown'
+ vendor = "-unknown"
+ os = d.getVar('{}_OS'.format(thing))
+ libc = determine_libc(d, thing)
+
+ # Prefix with a dash and convert glibc -> gnu
+ if libc == "glibc":
+ libc = "-gnu"
+ elif libc == "musl":
+ libc = "-musl"
+
+ # Don't double up musl (only appears to be the case on aarch64)
+ if os == "linux-musl":
+ if libc != "-musl":
+ bb.fatal("{}_OS was '{}' but TCLIBC was not 'musl'".format(thing, os))
+ os = "linux"
+
+ # This catches ARM targets and appends the necessary hard float bits
+ if os == "linux-gnueabi" or os == "linux-musleabi":
+ libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
+ return arch + vendor + '-' + os + libc
+
+
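Two worked examples of the triple mangling (tune values invented): a glibc target with TARGET_ARCH = "x86_64" and TARGET_OS = "linux" maps to

    x86_64-unknown-linux-gnu

while a hard-float armv7 musl target (TARGET_OS = "linux-musleabi" with callconvention-hard in TUNE_FEATURES) maps to

    armv7-unknown-linux-musleabihf

matching the triples rustc's --target option expects.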
+# In some cases uname and the toolchain differ on their idea of the arch name
+RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
+
+# Naming explanation
+# Yocto
+# - BUILD_SYS - Yocto triple of the build environment
+# - HOST_SYS - What we're building for in Yocto
+# - TARGET_SYS - What we're building for in Yocto
+#
+# So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS
+# When building packages for the image HOST_SYS == TARGET_SYS
+# This is a gross oversimplification, as there are other modes, but
+# currently this is all that's supported.
+#
+# Rust
+# - TARGET - the system where the binary will run
+# - HOST - the system where the binary is being built
+#
+# Rust will also use two additional cases:
+# - undecorated (e.g. CC) - equivalent to TARGET
+# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
+# see: https://github.com/alexcrichton/gcc-rs
+# The way that Rust's internal triples and Yocto triples are mapped together
+# it's likely best not to use the triple suffix due to potential confusion.
+
+RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
+RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
+RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
+
+# Wrappers to get around the fact that Rust expects a single
+# binary while Yocto's compiler and linker commands carry extra
+# arguments. Technically the archiver is always one command, but
+# the wrapper is still needed for builds that determine the prefix
+# and then invoke those commands based on it.
+WRAPPER_DIR = "${WORKDIR}/wrapper"
+RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc"
+RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx"
+RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld"
+RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar"
+RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc"
+RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx"
+RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld"
+RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar"
+
+create_wrapper () {
+ file="$1"
+ shift
+
+ cat <<- EOF > "${file}"
+ #!/usr/bin/env python3
+ import os, sys
+ orig_binary = "$@"
+ binary = orig_binary.split()[0]
+ args = orig_binary.split() + sys.argv[1:]
+ os.execvp(binary, args)
+ EOF
+ chmod +x "${file}"
+}
+
+export WRAPPER_TARGET_CC = "${CC}"
+export WRAPPER_TARGET_CXX = "${CXX}"
+export WRAPPER_TARGET_CCLD = "${CCLD}"
+export WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
+export WRAPPER_TARGET_AR = "${AR}"
+
+# compiler is used by gcc-rs
+# linker is used by rustc/cargo
+# archiver is used by the build of libstd-rs
+do_rust_create_wrappers () {
+ mkdir -p "${WRAPPER_DIR}"
+
+ # Yocto Build / Rust Host C compiler
+ create_wrapper "${RUST_BUILD_CC}" "${BUILD_CC}"
+ # Yocto Build / Rust Host C++ compiler
+ create_wrapper "${RUST_BUILD_CXX}" "${BUILD_CXX}"
+ # Yocto Build / Rust Host linker
+ create_wrapper "${RUST_BUILD_CCLD}" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
+ # Yocto Build / Rust Host archiver
+ create_wrapper "${RUST_BUILD_AR}" "${BUILD_AR}"
+
+ # Yocto Target / Rust Target C compiler
+ create_wrapper "${RUST_TARGET_CC}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target C++ compiler
+ create_wrapper "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_CXX}"
+ # Yocto Target / Rust Target linker
+ create_wrapper "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target archiver
+ create_wrapper "${RUST_TARGET_AR}" "${WRAPPER_TARGET_AR}"
+
+}
+
+addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot
+do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}"
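For example (compiler command invented), create_wrapper "${RUST_TARGET_CC}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}" produces a target-rust-cc script along these lines:

    #!/usr/bin/env python3
    import os, sys
    # Full multi-word command captured at wrapper-creation time (invented here)
    orig_binary = "arm-poky-linux-gnueabi-gcc -mthumb --sysroot=/path/to/recipe-sysroot -Wl,-O1"
    binary = orig_binary.split()[0]
    args = orig_binary.split() + sys.argv[1:]
    os.execvp(binary, args)

which gives rustc and cargo the single-binary compiler/linker they require while still passing Yocto's multi-word command lines through unchanged.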
diff --git a/meta/classes/rust.bbclass b/meta/classes/rust.bbclass
new file mode 100644
index 0000000000..5c8938d09f
--- /dev/null
+++ b/meta/classes/rust.bbclass
@@ -0,0 +1,45 @@
+inherit rust-common
+
+RUSTC = "rustc"
+
+RUSTC_ARCHFLAGS += "--target=${HOST_SYS} ${RUSTFLAGS}"
+
+def rust_base_dep(d):
+ # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to
+ # use rust instead of gcc
+ deps = ""
+ if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'):
+ if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ deps += " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+ else:
+ deps += " rust-native"
+ return deps
+
+DEPENDS:append = " ${@rust_base_dep(d)}"
+
+# BUILD_LDFLAGS
+# ${STAGING_LIBDIR_NATIVE}
+# ${STAGING_BASE_LIBDIR_NATIVE}
+# BUILDSDK_LDFLAGS
+# ${STAGING_LIBDIR}
+# #{STAGING_DIR_HOST}
+# TARGET_LDFLAGS ?????
+#RUSTC_BUILD_LDFLAGS = "\
+# --sysroot ${STAGING_DIR_NATIVE} \
+# -L${STAGING_LIBDIR_NATIVE} \
+# -L${STAGING_BASE_LIBDIR_NATIVE} \
+#"
+
+# XXX: for some reason bitbake sets BUILD_* & TARGET_* but uses the bare
+# variables for HOST. Alias things to make it easier for us.
+HOST_LDFLAGS ?= "${LDFLAGS}"
+HOST_CFLAGS ?= "${CFLAGS}"
+HOST_CXXFLAGS ?= "${CXXFLAGS}"
+HOST_CPPFLAGS ?= "${CPPFLAGS}"
+
+rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${HOST_SYS}/lib"
+# Native sysroot standard library path
+rustlib_src="${prefix}/lib/${rustlib_suffix}"
+# Host sysroot standard library path
+rustlib="${libdir}/${rustlib_suffix}"
+rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index e907a3566f..92807dc88e 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -185,37 +185,6 @@ def raise_sanity_error(msg, d, network_error=False):
%s""" % msg)
-# Check flags associated with a tuning.
-def check_toolchain_tune_args(data, tune, multilib, errs):
- found_errors = False
- if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
- found_errors = True
-
- return found_errors
-
-def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
- args_missing = []
-
- # If no args are listed/required, we are done.
- if not args_wanted:
- return
- for arg in args_wanted:
- if arg not in args_set:
- args_missing.append(arg)
-
- found_errors = False
- if args_missing:
- found_errors = True
- tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
- (tune, ' '.join(args_missing), which, ' '.join(args_set)))
- return found_errors
-
# Check a single tune for validity.
def check_toolchain_tune(data, tune, multilib):
tune_errors = []
@@ -227,7 +196,7 @@ def check_toolchain_tune(data, tune, multilib):
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
localdata.setVar("OVERRIDES", overrides)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
@@ -247,17 +216,6 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST")
- if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
- if not tuneabi:
- tuneabi = tune
- if True not in [x in whitelist.split() for x in tuneabi.split()]:
- tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
- (tune, tuneabi))
- else:
- if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
- bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
if tune_errors:
return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
@@ -281,7 +239,7 @@ def check_toolchain(data):
seen_libs.append(lib)
if not lib in global_multilibs:
tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
+ tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
if tune in seen_tunes:
tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
else:
@@ -395,7 +353,7 @@ def check_connectivity(d):
msg += " Please ensure your host's network is configured correctly.\n"
msg += " If your ISP or network is blocking the above URL,\n"
msg += " try with another domain name, for example by setting:\n"
- msg += " CONNECTIVITY_CHECK_URIS = \"https://www.yoctoproject.org/\""
+    msg += "      CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\"\n"
msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
msg += " access if all required sources are on local disk.\n"
retval = msg
@@ -462,13 +420,12 @@ def check_sanity_validmachine(sanity_data):
# Patch before 2.7 can't handle all the features in git-style diffs. Some
# patches may incorrectly apply, and others won't apply at all.
def check_patch_version(sanity_data):
- from distutils.version import LooseVersion
import re, subprocess
try:
result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
- if LooseVersion(version) < LooseVersion("2.7"):
+ if bb.utils.vercmp_string_op(version, "2.7", "<"):
return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
else:
return None
@@ -478,7 +435,6 @@ def check_patch_version(sanity_data):
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
@@ -486,7 +442,7 @@ def check_make_version(sanity_data):
except subprocess.CalledProcessError as e:
return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) == LooseVersion("3.82"):
+ if bb.utils.vercmp_string_op(version, "3.82", "=="):
# Construct a test file
f = open("makefile_test", "w")
f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
@@ -530,7 +486,7 @@ def check_wsl(d):
bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
return None
-# Require at least gcc version 6.0.
+# Require at least gcc version 7.5.
#
# This can be fixed on CentOS-7 with devtoolset-6+
# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
@@ -539,27 +495,25 @@ def check_wsl(d):
# built buildtools-extended-tarball)
#
def check_gcc_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
if build_cc.strip() == "gcc":
- if LooseVersion(version) < LooseVersion("6.0"):
- return "Your version of gcc is older than 6.0 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
+ if bb.utils.vercmp_string_op(version, "7.5", "<"):
+ return "Your version of gcc is older than 7.5 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
return None
# Tar version 1.24 and onwards handle overwriting symlinks correctly
# but earlier versions do not; this needs to work properly for sstate
# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
def check_tar_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[3]
- if LooseVersion(version) < LooseVersion("1.28"):
+ if bb.utils.vercmp_string_op(version, "1.28", "<"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
return None
@@ -567,14 +521,13 @@ def check_tar_version(sanity_data):
# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
def check_git_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) < LooseVersion("1.8.3.1"):
+ if bb.utils.vercmp_string_op(version, "1.8.3.1", "<"):
return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
return None
@@ -606,6 +559,24 @@ def sanity_check_conffiles(d):
bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
+def drop_v14_cross_builds(d):
+ import glob
+ indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
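+    # Each index line is expected to hold a "stamp manifest workdir" triple,
+    # matching the unpacking below.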
+ for i in indexes:
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in reversed(lines):
+ try:
+ (stamp, manifest, workdir) = l.split()
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+ for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ bb.utils.remove(workdir, recurse = True)
+
def sanity_handle_abichanges(status, d):
#
# Check the 'ABI' of TMPDIR
@@ -624,7 +595,10 @@ def sanity_handle_abichanges(status, d):
status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
elif int(abi) <= 13 and current_abi == "14":
status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
-
+ elif int(abi) == 14 and current_abi == "15":
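+        # The 14 -> 15 change appears to only invalidate cross recipe output
+        # (SSTATE_PKGARCH for cross dropped TARGET_ARCH), so clean just those
+        # builds rather than requiring a full TMPDIR wipe.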
+ drop_v14_cross_builds(d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -796,9 +770,8 @@ def check_sanity_everybuild(status, d):
status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
# Check the bitbake version meets minimum requirements
- from distutils.version import LooseVersion
minversion = d.getVar('BB_MIN_VERSION')
- if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
+ if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
sanity_check_locale(d)
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 4f3ae502ef..80f8382107 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -5,9 +5,9 @@ DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
do_configure() {
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
- ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
fi
mkdir -p `dirname ${CONFIGURESTAMPFILE}`
@@ -16,12 +16,12 @@ do_configure() {
}
scons_do_compile() {
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
die "scons build execution failed."
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
die "scons install execution failed."
}
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/setuptools3-base.bbclass
index 43a38e5a3a..15abe1dd63 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/setuptools3-base.bbclass
@@ -1,3 +1,7 @@
+DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
+DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
+RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
+
export STAGING_INCDIR
export STAGING_LIBDIR
@@ -13,13 +17,15 @@ export CCSHARED = "-fPIC -DPIC"
# the python executable
export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
-FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
-FILES_${PN}-staticdev += "\
+FILES:${PN}-staticdev += "\
${PYTHON_SITEPACKAGES_DIR}/*.a \
"
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/pkgconfig \
${libdir}/pkgconfig \
${PYTHON_SITEPACKAGES_DIR}/*.la \
"
+inherit python3native python3targetconfig
+
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
index 8ca66ee708..556bc801af 100644
--- a/meta/classes/setuptools3.bbclass
+++ b/meta/classes/setuptools3.bbclass
@@ -1,4 +1,33 @@
-inherit distutils3
+inherit setuptools3-base python_pep517
-DEPENDS += "python3-setuptools-native"
+# bdist_wheel builds in ./dist
+#B = "${WORKDIR}/build"
+SETUPTOOLS_BUILD_ARGS ?= ""
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_do_configure() {
+ :
+}
+
+setuptools3_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_do_compile[vardepsexclude] = "MACHINE"
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+setuptools3_do_install() {
+ python_pep517_do_install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native python3-wheel-native"
diff --git a/meta/classes/setuptools3_legacy.bbclass b/meta/classes/setuptools3_legacy.bbclass
new file mode 100644
index 0000000000..5a99daadb5
--- /dev/null
+++ b/meta/classes/setuptools3_legacy.bbclass
@@ -0,0 +1,78 @@
+# This class is for packages which use the deprecated setuptools behaviour,
+# specifically custom install tasks which don't work correctly with bdist_wheel.
+# This behaviour is deprecated in setuptools[1] and won't work in the future, so
+# all users of this should consider their options: pure Python modules can use a
+# modern Python tool such as build[2], or packages which are doing more (such as
+# installing init scripts) should use a fully-featured build system such as Meson.
+#
+# [1] https://setuptools.pypa.io/en/latest/history.html#id142
+# [2] https://pypi.org/project/build/
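+#
+# A hypothetical user of this class would typically just "inherit
+# setuptools3_legacy" and, where needed, extend the SETUPTOOLS_BUILD_ARGS or
+# SETUPTOOLS_INSTALL_ARGS variables defined below.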
+
+inherit setuptools3-base
+
+B = "${WORKDIR}/build"
+
+SETUPTOOLS_BUILD_ARGS ?= ""
+SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+SETUPTOOLS_PYTHON = "python3"
+SETUPTOOLS_PYTHON:class-native = "nativepython3"
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_legacy_do_configure() {
+ :
+}
+
+setuptools3_legacy_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
+
+setuptools3_legacy_do_install() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
+
+    # Remove references to the build directory (${D}) from installed .py
+    # files; the find/sed construct handles filenames with *spaces*
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \;
+
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ fi
+ done
+
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if [ -e ${D}${datadir}/share ]; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ rmdir ${D}${datadir}/share
+ fi
+}
+setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native"
+
diff --git a/meta/classes/setuptools_build_meta.bbclass b/meta/classes/setuptools_build_meta.bbclass
new file mode 100644
index 0000000000..b2bba35a0b
--- /dev/null
+++ b/meta/classes/setuptools_build_meta.bbclass
@@ -0,0 +1,5 @@
+inherit setuptools3-base python_pep517
+
+DEPENDS += "python3-setuptools-native python3-wheel-native"
+
+PEP517_BUILD_API = "setuptools.build_meta"
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 7ff3a35a2f..16bcd147aa 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -29,7 +29,7 @@ PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
# Make feed signing key to be present in rootfs
-FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
python () {
# Check sanity of configuration
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index 0bd1f36805..3555d5a663 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -101,18 +101,18 @@ def siteinfo_data_for_machine(arch, os, d):
"mips64el-linux-gnun32": "mipsel-linux bit-32",
"mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
"mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
- "powerpc-linux": "powerpc32-linux",
- "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpcle-linux": "powerpc32-linux",
- "powerpcle-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-musl": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux",
+ "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
"riscv32-linux": "riscv32-linux",
"riscv32-linux-musl": "riscv32-linux",
"riscv64-linux": "riscv64-linux",
@@ -176,17 +176,39 @@ python () {
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, sysrootcache = False):
+# Layers with siteconfig need to add a replacement path to this variable so the
+# sstate isn't path specific
+SITEINFO_PATHVARS = "COREBASE"
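+# (a layer could, for instance, append its own hypothetical base variable:
+#  SITEINFO_PATHVARS += "MYLAYER_BASE")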
+
+def siteinfo_get_files(d, sysrootcache=False):
sitedata = siteinfo_data(d)
- sitefiles = ""
+ sitefiles = []
+ searched = []
for path in d.getVar("BBPATH").split(":"):
for element in sitedata:
filename = os.path.join(path, "site", element)
if os.path.exists(filename):
- sitefiles += filename + " "
+ searched.append(filename + ":True")
+ sitefiles.append(filename)
+ else:
+ searched.append(filename + ":False")
+
+ # Have to parameterise out hardcoded paths such as COREBASE for the main site files
+ for var in d.getVar("SITEINFO_PATHVARS").split():
+ searched2 = []
+ replace = os.path.normpath(d.getVar(var))
+ for s in searched:
+ searched2.append(s.replace(replace, "${" + var + "}"))
+ searched = searched2
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ # We need sstate sigs for native/cross not to vary upon arch so we can't depend on the site files.
+ # In future we may want to depend upon all site files?
+ # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
+ searched = []
if not sysrootcache:
- return sitefiles
+ return sitefiles, searched
# Now check for siteconfig cache files in sysroots
path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
@@ -195,8 +217,8 @@ def siteinfo_get_files(d, sysrootcache = False):
if not i.endswith("_config"):
continue
filename = os.path.join(path_siteconfig, i)
- sitefiles += filename + " "
- return sitefiles
+ sitefiles.append(filename)
+ return sitefiles, searched
#
# Make some information available via variables
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index f6710fc283..1c0cae4893 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1,4 +1,6 @@
-SSTATE_VERSION = "3"
+SSTATE_VERSION = "8"
+
+SSTATE_ZSTD_CLEVEL ??= "8"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
@@ -6,12 +8,12 @@ SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
def generate_sstatefn(spec, hash, taskname, siginfo, d):
if taskname is None:
return ""
- extension = ".tgz"
+ extension = ".tar.zst"
# 8 chars reserved for siginfo
limit = 254 - 8
if siginfo:
limit = 254
- extension = ".tgz.siginfo"
+ extension = ".tar.zst.siginfo"
if not hash:
hash = "INVALID"
fn = spec + hash + "_" + taskname + extension
@@ -20,7 +22,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d):
components = spec.split(":")
# Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
# 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
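+            # Whatever room is left under the limit is shared equally between
+            # the three informational fields truncated below.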
components[2] = components[2][:avail]
components[3] = components[3][:avail]
components[4] = components[4][:avail]
@@ -37,7 +39,7 @@ SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PK
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"
@@ -48,25 +50,32 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
+SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
-SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps since allarch is disabled when multilib is used
-SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
+SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
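+# Each entry below appears to follow the pattern "task:file-glob:substitution",
+# mapping build-specific paths (or "regex-" prefixed expressions) out of the
+# matched files before hash equivalence output hashes are computed.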
+SSTATE_HASHEQUIV_FILEMAP ?= " \
+ populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
+ populate_sysroot:*/postinst-useradd-*:${COREBASE} \
+ populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
+ populate_sysroot:*/crossscripts/*:${TMPDIR} \
+ populate_sysroot:*/crossscripts/*:${COREBASE} \
+ "
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
@@ -74,7 +83,6 @@ SSTATE_ARCHS = " \
${BUILD_ARCH} \
${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
- ${BUILD_ARCH}_${TARGET_ARCH} \
${SDK_ARCH}_${SDK_OS} \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
@@ -85,7 +93,7 @@ SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
@@ -107,6 +115,9 @@ SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+# List of signatures to consider valid.
+SSTATE_VALID_SIGS ??= ""
+SSTATE_VALID_SIGS[vardepvalue] = ""
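+# (only consulted when SSTATE_VERIFY_SIG is enabled; passed through to the
+# signature verification in sstate_installpkg)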
SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
@@ -126,7 +137,7 @@ python () {
elif bb.data.inherits_class('crosssdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
elif bb.data.inherits_class('nativesdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
@@ -146,6 +157,8 @@ python () {
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+ d.setVarFlag(task, 'network', '1')
+ d.setVarFlag(task + "_setscene", 'network', '1')
}
def sstate_init(task, d):
@@ -246,13 +259,13 @@ def sstate_install(ss, d):
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
+ overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
match = []
for f in sharedfiles:
if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
- for w in whitelist:
+ for w in overlap_allowed:
w = os.path.normpath(w)
if f.startswith(w):
realmatch = False
@@ -282,7 +295,7 @@ def sstate_install(ss, d):
"DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
- "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
"also be your build is including two different conflicting versions of " \
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
@@ -336,7 +349,7 @@ def sstate_install(ss, d):
for lock in locks:
bb.utils.unlockfile(lock)
-sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX"
+sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
@@ -363,7 +376,7 @@ def sstate_installpkg(ss, d):
bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
return False
signer = get_signer(d, 'local')
- if not signer.verify(sstatepkg + '.sig'):
+ if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
return False
@@ -640,10 +653,21 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
@@ -656,6 +680,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -677,6 +703,11 @@ def sstate_package(ss, d):
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
bb.utils.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_INSTDIR', sstatebuild)
@@ -705,6 +736,7 @@ def sstate_package(ss, d):
pass
except OSError as e:
# Handle read-only file systems gracefully
+ import errno
if e.errno != errno.EROFS:
raise e
@@ -730,6 +762,7 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('FILESPATH', dldir)
localdata.setVar('DL_DIR', dldir)
localdata.setVar('PREMIRRORS', mirrors)
+ localdata.setVar('SRCPV', d.getVar('SRCPV'))
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
@@ -754,11 +787,16 @@ def pstaging_fetch(sstatefetch, d):
except bb.fetch2.BBFetchException:
pass
+pstaging_fetch[vardepsexclude] += "SRCPV"
+
+
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- bb.fatal("No suitable staging package found")
+ msg = "No sstate archive obtainable, will run full task instead."
+ bb.warn(msg)
+ raise bb.BBHandledException(msg)
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -795,41 +833,46 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
sstate_create_package () {
# Exit early if it already exists
if [ -e ${SSTATE_PKG} ]; then
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ touch ${SSTATE_PKG} 2>/dev/null || true
return
fi
mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
+ OPT="-cS"
+ ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
set +e
- tar $OPT -f $TFILE *
+ tar -I "$ZSTD" $OPT -f $TFILE *
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
else
- tar $OPT --file=$TFILE --files-from=/dev/null
+ tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
# Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
# Move into place using ln to attempt an atomic op.
# Abort if it already exists
- ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ ln $TFILE ${SSTATE_PKG} || true
else
- rm $TFILE
+ touch ${SSTATE_PKG} 2>/dev/null || true
fi
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ rm $TFILE
}
python sstate_sign_package () {
@@ -857,21 +900,25 @@ python sstate_report_unihash() {
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
- tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ ZSTD="zstd -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -p ${ZSTD_THREADS}"
+ fi
+
+ tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
found = set()
- foundLocal = set()
- foundNet = set()
missed = set()
def gethash(task):
@@ -894,22 +941,22 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
return spec, extrapath, tname
+ def getsstatefile(tid, siginfo, d):
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(tid, d)
-
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
if os.path.exists(sstatefile):
- bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
found.add(tid)
- foundLocal.add(tid)
- continue
+ bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
else:
missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+ foundLocal = len(found)
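+    # Anything in "found" so far came from SSTATE_DIR itself; entries added
+    # past this point come from mirrors, as reported in the summary below.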
mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
@@ -941,64 +988,63 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+ import traceback
+
try:
fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
found.add(tid)
- foundNet.add(tid)
- if tid in missed:
- missed.remove(tid)
- except:
- missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
- if len(tasklist) >= min_tasks:
+ missed.remove(tid)
+ except bb.fetch2.FetchError as e:
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
+
+ if progress:
bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- min_tasks = 100
- for tid in sq_data['hash']:
- if tid in found:
- continue
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ for tid in missed:
+ sstatefile = d.expand(getsstatefile(tid, siginfo, d))
tasklist.append((tid, sstatefile))
if tasklist:
- if len(tasklist) >= min_tasks:
+ nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
+
+ progress = len(tasklist) >= 100
+ if progress:
msg = "Checking sstate mirror object availability"
bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
- import multiprocessing
- nproc = min(multiprocessing.cpu_count(), len(tasklist))
-
- bb.event.enable_threadlock()
- pool = oe.utils.ThreadedPool(nproc, len(tasklist),
- worker_init=checkstatus_init, worker_end=checkstatus_end)
- for t in tasklist:
- pool.add_task(checkstatus, t)
- pool.start()
- pool.wait_completion()
- bb.event.disable_threadlock()
-
- if len(tasklist) >= min_tasks:
+        # Have to set up the fetcher environment here rather than in each thread as it would race
+ fetcherenv = bb.fetch2.get_fetcher_environment(d)
+ with bb.utils.environment(**fetcherenv):
+ bb.event.enable_threadlock()
+ pool = oe.utils.ThreadedPool(nproc, len(tasklist),
+ worker_init=checkstatus_init, worker_end=checkstatus_end,
+ name="sstate_checkhashes-")
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.start()
+ pool.wait_completion()
+ bb.event.disable_threadlock()
+
+ if progress:
bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
for tid in missed:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
for tid in found:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
@@ -1012,12 +1058,14 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
match = 0
if total:
match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Local %d Network %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(foundLocal), len(foundNet),len(missed), currentcount, match, complete))
+ bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
+ (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -1036,15 +1084,13 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
- # We only need to trigger populate_lic through direct dependencies
- if taskdependees[task][1] == "do_populate_lic":
- return True
-
- # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
- if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+ # We only need to trigger deploy_source_date_epoch through direct dependencies
+ if taskdependees[task][1] in directtasks:
return True
# We only need to trigger packagedata through direct dependencies
@@ -1067,8 +1113,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
return False
- # do_package/packagedata/package_qa don't need do_populate_sysroot
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
+ # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -1116,13 +1162,9 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# Target populate_sysroot need their dependencies
return False
- if taskdependees[task][1] == 'do_shared_workdir':
+ if taskdependees[dep][1] in directtasks:
continue
- if taskdependees[dep][1] == "do_populate_lic":
- continue
-
-
# Safe fallthrough default
logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
@@ -1151,6 +1193,7 @@ python sstate_eventhandler() {
pass
except OSError as e:
# Handle read-only file systems gracefully
+ import errno
if e.errno != errno.EROFS:
raise e
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 32a615c743..ab827766be 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -19,12 +19,12 @@ SYSROOT_DIRS_NATIVE = " \
${sysconfdir} \
${localstatedir} \
"
-SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
# These directories will not be staged in the sysroot
-SYSROOT_DIRS_BLACKLIST = " \
+SYSROOT_DIRS_IGNORE = " \
${mandir} \
${docdir} \
${infodir} \
@@ -49,9 +49,10 @@ sysroot_stage_dir() {
fi
mkdir -p "$dest"
+ rdest=$(realpath --relative-to="$src" "$dest")
(
cd $src
- find . -print0 | cpio --null -pdlu $dest
+ find . -print0 | cpio --null -pdlu $rdest
)
}
@@ -64,7 +65,7 @@ sysroot_stage_dirs() {
done
# Remove directories we do not care about
- for dir in ${SYSROOT_DIRS_BLACKLIST}; do
+ for dir in ${SYSROOT_DIRS_IGNORE}; do
rm -rf "$to$dir"
done
}
@@ -82,7 +83,7 @@ python sysroot_strip () {
pn = d.getVar('PN')
libdir = d.getVar("libdir")
base_libdir = d.getVar("base_libdir")
- qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
+ qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
strip_cmd = d.getVar("STRIP")
oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
@@ -103,7 +104,7 @@ python do_populate_sysroot () {
for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
bb.build.exec_func(f, d)
pn = d.getVar("PN")
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
+ multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
bb.utils.mkdirhier(provdir)
for p in d.getVar("PROVIDES").split():
@@ -115,11 +116,11 @@ python do_populate_sysroot () {
}
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
-do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
POPULATESYSROOTDEPS = ""
-POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
-POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
SSTATETASKS += "do_populate_sysroot"
@@ -306,6 +307,7 @@ python extend_recipe_sysroot() {
sstatetasks = d.getVar("SSTATETASKS").split()
# Add recipe specific tasks referenced by setscene_depvalid()
sstatetasks.append("do_stash_locale")
+ sstatetasks.append("do_deploy")
def print_dep_tree(deptree):
data = ""
@@ -619,7 +621,40 @@ python staging_taskhandler() {
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
if task == "do_configure" or (deps and "populate_sysroot" in deps):
- d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
+ d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
+
+
+#
+# Target build output, stored in do_populate_sysroot or do_package, can depend
+# not only upon direct dependencies but also indirect ones. A good example is
+# linux-libc-headers. The toolchain depends on this but most target recipes do
+# not. Some of its headers are not used by the toolchain build and do not
+# change the toolchain task output, so the task hashes can change without
+# changing the sysroot output of that recipe, yet those headers can still
+# influence other recipes.
+#
+# A specific example is rtc.h, which affects rtcwake.c in util-linux but is
+# not used in the glibc or gcc build. To account for this, we need to include
+# the populate_sysroot hashes of dependencies in the task output hashes.
+#
+python target_add_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task not in ["do_populate_sysroot", "do_package"]:
+ return
+
+ pn = d.getVar("PN")
+ if pn.endswith("-native"):
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = {}
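+    # BB_TASKDEPDATA values appear to be tuples of the form
+    # (pn, taskname, fn, deps, provides, taskhash, unihash); only the recipe
+    # name, task name and final hash field are used here.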
+ for dep in taskdepdata.values():
+ if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0]:
+ deps[dep[0]] = dep[6]
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "target_add_sysroot_deps"
+
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
index 336c4c2ff5..57ec0acbc5 100644
--- a/meta/classes/systemd-boot.bbclass
+++ b/meta/classes/systemd-boot.bbclass
@@ -28,7 +28,7 @@ efi_populate() {
done
}
-efi_iso_populate_append() {
+efi_iso_populate:append() {
cp -r $iso_dir/loader ${EFIIMGDIR}
}
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index db5d109545..09ec52792d 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -1,9 +1,9 @@
# The list of packages that should have systemd packaging scripts added. For
-# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
+# each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service
# files in this package. If this variable isn't set, [package].service is used.
SYSTEMD_PACKAGES ?= "${PN}"
-SYSTEMD_PACKAGES_class-native ?= ""
-SYSTEMD_PACKAGES_class-nativesdk ?= ""
+SYSTEMD_PACKAGES:class-native ?= ""
+SYSTEMD_PACKAGES:class-nativesdk ?= ""
# Whether to enable or disable the services on installation.
SYSTEMD_AUTO_ENABLE ??= "enable"
@@ -70,7 +70,7 @@ python systemd_populate_packages() {
return
def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
+ val = (d.getVar('%s:%s' % (var, pkg)) or "").strip()
if val == "":
val = (d.getVar(var) or "").strip()
return val
@@ -85,39 +85,39 @@ python systemd_populate_packages() {
def systemd_generate_package_scripts(pkg):
bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
- paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
- d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
+ paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split())
+ d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped)
- # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
+ # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg
# variable.
localdata = d.createCopy()
localdata.prependVar("OVERRIDES", pkg + ":")
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('systemd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('systemd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- # Add files to FILES_*-systemd if existent and not already done
+    # Add files to FILES:*-systemd if they exist and are not already added
def systemd_append_file(pkg_systemd, file_append):
appended = False
if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
- var_name = "FILES_" + pkg_systemd
+ var_name = "FILES:" + pkg_systemd
files = d.getVar(var_name, False) or ""
if file_append not in files.split():
d.appendVar(var_name, " " + file_append)
appended = True
return appended
- # Add systemd files to FILES_*-systemd, parse for Also= and follow recursive
+    # Add systemd files to FILES:*-systemd, parse for Also= and follow recursively
def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
# avoid infinite recursion
if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
@@ -174,32 +174,32 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE_{1}. {2}".format(
+ bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
def systemd_create_presets(pkg, action):
presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
bb.utils.mkdirhier(os.path.dirname(presetf))
with open(presetf, 'a') as fd:
- for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
+ for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
fd.write("%s %s\n" % (action,service))
- d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
+ d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
# Run all modifications once when creating package
if os.path.exists(d.getVar("D")):
for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg):
+ if d.getVar('SYSTEMD_SERVICE:' + pkg):
systemd_generate_package_scripts(pkg)
action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
if action in ("enable", "disable"):
systemd_create_presets(pkg, action)
elif action not in ("mask", "preset"):
- bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
+ bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
systemd_check_services()
}
-PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
+PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
python rm_systemd_unitdir (){
import shutil
@@ -227,7 +227,7 @@ python rm_sysvinit_initddir (){
}
do_install[postfuncs] += "${RMINITDIR} "
-RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
-RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
RMINITDIR = ""
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index 6059ae95e0..a564ee7494 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -26,6 +26,9 @@ def emit_terminal_func(command, envdata, d):
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
+        # Override the shell that shell_trap_code specifies.
+ # If our shell is bash, we might well face silent death.
+ script.write("#!/bin/bash\n")
script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
@@ -37,7 +40,7 @@ def emit_terminal_func(command, envdata, d):
def oe_terminal(command, title, d):
import oe.data
import oe.terminal
-
+
envdata = bb.data.init()
for v in os.environ:
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index ed3a885bdf..898248992c 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -36,6 +36,7 @@ TESTIMAGE_AUTO ??= "0"
# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
+# QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs)
# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
# if a pattern is not specifically present on this variable a default will be used when booting the target.
@@ -60,23 +61,22 @@ BASICTESTSUITE = "\
ping date df ssh scp python perl gi ptest parselogs \
logrotate connman systemd oe_syslog pam stap ldd xorg \
kernelmodule gcc buildcpio buildlzip buildgalculator \
- dnf rpm opkg apt weston"
+ dnf rpm opkg apt weston go rust"
DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
-# aarch64 has no graphics
-DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
# musl doesn't support systemtap
-DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
+DEFAULT_TEST_SUITES:remove:libc-musl = "stap"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster;
# mitigate this by removing build tests for qemumips machines.
MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
-DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
-DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
+QEMU_USE_KVM ?= "1"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
TEST_OVERALL_TIMEOUT ?= ""
TEST_TARGET ?= "qemu"
@@ -86,7 +86,7 @@ TEST_RUNQEMUPARAMS ?= ""
TESTIMAGE_BOOT_PATTERNS ?= ""
TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
+TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
@@ -94,7 +94,7 @@ TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-na
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
-TESTIMAGELOCK_qemuall = ""
+TESTIMAGELOCK:qemuall = ""
TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
@@ -139,6 +139,7 @@ python do_testimage() {
addtask testimage
do_testimage[nostamp] = "1"
+do_testimage[network] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
@@ -201,6 +202,7 @@ def testimage_main(d):
import json
import signal
import logging
+ import shutil
from bb.utils import export_proxies
from oeqa.core.utils.misc import updateTestData
@@ -236,9 +238,10 @@ def testimage_main(d):
tdname = "%s.testdata.json" % image_name
try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+ with open(tdname, "r") as f:
+ td = json.load(f)
+ except FileNotFoundError as err:
+ bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
# Some variables need to be updated (mostly paths) with the
# ones of the current environment because some tests require them.
@@ -317,10 +320,6 @@ def testimage_main(d):
if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
- # TODO: Currently BBPATH is needed for custom loading of targets.
- # It would be better to find these modules using instrospection.
- target_kwargs['target_modules_path'] = d.getVar('BBPATH')
-
# hardware controlled targets might need further access
target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
@@ -408,10 +407,17 @@ def testimage_main(d):
get_testimage_result_id(configuration),
dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
results.logSummary(pn)
+
+ # Copy additional logs to tmp/log/oeqa so it's easier to find them
+ targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ os.makedirs(targetdir, exist_ok=True)
+ os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
+ os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
+
if not results or not complete:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
index 758a23ac55..8b2e74f606 100644
--- a/meta/classes/testsdk.bbclass
+++ b/meta/classes/testsdk.bbclass
@@ -36,12 +36,14 @@ python do_testsdk() {
}
addtask testsdk
do_testsdk[nostamp] = "1"
+do_testsdk[network] = "1"
python do_testsdkext() {
import_and_run('TESTSDKEXT_CLASS_NAME', d)
}
addtask testsdkext
do_testsdkext[nostamp] = "1"
+do_testsdkext[network] = "1"
python () {
if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
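The new `[network]` varflag opts a task into network access once BitBake's network isolation is enabled; tasks without it run with the network unavailable. A hedged sketch of a recipe-local task declaring the same requirement (the task name is illustrative):

    do_fetch_extra[network] = "1"
    python do_fetch_extra() {
        # may open sockets; without the [network] flag an isolated
        # build would deny this access
        pass
    }
    addtask fetch_extra after do_fetch before do_unpack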
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
index f46bacabd4..68c9d4fb70 100644
--- a/meta/classes/texinfo.bbclass
+++ b/meta/classes/texinfo.bbclass
@@ -7,12 +7,12 @@
# makeinfo from SANITY_REQUIRED_UTILITIES.
TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
-TEXDEP_class-native = "texinfo-dummy-native"
-TEXDEP_class-cross = "texinfo-dummy-native"
-TEXDEP_class-crosssdk = "texinfo-dummy-native"
-TEXDEP_class-cross-canadian = "texinfo-dummy-native"
-DEPENDS_append = " ${TEXDEP}"
+TEXDEP:class-native = "texinfo-dummy-native"
+TEXDEP:class-cross = "texinfo-dummy-native"
+TEXDEP:class-crosssdk = "texinfo-dummy-native"
+TEXDEP:class-cross-canadian = "texinfo-dummy-native"
+DEPENDS:append = " ${TEXDEP}"
# libtool-cross doesn't inherit cross
-TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
+TEXDEP:pn-libtool-cross = "texinfo-dummy-native"
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 9518ddf7a4..dd5c7f224b 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -106,7 +106,7 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
pkgdata['OPKGN'] = m.group(1)
kn = "_".join([x for x in kn.split("_") if x.isupper()])
pkgdata[kn] = kv.strip()
- if kn == 'FILES_INFO':
+ if kn.startswith('FILES_INFO'):
pkgdata[kn] = json.loads(kv)
except ValueError:
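The switch to `startswith()` accounts for pkgdata keys that now carry a package suffix after the override-syntax migration. A sketch of the two key shapes the loader has to accept (the package name is illustrative):

    # FILES_INFO          <- bare key in older pkgdata files
    # FILES_INFO:libfoo   <- package-suffixed key after the migration
    if kn.startswith('FILES_INFO'):
        pkgdata[kn] = json.loads(kv)  # the value is a JSON document either way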
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 67a812cb02..8f914cce27 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -3,12 +3,12 @@ inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-musl = " -mmusl"
+TARGET_CC_ARCH:append:libc-musl = " -mmusl"
# default debug prefix map isn't valid in the SDK
DEBUG_PREFIX_MAP = ""
-EXPORT_SDK_PS1 = "${@ 'export PS1=\'%s\'' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
+EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
@@ -65,6 +65,7 @@ toolchain_create_sdk_env_script () {
# This function creates an environment-setup-script in the TMPDIR which enables
# an OE-core IDE to integrate with the build tree
+# Caller must ensure CONFIG_SITE is set up
toolchain_create_tree_env_script () {
script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
rm -f $script
@@ -73,7 +74,7 @@ toolchain_create_tree_env_script () {
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
- echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
+ echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script
echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
@@ -161,7 +162,7 @@ EOF
}
# we get the cached site config at runtime
-TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
+TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
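The added `[0]` index and join suggest a changed return type for the siteinfo helper. A minimal sketch under that assumption (i.e. that `siteinfo_get_files()` now returns a tuple whose first element is a list of site-file paths rather than a pre-joined string):

    python () {
        sitefiles = siteinfo_get_files(d)[0]  # assumption: element 0 is a list of paths
        d.setVar('TOOLCHAIN_CONFIGSITE_NOCACHE', ' '.join(sitefiles))
    }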
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 451db0c650..b9ad35821a 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -11,13 +11,18 @@
#
# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
+def removesuffix(s, suffix):
+ if suffix and s.endswith(suffix):
+ return s[:-len(suffix)]
+ return s
+
# Some versions of u-boot use .bin and others use .img. By default use .bin
# but enable individual recipes to change this value.
UBOOT_SUFFIX ??= "bin"
UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
-UBOOT_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
-UBOOT_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_SUFFIX}"
+UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
+UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
UBOOT_MAKE_TARGET ?= "all"
# Output the ELF generated. Some platforms can use the ELF file and directly
@@ -33,10 +38,13 @@ UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
# should be packaged along with the u-boot binary as well as placed in the
# deploy directory. For those versions they can set the following variables
# to allow packaging the SPL.
+SPL_SUFFIX ?= ""
SPL_BINARY ?= ""
-SPL_BINARYNAME ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
-SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}"
-SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}"
+SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
+SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
+SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
+SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}"
+SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
# Additional environment variables or a script can be installed alongside
# u-boot to be used automatically on boot. This file, typically 'uEnv.txt'
@@ -45,6 +53,8 @@ SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}"
# include it in the SRC_URI and set the UBOOT_ENV parameter.
UBOOT_ENV_SUFFIX ?= "txt"
UBOOT_ENV ?= ""
+UBOOT_ENV_SRC_SUFFIX ?= "cmd"
+UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
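The `removesuffix()` helper added above mirrors Python 3.9's `str.removesuffix()` so the class does not depend on a newer interpreter. A few worked cases, following the definition in the hunk:

    removesuffix("u-boot-spl.bin", ".bin")  # -> "u-boot-spl"
    removesuffix("u-boot-spl.bin", ".img")  # -> "u-boot-spl.bin" (suffix absent, unchanged)
    removesuffix("u-boot-spl.bin", "")      # -> "u-boot-spl.bin" (empty suffix is a no-op)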
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index be285daa01..dcebe7ff31 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -64,7 +64,7 @@ UBOOT_EXTLINUX_FDT ??= ""
UBOOT_EXTLINUX_FDTDIR ??= "../"
UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
-UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
+UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
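The per-label variables of this class use the colon separator after the migration as well. A hedged recipe-side sketch with two labels (the label names and kernel arguments are illustrative):

    UBOOT_EXTLINUX_LABELS = "default rescue"
    UBOOT_EXTLINUX_MENU_DESCRIPTION:default = "${DISTRO_NAME}"
    UBOOT_EXTLINUX_MENU_DESCRIPTION:rescue = "${DISTRO_NAME} (rescue shell)"
    UBOOT_EXTLINUX_KERNEL_ARGS:rescue = "rootwait rw init=/bin/sh"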
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 9671cf76a5..8d136e9405 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -19,7 +19,7 @@
# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place to
# treat the device tree blob:
#
-# * u-boot:do_install_append
+# * u-boot:do_install:append
# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
#
@@ -131,6 +131,20 @@ concat_dtb_helper() {
elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
cd ${DEPLOYDIR}
cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+
+ if [ -n "${UBOOT_CONFIG}" ]
+ then
+ for config in ${UBOOT_MACHINE}; do
+ i=$(expr $i + 1);
+ for type in ${UBOOT_CONFIG}; do
+ j=$(expr $j + 1);
+ if [ $j -eq $i ]
+ then
+ cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
+ fi
+ done
+ done
+ fi
else
bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
fi
@@ -162,8 +176,8 @@ concat_dtb() {
mkdir -p ${DEPLOYDIR}
if [ -n "${UBOOT_CONFIG}" ]; then
for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="${config}"
- cd ${B}/${config}
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
concat_dtb_helper
done
else
@@ -179,8 +193,8 @@ concat_spl_dtb() {
mkdir -p ${DEPLOYDIR}
if [ -n "${UBOOT_CONFIG}" ]; then
for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="${config}"
- cd ${B}/${config}
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
concat_spl_dtb_helper
done
else
@@ -205,7 +219,7 @@ install_helper() {
fi
}
-# Install SPL dtb and u-boot nodtb to datadir,
+# Install SPL dtb and u-boot nodtb to datadir,
install_spl_helper() {
if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
@@ -227,11 +241,11 @@ install_spl_helper() {
touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
}
-do_install_append() {
+do_install:append() {
if [ "${PN}" = "${UBOOT_PN}" ]; then
if [ -n "${UBOOT_CONFIG}" ]; then
for config in ${UBOOT_MACHINE}; do
- cd ${B}/${config}
+ cd ${B}/$config
if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
[ -n "${UBOOT_DTB_BINARY}" ]; then
install_helper
@@ -286,19 +300,19 @@ addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compi
# Create an ITS file for the U-boot FIT, for use when
# we want to sign it so that the SPL can verify it
uboot_fitimage_assemble() {
- uboot_its="${1}"
- uboot_nodtb_bin="${2}"
- uboot_dtb="${3}"
- uboot_bin="${4}"
- spl_dtb="${5}"
+ uboot_its="$1"
+ uboot_nodtb_bin="$2"
+ uboot_dtb="$3"
+ uboot_bin="$4"
+ spl_dtb="$5"
uboot_csum="${UBOOT_FIT_HASH_ALG}"
uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
- rm -f ${uboot_its} ${uboot_bin}
+ rm -f $uboot_its $uboot_bin
# First we create the ITS script
- cat << EOF >> ${uboot_its}
+ cat << EOF >> $uboot_its
/dts-v1/;
/ {
@@ -308,7 +322,7 @@ uboot_fitimage_assemble() {
images {
uboot {
description = "U-Boot image";
- data = /incbin/("${uboot_nodtb_bin}");
+ data = /incbin/("$uboot_nodtb_bin");
type = "standalone";
os = "u-boot";
arch = "${UBOOT_ARCH}";
@@ -318,34 +332,34 @@ uboot_fitimage_assemble() {
EOF
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
- cat << EOF >> ${uboot_its}
+ cat << EOF >> $uboot_its
signature {
- algo = "${uboot_csum},${uboot_sign_algo}";
- key-name-hint = "${uboot_sign_keyname}";
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
};
EOF
fi
- cat << EOF >> ${uboot_its}
+ cat << EOF >> $uboot_its
};
fdt {
description = "U-Boot FDT";
- data = /incbin/("${uboot_dtb}");
+ data = /incbin/("$uboot_dtb");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
EOF
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
- cat << EOF >> ${uboot_its}
+ cat << EOF >> $uboot_its
signature {
- algo = "${uboot_csum},${uboot_sign_algo}";
- key-name-hint = "${uboot_sign_keyname}";
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
};
EOF
fi
- cat << EOF >> ${uboot_its}
+ cat << EOF >> $uboot_its
};
};
@@ -365,8 +379,8 @@ EOF
#
${UBOOT_MKIMAGE} \
${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
- -f ${uboot_its} \
- ${uboot_bin}
+ -f $uboot_its \
+ $uboot_bin
if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
#
@@ -375,8 +389,8 @@ EOF
${UBOOT_MKIMAGE_SIGN} \
${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${SPL_SIGN_KEYDIR}" \
- -K "${spl_dtb}" \
- -r ${uboot_bin} \
+ -K "$spl_dtb" \
+ -r $uboot_bin \
${SPL_MKIMAGE_SIGN_ARGS}
fi
@@ -408,15 +422,15 @@ do_uboot_assemble_fitimage() {
kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
cd ${B}
- uboot_fitimage_assemble ${kernel_uboot_its_name} ${UBOOT_NODTB_BINARY} \
- ${UBOOT_DTB_BINARY} ${kernel_uboot_fitimage_name} \
+ uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
+ ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
${SPL_DTB_BINARY}
fi
}
addtask uboot_assemble_fitimage before do_deploy after do_compile
-do_deploy_prepend_pn-${UBOOT_PN}() {
+do_deploy:prepend:pn-${UBOOT_PN}() {
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
concat_dtb
fi
@@ -446,7 +460,7 @@ do_deploy_prepend_pn-${UBOOT_PN}() {
}
-do_deploy_append_pn-${UBOOT_PN}() {
+do_deploy:append:pn-${UBOOT_PN}() {
# If we're creating a u-boot fitImage, re-point the u-boot.bin
# symlink, since it might get used by image recipes
if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 1e19917a97..6a9e862bcd 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -2,15 +2,15 @@ UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
# Example checksums
#UNINATIVE_CHECKSUM[aarch64] = "dead"
#UNINATIVE_CHECKSUM[i686] = "dead"
#UNINATIVE_CHECKSUM[x86_64] = "dead"
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
-# Enabling uninative will change the following variables so they need to go the parsing white list to prevent multiple recipe parsing
-BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
+# Enabling uninative will change the following variables, so they need to go on the parsing ignored-variables list to prevent multiple recipe parsing
+BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
addhandler uninative_event_fetchloader
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
@@ -100,7 +100,7 @@ ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
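`BB_HASHCONFIG_WHITELIST` is one of several hash-related variables renamed in this series; the waf.bbclass hunk below renames its base-hash counterpart the same way. For reference, the old-to-new mapping plus a hedged local.conf sketch (the added variable name is illustrative):

    # BB_HASHBASE_WHITELIST    -> BB_BASEHASH_IGNORE_VARS
    # BB_HASHCONFIG_WHITELIST  -> BB_HASHCONFIG_IGNORE_VARS
    BB_HASHCONFIG_IGNORE_VARS += "MY_SITE_LOCAL_VAR"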
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index 000e4d5664..fc1ffd828c 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -6,9 +6,9 @@
# To use this class, a number of variables should be defined:
#
# List all of the alternatives needed by a package:
-# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
+# ALTERNATIVE:<pkg> = "name1 name2 name3 ..."
#
-# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
+# e.g. ALTERNATIVE:busybox = "sh sed test bracket"
#
# The pathname of the link
# ALTERNATIVE_LINK_NAME[name] = "target"
@@ -123,7 +123,7 @@ def gen_updatealternativesvars(d):
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
ret.append(v + "_VARDEPS_" + p)
return " ".join(ret)
@@ -141,10 +141,10 @@ python apply_update_alternative_renames () {
import re
def update_files(alt_target, alt_target_rename, pkg, d):
- f = d.getVar('FILES_' + pkg)
+ f = d.getVar('FILES:' + pkg)
if f:
f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
- d.setVar('FILES_' + pkg, f)
+ d.setVar('FILES:' + pkg, f)
# Check for deprecated usage...
pn = d.getVar('BPN')
@@ -156,7 +156,7 @@ python apply_update_alternative_renames () {
for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
link_rename = []
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
@@ -233,7 +233,7 @@ def update_alternatives_alt_targets(d, pkg):
pn = d.getVar('BPN')
pkgdest = d.getVar('PKGD')
updates = list()
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
@@ -259,7 +259,7 @@ def update_alternatives_alt_targets(d, pkg):
return updates
-PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
+PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
if not update_alternatives_enabled(d):
@@ -280,24 +280,24 @@ python populate_packages_updatealternatives () {
provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
- d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
+ d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if postinst:
postinst = alt_setup_links + postinst
else:
postinst = '#!/bin/sh\n' + alt_setup_links
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
+ prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n'
prerm += alt_remove_links
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
}
-python package_do_filedeps_append () {
+python package_do_filedeps:append () {
if update_alternatives_enabled(d):
apply_update_alternative_provides(d)
}
@@ -307,7 +307,7 @@ def apply_update_alternative_provides(d):
pkgdest = d.getVar('PKGDEST')
for pkg in d.getVar('PACKAGES').split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
@@ -321,7 +321,7 @@ def apply_update_alternative_provides(d):
# Add file provide
trans_target = oe.package.file_translate(alt_target)
- d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
- d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
+ d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link)
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""):
+ d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target)
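Pulling the renamed variables together, a minimal recipe-side sketch of the class interface under the new syntax (the `vi` alternative, paths and priority are illustrative):

    ALTERNATIVE:${PN} = "vi"
    ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
    ALTERNATIVE_TARGET[vi] = "${base_bindir}/busybox.nosuid"
    ALTERNATIVE_PRIORITY[vi] = "50"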
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 1366fee653..0a3a608662 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,11 +1,11 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
+DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
UPDATERCD = "update-rc.d"
-UPDATERCD_class-cross = ""
-UPDATERCD_class-native = ""
-UPDATERCD_class-nativesdk = ""
+UPDATERCD:class-cross = ""
+UPDATERCD:class-native = ""
+UPDATERCD:class-nativesdk = ""
INITSCRIPT_PARAMS ?= "defaults"
@@ -62,8 +62,8 @@ python __anonymous() {
update_rc_after_parse(d)
}
-PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
-PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
+PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
+PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
@@ -78,7 +78,7 @@ python populate_packages_updatercd () {
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
mlprefix = d.getVar('MLPREFIX') or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
+ d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix))
def update_rcd_package(pkg):
bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
@@ -89,25 +89,25 @@ python populate_packages_updatercd () {
update_rcd_auto_depend(pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('updatercd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('updatercd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += localdata.getVar('updatercd_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
- d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
+ d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}")
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
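For reference, a hedged sketch of the recipe variables this class consumes under the new override syntax (the script name and start/stop numbers are illustrative):

    INITSCRIPT_PACKAGES = "${PN}"
    INITSCRIPT_NAME:${PN} = "mydaemon"
    INITSCRIPT_PARAMS:${PN} = "defaults 90 10"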
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..3acf59cd46 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -77,7 +77,7 @@ def update_useradd_static_config(d):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -140,13 +140,13 @@ def update_useradd_static_config(d):
uaargs.gid = uaargs.groupid
uaargs.user_group = None
if newgroup and is_pkg:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
+ groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
if groupadd:
# Only add the group if not already specified
if not uaargs.groupname in groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
- d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+ d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
uaargs.home_dir = field[5] or uaargs.home_dir
@@ -174,8 +174,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][uaargs.non_unique]
if uaargs.password != None:
newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
- elif uaargs.clear_password:
- newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -198,7 +196,7 @@ def update_useradd_static_config(d):
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -236,8 +234,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][gaargs.non_unique]
if gaargs.password != None:
newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
- elif gaargs.clear_password:
- newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -265,17 +261,17 @@ def update_useradd_static_config(d):
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
+ useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
if useradd_param:
- #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
+ d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
+ groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
if groupadd_param:
- #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
+ d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
# Load and process extra users and groups, rewriting only adduser/addgroup params
pkg = d.getVar('PN')
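The tables named in USERADD_UID_TABLES/USERADD_GID_TABLES follow the standard passwd/group layouts referenced above. An illustrative files/passwd fragment (the account name and IDs are made up):

    # name:password:UID:GID:comment:home:shell
    myservice:x:801:801:My service daemon:/var/lib/myservice:/bin/false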
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..20771a0ce5 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -3,7 +3,7 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
@@ -164,16 +164,16 @@ python useradd_sysroot_sstate () {
}
do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
+SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
-SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
+SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
-USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
@@ -184,8 +184,8 @@ def update_useradd_after_parse(d):
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg))
- if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
+ d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
+ if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -199,7 +199,7 @@ python __anonymous() {
def get_all_cmd_params(d, cmd_type):
import string
- param_type = cmd_type.upper() + "_PARAM_%s"
+ param_type = cmd_type.upper() + "_PARAM:%s"
params = []
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
@@ -211,7 +211,7 @@ def get_all_cmd_params(d, cmd_type):
return "; ".join(params)
# Adds the preinst script into generated packages
-fakeroot python populate_packages_prepend () {
+fakeroot python populate_packages:prepend () {
def update_useradd_package(pkg):
bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
@@ -220,7 +220,7 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
+ preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
@@ -230,15 +230,19 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + ":" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
+ d.setVar('pkg_preinst:%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
+ rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
- d.setVar("RDEPENDS_%s" % pkg, rdepends)
+ d.setVar("RDEPENDS:%s" % pkg, rdepends)
# Add the user/group preinstall scripts and RDEPENDS requirements
# to packages specified by USERADD_PACKAGES
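As a usage sketch under the renamed variables, a recipe inheriting useradd would now read roughly as follows (the account and group names are illustrative):

    inherit useradd
    USERADD_PACKAGES = "${PN}"
    GROUPADD_PARAM:${PN} = "--system myservice"
    USERADD_PARAM:${PN} = "--system --no-create-home --shell /bin/false --gid myservice myservice"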
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
index b1f27d3658..0466325c13 100644
--- a/meta/classes/utility-tasks.bbclass
+++ b/meta/classes/utility-tasks.bbclass
@@ -19,7 +19,7 @@ python do_listtasks() {
CLEANFUNCS ?= ""
-T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
+T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
@@ -38,6 +38,7 @@ python do_clean() {
addtask checkuri
do_checkuri[nostamp] = "1"
+do_checkuri[network] = "1"
python do_checkuri() {
src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 120bcc64a6..b4eb3d38ab 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -30,7 +30,6 @@ oe_libinstall() {
silent=""
require_static=""
require_shared=""
- staging_install=""
while [ "$#" -gt 0 ]; do
case "$1" in
-C)
@@ -62,10 +61,6 @@ oe_libinstall() {
if [ -z "$destpath" ]; then
bbfatal "oe_libinstall: no destination path specified"
fi
- if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
- then
- staging_install=1
- fi
__runcmd () {
if [ -z "$silent" ]; then
@@ -159,36 +154,6 @@ oe_libinstall() {
__runcmd cd "$olddir"
}
-oe_machinstall() {
- # Purpose: Install machine dependent files, if available
- # If not available, check if there is a default
- # If no default, just touch the destination
- # Example:
- # $1 $2 $3 $4
- # oe_machinstall -m 0644 fstab ${D}/etc/fstab
- #
- # TODO: Check argument number?
- #
- filename=`basename $3`
- dirname=`dirname $3`
-
- for o in `echo ${OVERRIDES} | tr ':' ' '`; do
- if [ -e $dirname/$o/$filename ]; then
- bbnote $dirname/$o/$filename present, installing to $4
- install $1 $2 $dirname/$o/$filename $4
- return
- fi
- done
-# bbnote overrides specific file NOT present, trying default=$3...
- if [ -e $3 ]; then
- bbnote $3 present, installing to $4
- install $1 $2 $3 $4
- else
- bbnote $3 NOT present, touching empty $4
- touch $4
- fi
-}
-
create_cmdline_wrapper () {
# Create a wrapper script where commandline options are needed
#
@@ -214,7 +179,7 @@ create_cmdline_wrapper () {
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
+exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
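The `$realdir` change above alters the wrapper body that `create_cmdline_wrapper` emits. A hedged call-site sketch (the binary name and option are illustrative):

    do_install:append() {
        # renames foo to foo.real and installs a wrapper that always
        # passes the extra option, resolving its own path via readlink -fn
        create_cmdline_wrapper ${D}${bindir}/foo --config=${sysconfdir}/foo.conf
    }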
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
index bcaf68c5a7..bfcceff7cf 100644
--- a/meta/classes/vala.bbclass
+++ b/meta/classes/vala.bbclass
@@ -2,8 +2,8 @@
# because that is where target builds look for .vapi files.
#
VALADEPENDS = ""
-VALADEPENDS_class-target = "vala"
-DEPENDS_append = " vala-native ${VALADEPENDS}"
+VALADEPENDS:class-target = "vala"
+DEPENDS:append = " vala-native ${VALADEPENDS}"
# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
export STAGING_DATADIR
@@ -11,7 +11,7 @@ export STAGING_DATADIR
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
# Package additional files
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/vala/vapi/*.vapi \
${datadir}/vala/vapi/*.deps \
${datadir}/gir-1.0 \
@@ -19,6 +19,6 @@ FILES_${PN}-dev += "\
# Remove vapigen.m4 that is bundled with tarballs
# because it does not yet have our cross-compile fixes
-do_configure_prepend() {
+do_configure:prepend() {
rm -f ${S}/m4/vapigen.m4
}
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
index 188119f356..464564afa1 100644
--- a/meta/classes/waf.bbclass
+++ b/meta/classes/waf.bbclass
@@ -8,7 +8,7 @@ WAF_PYTHON ?= "python3"
B = "${WORKDIR}/build"
do_configure[cleandirs] += "${B}"
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
EXTRA_OEWAF_BUILD ??= ""
# In most cases, you want to pass the same arguments to `waf build` and `waf
@@ -39,18 +39,17 @@ def waflock_hash(d):
# directory (e.g. if the source is coming from externalsrc and was previously
# configured elsewhere).
export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
-BB_HASHBASE_WHITELIST += "WAFLOCK"
+BB_BASEHASH_IGNORE_VARS += "WAFLOCK"
python waf_preconfigure() {
import subprocess
- from distutils.version import StrictVersion
subsrcdir = d.getVar('S')
python = d.getVar('WAF_PYTHON')
wafbin = os.path.join(subsrcdir, 'waf')
try:
result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
version = result.decode('utf-8').split()[1]
- if StrictVersion(version) >= StrictVersion("1.8.7"):
+ if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
except subprocess.CalledProcessError as e:
bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
index ae4811fdeb..be155b7bc2 100644
--- a/meta/classes/xmlcatalog.bbclass
+++ b/meta/classes/xmlcatalog.bbclass
@@ -4,7 +4,7 @@ DEPENDS = "libxml2-native"
# "${sysconfdir}/xml/docbook-xml.xml".
XMLCATALOGS ?= ""
-SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
+SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
xmlcatalog_complete() {
ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
diff --git a/meta/classes/yocto-check-layer.bbclass b/meta/classes/yocto-check-layer.bbclass
new file mode 100644
index 0000000000..329d3f8edb
--- /dev/null
+++ b/meta/classes/yocto-check-layer.bbclass
@@ -0,0 +1,16 @@
+#
+# This class is used by the yocto-check-layer script for additional per-recipe tests.
+# The first test ensures that the layer has no recipes skipping the 'installed-vs-shipped' QA check.
+#
+
+WARN_QA:remove = "installed-vs-shipped"
+ERROR_QA:append = " installed-vs-shipped"
+
+python () {
+ packages = set((d.getVar('PACKAGES') or '').split())
+ for package in packages:
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
+ if 'installed-vs-shipped' in skip:
+ oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
+}