Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 2
-rw-r--r--  meta/classes/archiver.bbclass | 200
-rw-r--r--  meta/classes/autotools.bbclass | 41
-rw-r--r--  meta/classes/baremetal-image.bbclass | 121
-rw-r--r--  meta/classes/base.bbclass | 296
-rw-r--r--  meta/classes/bash-completion.bbclass | 6
-rw-r--r--  meta/classes/bin_package.bbclass | 2
-rw-r--r--  meta/classes/binconfig-disabled.bbclass | 4
-rw-r--r--  meta/classes/binconfig.bbclass | 11
-rw-r--r--  meta/classes/blacklist.bbclass | 20
-rw-r--r--  meta/classes/bluetooth.bbclass | 14
-rw-r--r--  meta/classes/buildhistory.bbclass | 232
-rw-r--r--  meta/classes/buildstats.bbclass | 90
-rw-r--r--  meta/classes/cargo.bbclass | 90
-rw-r--r--  meta/classes/cargo_common.bbclass | 124
-rw-r--r--  meta/classes/ccache.bbclass | 6
-rw-r--r--  meta/classes/ccmake.bbclass | 97
-rw-r--r--  meta/classes/chrpath.bbclass | 55
-rw-r--r--  meta/classes/clutter.bbclass | 17
-rw-r--r--  meta/classes/cmake.bbclass | 131
-rw-r--r--  meta/classes/cml1.bbclass | 40
-rw-r--r--  meta/classes/compress_doc.bbclass | 6
-rw-r--r--  meta/classes/core-image.bbclass | 7
-rw-r--r--  meta/classes/cpan-base.bbclass | 15
-rw-r--r--  meta/classes/cpan.bbclass | 4
-rw-r--r--  meta/classes/create-spdx.bbclass | 1022
-rw-r--r--  meta/classes/cross-canadian.bbclass | 18
-rw-r--r--  meta/classes/cross.bbclass | 10
-rw-r--r--  meta/classes/crosssdk.bbclass | 7
-rw-r--r--  meta/classes/cve-check.bbclass | 365
-rw-r--r--  meta/classes/debian.bbclass | 18
-rw-r--r--  meta/classes/deploy.bbclass | 3
-rw-r--r--  meta/classes/devicetree.bbclass | 23
-rw-r--r--  meta/classes/devshell.bbclass | 11
-rw-r--r--  meta/classes/devtool-source.bbclass | 16
-rw-r--r--  meta/classes/devupstream.bbclass | 25
-rw-r--r--  meta/classes/distro_features_check.bbclass | 35
-rw-r--r--  meta/classes/distrooverrides.bbclass | 6
-rw-r--r--  meta/classes/distutils-base.bbclass | 4
-rw-r--r--  meta/classes/distutils.bbclass | 92
-rw-r--r--  meta/classes/distutils3-base.bbclass | 5
-rw-r--r--  meta/classes/distutils3.bbclass | 86
-rw-r--r--  meta/classes/externalsrc.bbclass | 46
-rw-r--r--  meta/classes/extrausers.bbclass | 7
-rw-r--r--  meta/classes/features_check.bbclass | 54
-rw-r--r--  meta/classes/fontcache.bbclass | 14
-rw-r--r--  meta/classes/gconf.bbclass | 12
-rw-r--r--  meta/classes/gettext.bbclass | 6
-rw-r--r--  meta/classes/gi-docgen.bbclass | 24
-rw-r--r--  meta/classes/gio-module-cache.bbclass | 10
-rw-r--r--  meta/classes/glide.bbclass | 4
-rw-r--r--  meta/classes/gnome.bbclass | 1
-rw-r--r--  meta/classes/gnomebase.bbclass | 9
-rw-r--r--  meta/classes/go-mod.bbclass | 20
-rw-r--r--  meta/classes/go-ptest.bbclass | 54
-rw-r--r--  meta/classes/go.bbclass | 136
-rw-r--r--  meta/classes/goarch.bbclass | 74
-rw-r--r--  meta/classes/gobject-introspection.bbclass | 34
-rw-r--r--  meta/classes/godep.bbclass | 8
-rw-r--r--  meta/classes/grub-efi-cfg.bbclass | 11
-rw-r--r--  meta/classes/grub-efi.bbclass | 35
-rw-r--r--  meta/classes/gsettings.bbclass | 14
-rw-r--r--  meta/classes/gtk-doc.bbclass | 29
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 53
-rw-r--r--  meta/classes/gtk-immodules-cache.bbclass | 11
-rw-r--r--  meta/classes/icecc.bbclass | 137
-rw-r--r--  meta/classes/image-artifact-names.bbclass | 22
-rw-r--r--  meta/classes/image-combined-dbg.bbclass | 2
-rw-r--r--  meta/classes/image-container.bbclass | 2
-rw-r--r--  meta/classes/image-live.bbclass | 16
-rw-r--r--  meta/classes/image-mklibs.bbclass | 56
-rw-r--r--  meta/classes/image-prelink.bbclass | 64
-rw-r--r--  meta/classes/image.bbclass | 120
-rw-r--r--  meta/classes/image_types.bbclass | 170
-rw-r--r--  meta/classes/image_types_wic.bbclass | 81
-rw-r--r--  meta/classes/insane.bbclass | 756
-rw-r--r--  meta/classes/kernel-artifact-names.bbclass | 13
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 64
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 596
-rw-r--r--  meta/classes/kernel-grub.bbclass | 4
-rw-r--r--  meta/classes/kernel-module-split.bbclass | 76
-rw-r--r--  meta/classes/kernel-uboot.bbclass | 18
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 482
-rw-r--r--  meta/classes/kernel.bbclass | 327
-rw-r--r--  meta/classes/kernelsrc.bbclass | 2
-rw-r--r--  meta/classes/libc-common.bbclass | 37
-rw-r--r--  meta/classes/libc-package.bbclass | 76
-rw-r--r--  meta/classes/license.bbclass | 173
-rw-r--r--  meta/classes/license_image.bbclass | 108
-rw-r--r--  meta/classes/linux-dummy.bbclass | 26
-rw-r--r--  meta/classes/linuxloader.bbclass | 45
-rw-r--r--  meta/classes/live-vm-common.bbclass | 33
-rw-r--r--  meta/classes/manpages.bbclass | 20
-rw-r--r--  meta/classes/mcextend.bbclass | 16
-rw-r--r--  meta/classes/meson-routines.bbclass | 51
-rw-r--r--  meta/classes/meson.bbclass | 179
-rw-r--r--  meta/classes/meta.bbclass | 4
-rw-r--r--  meta/classes/metadata_scm.bbclass | 8
-rw-r--r--  meta/classes/mime-xdg.bbclass | 74
-rw-r--r--  meta/classes/mime.bbclass | 81
-rw-r--r--  meta/classes/mirrors.bbclass | 136
-rw-r--r--  meta/classes/module.bbclass | 6
-rw-r--r--  meta/classes/multilib.bbclass | 119
-rw-r--r--  meta/classes/multilib_global.bbclass | 104
-rw-r--r--  meta/classes/multilib_header.bbclass | 4
-rw-r--r--  meta/classes/multilib_script.bbclass | 20
-rw-r--r--  meta/classes/native.bbclass | 72
-rw-r--r--  meta/classes/nativesdk.bbclass | 12
-rw-r--r--  meta/classes/nopackages.bbclass | 1
-rw-r--r--  meta/classes/npm.bbclass | 378
-rw-r--r--  meta/classes/overlayfs-etc.bbclass | 76
-rw-r--r--  meta/classes/overlayfs.bbclass | 119
-rw-r--r--  meta/classes/own-mirrors.bbclass | 25
-rw-r--r--  meta/classes/package.bbclass | 863
-rw-r--r--  meta/classes/package_deb.bbclass | 19
-rw-r--r--  meta/classes/package_ipk.bbclass | 26
-rw-r--r--  meta/classes/package_pkgdata.bbclass | 167
-rw-r--r--  meta/classes/package_rpm.bbclass | 97
-rw-r--r--  meta/classes/package_tar.bbclass | 6
-rw-r--r--  meta/classes/packagedata.bbclass | 4
-rw-r--r--  meta/classes/packagefeed-stability.bbclass | 252
-rw-r--r--  meta/classes/packagegroup.bbclass | 4
-rw-r--r--  meta/classes/patch.bbclass | 15
-rw-r--r--  meta/classes/pixbufcache.bbclass | 17
-rw-r--r--  meta/classes/pkgconfig.bbclass | 2
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 83
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 275
-rw-r--r--  meta/classes/ptest-gnome.bbclass | 6
-rw-r--r--  meta/classes/ptest-perl.bbclass | 6
-rw-r--r--  meta/classes/ptest.bbclass | 68
-rw-r--r--  meta/classes/pypi.bbclass | 12
-rw-r--r--  meta/classes/python-dir.bbclass | 5
-rw-r--r--  meta/classes/python3-dir.bbclass | 4
-rw-r--r--  meta/classes/python3native.bbclass | 12
-rw-r--r--  meta/classes/python3targetconfig.bbclass | 29
-rw-r--r--  meta/classes/python_flit_core.bbclass | 5
-rw-r--r--  meta/classes/python_pep517.bbclass | 56
-rw-r--r--  meta/classes/python_poetry_core.bbclass | 5
-rw-r--r--  meta/classes/python_pyo3.bbclass | 30
-rw-r--r--  meta/classes/python_setuptools3_rust.bbclass | 11
-rw-r--r--  meta/classes/pythonnative.bbclass | 19
-rw-r--r--  meta/classes/qemu.bbclass | 3
-rw-r--r--  meta/classes/qemuboot.bbclass | 57
-rw-r--r--  meta/classes/relocatable.bbclass | 20
-rw-r--r--  meta/classes/report-error.bbclass | 25
-rw-r--r--  meta/classes/reproducible_build.bbclass | 170
-rw-r--r--  meta/classes/reproducible_build_simple.bbclass | 10
-rw-r--r--  meta/classes/rm_work.bbclass | 63
-rw-r--r--  meta/classes/rm_work_and_downloads.bbclass | 2
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 110
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 6
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 8
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 6
-rw-r--r--  meta/classes/rootfsdebugfiles.bbclass | 2
-rw-r--r--  meta/classes/rust-bin.bbclass | 149
-rw-r--r--  meta/classes/rust-common.bbclass | 185
-rw-r--r--  meta/classes/rust.bbclass | 45
-rw-r--r--  meta/classes/sanity.bbclass | 211
-rw-r--r--  meta/classes/scons.bbclass | 12
-rw-r--r--  meta/classes/setuptools.bbclass | 3
-rw-r--r--  meta/classes/setuptools3-base.bbclass (renamed from meta/classes/distutils-common-base.bbclass) | 14
-rw-r--r--  meta/classes/setuptools3.bbclass | 33
-rw-r--r--  meta/classes/setuptools3_legacy.bbclass | 78
-rw-r--r--  meta/classes/setuptools_build_meta.bbclass | 5
-rw-r--r--  meta/classes/sign_package_feed.bbclass | 2
-rw-r--r--  meta/classes/sign_rpm.bbclass | 1
-rw-r--r--  meta/classes/siteinfo.bbclass | 61
-rw-r--r--  meta/classes/spdx.bbclass | 360
-rw-r--r--  meta/classes/sstate.bbclass | 530
-rw-r--r--  meta/classes/staging.bbclass | 150
-rw-r--r--  meta/classes/syslinux.bbclass | 5
-rw-r--r--  meta/classes/systemd-boot-cfg.bbclass | 3
-rw-r--r--  meta/classes/systemd-boot.bbclass | 31
-rw-r--r--  meta/classes/systemd.bbclass | 93
-rw-r--r--  meta/classes/terminal.bbclass | 9
-rw-r--r--  meta/classes/testexport.bbclass | 8
-rw-r--r--  meta/classes/testimage.bbclass | 158
-rw-r--r--  meta/classes/testsdk.bbclass | 2
-rw-r--r--  meta/classes/texinfo.bbclass | 16
-rw-r--r--  meta/classes/tinderclient.bbclass | 368
-rw-r--r--  meta/classes/toaster.bbclass | 28
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 11
-rw-r--r--  meta/classes/uboot-config.bbclass | 72
-rw-r--r--  meta/classes/uboot-extlinux-config.bbclass | 18
-rw-r--r--  meta/classes/uboot-sign.bbclass | 470
-rw-r--r--  meta/classes/uninative.bbclass | 29
-rw-r--r--  meta/classes/update-alternatives.bbclass | 113
-rw-r--r--  meta/classes/update-rc.d.bbclass | 56
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 34
-rw-r--r--  meta/classes/useradd.bbclass | 33
-rw-r--r--  meta/classes/useradd_base.bbclass | 18
-rw-r--r--  meta/classes/utility-tasks.bbclass | 3
-rw-r--r--  meta/classes/utils.bbclass | 58
-rw-r--r--  meta/classes/vala.bbclass | 10
-rw-r--r--  meta/classes/waf.bbclass | 50
-rw-r--r--  meta/classes/xmlcatalog.bbclass | 26
-rw-r--r--  meta/classes/yocto-check-layer.bbclass | 16
197 files changed, 9993 insertions, 5218 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
index 5bd5c44a27..a766a654a9 100644
--- a/meta/classes/allarch.bbclass
+++ b/meta/classes/allarch.bbclass
@@ -61,3 +61,5 @@ python () {
bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
}
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+ return 'false'
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index af9f010dfc..c19c770d11 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -2,25 +2,42 @@
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# This bbclass is used for creating archive for:
-# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
-# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
-# 3) configured source: ARCHIVER_MODE[src] = "configured"
-# 4) The patches between do_unpack and do_patch:
-# ARCHIVER_MODE[diff] = "1"
-# And you can set the one that you'd like to exclude from the diff:
-# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
-# 5) The environment data, similar to 'bitbake -e recipe':
-# ARCHIVER_MODE[dumpdata] = "1"
-# 6) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
-# 7) Whether output the .src.rpm package:
-# ARCHIVER_MODE[srpm] = "1"
-# 8) Filter the license, the recipe whose license in
-# COPYLEFT_LICENSE_INCLUDE will be included, and in
-# COPYLEFT_LICENSE_EXCLUDE will be excluded.
-# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
-# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
-# 9) The recipe type that will be archived:
-# COPYLEFT_RECIPE_TYPES = 'target'
+# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
+# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
+# 3) configured source: ARCHIVER_MODE[src] = "configured"
+# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
+# 5) The patches between do_unpack and do_patch:
+# ARCHIVER_MODE[diff] = "1"
+# And you can set the one that you'd like to exclude from the diff:
+# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
+# 6) The environment data, similar to 'bitbake -e recipe':
+# ARCHIVER_MODE[dumpdata] = "1"
+# 7) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
+# 8) Whether to output the .src.rpm package:
+# ARCHIVER_MODE[srpm] = "1"
+# 9) Filter by license: a recipe whose license matches
+# COPYLEFT_LICENSE_INCLUDE will be included, and one whose license matches
+# COPYLEFT_LICENSE_EXCLUDE will be excluded.
+# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
+# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
+# 10) The recipe type that will be archived:
+# COPYLEFT_RECIPE_TYPES = 'target'
+# 11) The source mirror mode:
+# ARCHIVER_MODE[mirror] = "split" (default): Sources are split into
+# per-recipe directories in a similar way to other archiver modes.
+# Post-processing may be required to produce a single mirror directory.
+# This does however allow inspection of duplicate sources and more
+# intelligent handling.
+# ARCHIVER_MODE[mirror] = "combined": All sources are placed into a single
+# directory suitable for direct use as a mirror. Duplicate sources are
+# ignored.
+# 12) Source mirror exclusions:
+# ARCHIVER_MIRROR_EXCLUDE is a list of prefixes to exclude from the mirror.
+# This may be used for sources which you are already publishing yourself
+# (e.g. if the URI starts with 'https://mysite.com/' and your mirror is
+# going to be published to the same site). It may also be used to exclude
+# local files (with the prefix 'file://') if these will be provided as part
+# of an archive of the layers themselves.
#
# Create archive for all the recipe types
@@ -33,14 +50,20 @@ ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
+ARCHIVER_MODE[mirror] ?= "split"
+ARCHIVER_MODE[compression] ?= "xz"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
-ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
+ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
+# When producing a combined mirror directory, allow duplicates for the case
+# where multiple recipes use the same SRC_URI.
+ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
@@ -78,6 +101,9 @@ python () {
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
+ def hasTask(task):
+ return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
+
ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')
@@ -93,19 +119,18 @@ python () {
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
# We can't use "addtask do_ar_configured after do_configure" since it
- # will cause the deptask of do_populate_sysroot to run not matter what
+ # will cause the deptask of do_populate_sysroot to run no matter what
# archives we need, so we add the depends here.
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
# the "do_configure" task, so we need to use "do_preconfigure"
- def hasTask(task):
- return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
-
if hasTask("do_preconfigure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
elif hasTask("do_configure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
+ elif ar_src == "mirror":
+ d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_mirror' % pn)
elif ar_src:
bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
@@ -118,7 +143,11 @@ python () {
# Output the SRPM package
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
- if "package_rpm" in d.getVar('PACKAGE_CLASSES'):
+ if "package_rpm" not in d.getVar('PACKAGE_CLASSES'):
+ bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
+
+ # Some recipes do not have any packaging tasks
+ if hasTask("do_package_write_rpm"):
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
d.appendVarFlag('do_package_write_rpm', 'dirs', ' ${ARCHIVER_RPMTOPDIR}')
d.appendVarFlag('do_package_write_rpm', 'sstate-inputdirs', ' ${ARCHIVER_RPMTOPDIR}')
@@ -133,11 +162,9 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
- else:
- bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
}
-# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
+# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {
@@ -167,7 +194,13 @@ python do_ar_original() {
del decoded[5][param]
encoded = bb.fetch2.encodeurl(decoded)
urls[i] = encoded
- fetch = bb.fetch2.Fetch(urls, d)
+
+ # Clean up SRC_URI before calling bb.fetch2.Fetch(), since SRC_URI is now in the
+ # variable "urls", otherwise there might be errors like:
+ # The SRCREV_FORMAT variable must be set when multiple SCMs are used
+ ld = bb.data.createCopy(d)
+ ld.setVar('SRC_URI', '')
+ fetch = bb.fetch2.Fetch(urls, ld)
tarball_suffix = {}
for url in fetch.urls:
local = fetch.localpath(url).rstrip("/");
@@ -219,9 +252,10 @@ python do_ar_patched() {
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
- ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ if not is_work_shared(d):
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ d.setVar('WORKDIR', ar_workdir)
bb.note('Archiving the patched source...')
- d.setVar('WORKDIR', ar_workdir)
create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
@@ -248,7 +282,10 @@ python do_ar_configured() {
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
# do_configure, we archive the already configured ${S} to
# instead of.
- elif pn != 'libtool-native':
+ # The kernel class functions require the source to be on work-shared,
+ # so we don't unpack, patch or configure again; just archive the
+ # already configured ${S}
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -278,6 +315,78 @@ python do_ar_configured() {
create_tarball(d, srcdir, 'configured', ar_outdir)
}
+python do_ar_mirror() {
+ import subprocess
+
+ src_uri = (d.getVar('SRC_URI') or '').split()
+ if len(src_uri) == 0:
+ return
+
+ dl_dir = d.getVar('DL_DIR')
+ mirror_exclusions = (d.getVar('ARCHIVER_MIRROR_EXCLUDE') or '').split()
+ mirror_mode = d.getVarFlag('ARCHIVER_MODE', 'mirror')
+ have_mirror_tarballs = d.getVar('BB_GENERATE_MIRROR_TARBALLS')
+
+ if mirror_mode == 'combined':
+ destdir = d.getVar('ARCHIVER_COMBINED_MIRRORDIR')
+ elif mirror_mode == 'split':
+ destdir = d.getVar('ARCHIVER_OUTDIR')
+ else:
+ bb.fatal('Invalid ARCHIVER_MODE[mirror]: %s' % (mirror_mode))
+
+ if not have_mirror_tarballs:
+ bb.fatal('Using `ARCHIVER_MODE[src] = "mirror"` depends on setting `BB_GENERATE_MIRROR_TARBALLS = "1"`')
+
+ def is_excluded(url):
+ for prefix in mirror_exclusions:
+ if url.startswith(prefix):
+ return True
+ return False
+
+ bb.note('Archiving the source as a mirror...')
+
+ bb.utils.mkdirhier(destdir)
+
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+
+ for ud in fetcher.expanded_urldata():
+ if is_excluded(ud.url):
+ bb.note('Skipping excluded url: %s' % (ud.url))
+ continue
+
+ bb.note('Archiving url: %s' % (ud.url))
+ ud.setup_localpath(d)
+ localpath = None
+
+ # Check for mirror tarballs first. We will archive the first mirror
+ # tarball that we find as it's assumed that we just need one.
+ for mirror_fname in ud.mirrortarballs:
+ mirror_path = os.path.join(dl_dir, mirror_fname)
+ if os.path.exists(mirror_path):
+ bb.note('Found mirror tarball: %s' % (mirror_path))
+ localpath = mirror_path
+ break
+
+ if len(ud.mirrortarballs) and not localpath:
+ bb.warn('Mirror tarballs are listed for a source but none are present. ' \
+ 'Falling back to original download.\n' \
+ 'SRC_URI = %s' % (ud.url))
+
+ # Check original download
+ if not localpath:
+ bb.note('Using original download: %s' % (ud.localpath))
+ localpath = ud.localpath
+
+ if not localpath or not os.path.exists(localpath):
+ bb.fatal('Original download is missing for a source.\n' \
+ 'SRC_URI = %s' % (ud.url))
+
+ # We now have an appropriate localpath
+ bb.note('Copying source mirror')
+ cmd = 'cp -fpPRH %s %s' % (localpath, destdir)
+ subprocess.check_call(cmd, shell=True)
+}
+
def exclude_useless_paths(tarinfo):
if tarinfo.isdir():
if tarinfo.name.endswith('/temp') or tarinfo.name.endswith('/patches') or tarinfo.name.endswith('/.pc'):
@@ -301,15 +410,16 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
# that we archive the actual directory and not just the link.
srcdir = os.path.realpath(srcdir)
+ compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
+ filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
else:
- filename = '%s.tar.gz' % d.getVar('PF')
+ filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
- tar = tarfile.open(tarname, 'w:gz')
+ tar = tarfile.open(tarname, 'w:%s' % compression_method)
tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
tar.close()
@@ -358,7 +468,7 @@ python do_unpack_and_patch() {
ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
pn = d.getVar('PN')
- # The kernel class functions require it to be on work-shared, so we dont change WORKDIR
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
@@ -378,6 +488,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
@@ -400,7 +513,7 @@ python do_unpack_and_patch() {
# of the output file ensures that we create it each time the recipe
# gets rebuilt, at least as long as a PR server is used. We also rely
# on that mechanism to catch changes in the file content, because the
-# file content is not part of of the task signature either.
+# file content is not part of the task signature either.
do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
@@ -438,9 +551,10 @@ python do_ar_recipe () {
incfile = include_re.match(line).group(1)
if incfile:
incfile = d.expand(incfile)
+ if incfile:
incfile = bb.utils.which(bbpath, incfile)
- if incfile:
- shutil.copy(incfile, outdir)
+ if incfile:
+ shutil.copy(incfile, outdir)
create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
bb.utils.remove(outdir, recurse=True)
@@ -476,12 +590,16 @@ do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_deploy_archives_setscene
addtask do_ar_original after do_unpack
-addtask do_unpack_and_patch after do_patch
+addtask do_unpack_and_patch after do_patch do_preconfigure
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
+addtask do_ar_mirror after do_fetch
addtask do_dumpdata
addtask do_ar_recipe
-addtask do_deploy_archives before do_build
+addtask do_deploy_archives
+do_build[recrdeptask] += "do_deploy_archives"
+do_rootfs[recrdeptask] += "do_deploy_archives"
+do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
# Add tasks in the correct order, specifically for linux-yocto to avoid race condition.
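
[Editorial illustration] A minimal local.conf sketch exercising the mirror
archiving mode added above; the values are examples, not defaults, and the
exclusion URL is hypothetical:

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "mirror"
    ARCHIVER_MODE[mirror] = "combined"
    # Optional: archiver tarballs now default to tar.xz
    ARCHIVER_MODE[compression] = "xz"
    # Mirror mode refuses to run without this, see do_ar_mirror above
    BB_GENERATE_MIRROR_TARBALLS = "1"
    # Skip sources we already publish ourselves, and local files
    ARCHIVER_MIRROR_EXCLUDE = "https://mysite.com/ file://"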
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 8768a6ad68..4ab2460990 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -1,11 +1,11 @@
-def autotools_dep_prepend(d):
+def get_autotools_dep(d):
if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
return ''
pn = d.getVar('PN')
deps = ''
- if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
+ if pn in ['autoconf-native', 'automake-native']:
return deps
deps += 'autoconf-native automake-native '
@@ -17,18 +17,21 @@ def autotools_dep_prepend(d):
and not d.getVar('INHIBIT_DEFAULT_DEPS'):
deps += 'libtool-cross '
- return deps + 'gnu-config-native '
+ return deps
-DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
+
+DEPENDS:prepend = "${@get_autotools_dep(d)} "
inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+# The value of this variable is filled in in a prefunc because it depends on
+# the contents of the sysroot.
+export CONFIG_SITE
acpaths ?= "default"
-EXTRA_AUTORECONF = "--exclude=autopoint"
+EXTRA_AUTORECONF = "--exclude=autopoint --exclude=gtkdocize"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
@@ -88,7 +91,7 @@ oe_runconf () {
cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
- if ! ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+ if ! CONFIG_SHELL=${CONFIG_SHELL-/bin/bash} ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
bbnote "The following config.log files may provide further information."
bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
bbfatal_log "configure failed"
@@ -129,21 +132,25 @@ autotools_postconfigure(){
EXTRACONFFUNCS ??= ""
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
+do_compile[prefuncs] += "autotools_aclocals"
+do_install[prefuncs] += "autotools_aclocals"
do_configure[postfuncs] += "autotools_postconfigure"
ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
ACLOCALEXTRAPATH = ""
-ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
-ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
+ACLOCALEXTRAPATH:class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
python autotools_aclocals () {
- # Refresh variable with cache files
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
+ sitefiles, searched = siteinfo_get_files(d, sysrootcache=True)
+ d.setVar("CONFIG_SITE", " ".join(sitefiles))
}
+do_configure[file-checksums] += "${@' '.join(siteinfo_get_files(d, sysrootcache=False)[1])}"
+
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
autotools_do_configure() {
@@ -212,21 +219,13 @@ autotools_do_configure() {
PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
fi
mkdir -p m4
- if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
- if ! echo "${DEPENDS}" | grep -q intltool-native; then
- bbwarn "Missing DEPENDS on intltool-native"
- fi
- PRUNE_M4="$PRUNE_M4 intltool.m4"
- bbnote Executing intltoolize --copy --force --automake
- intltoolize --copy --force --automake
- fi
for i in $PRUNE_M4; do
find ${S} -ignore_readdir_race -name $i -delete
done
bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
- ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
+ ACLOCAL="$ACLOCAL" autoreconf -Wcross -Wno-obsolete --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
cd $olddir
fi
if [ -e ${CONFIGURE_SCRIPT} ]; then
diff --git a/meta/classes/baremetal-image.bbclass b/meta/classes/baremetal-image.bbclass
new file mode 100644
index 0000000000..81f5e5e93d
--- /dev/null
+++ b/meta/classes/baremetal-image.bbclass
@@ -0,0 +1,121 @@
+# Baremetal image class
+#
+# This class is meant to be inherited by recipes for baremetal/RTOS applications
+# It contains code that would be used by all of them, every recipe just needs to
+# override certain variables.
+#
+# For scalability purposes, code within this class focuses on the "image" wiring
+# to satisfy the OpenEmbedded image creation and testing infrastructure.
+#
+# See meta-skeleton for a working example.
+
+
+# Toolchain should be baremetal or newlib based.
+# TCLIBC="baremetal" or TCLIBC="newlib"
+COMPATIBLE_HOST:libc-musl:class-target = "null"
+COMPATIBLE_HOST:libc-glibc:class-target = "null"
+
+
+inherit rootfs-postcommands
+
+# Set some defaults, but these should be overridden by each recipe if required
+IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
+BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}"
+IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}"
+IMAGE_NAME_SUFFIX ?= ""
+
+do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}"
+
+do_image(){
+ install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin
+ install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf
+}
+
+do_image_complete(){
+ :
+}
+
+python do_rootfs(){
+ from oe.utils import execute_pre_post_process
+ from pathlib import Path
+
+ # Write empty manifest file to satisfy test infrastructure
+ deploy_dir = d.getVar('IMGDEPLOYDIR')
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ manifest_name = d.getVar('IMAGE_MANIFEST')
+
+ Path(manifest_name).touch()
+ if os.path.exists(manifest_name) and link_name:
+ manifest_link = deploy_dir + "/" + link_name + ".manifest"
+ if os.path.lexists(manifest_link):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
+ # A lot of postprocess commands assume the existence of rootfs/etc
+ sysconfdir = d.getVar("IMAGE_ROOTFS") + d.getVar('sysconfdir')
+ bb.utils.mkdirhier(sysconfdir)
+
+ execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND'))
+}
+
+
+# Ensure the binaries, manifest and qemuboot.conf are populated in DEPLOY_DIR_IMAGE
+do_image_complete[dirs] = "${TOPDIR}"
+SSTATETASKS += "do_image_complete"
+SSTATE_SKIP_CREATION:task-image-complete = '1'
+do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
+do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
+addtask do_image_complete after do_image before do_build
+
+python do_image_complete_setscene () {
+ sstate_setscene(d)
+}
+addtask do_image_complete_setscene
+
+# QEMU generic Baremetal/RTOS parameters
+QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin"
+QB_MEM ?= "-m 256"
+QB_DEFAULT_FSTYPE ?= "bin"
+QB_DTB ?= ""
+QB_OPT_APPEND:append = " -nographic"
+
+# RISC-V tunes set the BIOS; unset it here and instruct QEMU to
+# ignore the BIOS and boot from -kernel
+QB_DEFAULT_BIOS:qemuriscv64 = ""
+QB_DEFAULT_BIOS:qemuriscv32 = ""
+QB_OPT_APPEND:append:qemuriscv64 = " -bios none"
+QB_OPT_APPEND:append:qemuriscv32 = " -bios none"
+
+
+# Use the medium-any code model for the RISC-V 64 bit implementation,
+# since medlow can only access addresses below 0x80000000 and RAM
+# starts at 0x80000000 on RISC-V 64
+# Keep RISC-V 32 using -mcmodel=medlow (symbols lie between -2GB:2GB)
+CFLAGS:append:qemuriscv64 = " -mcmodel=medany"
+
+
+# This next part is necessary to trick the build system into thinking
+# it's building an image recipe so it generates the qemuboot.conf
+addtask do_rootfs before do_image after do_install
+addtask do_image after do_rootfs before do_image_complete
+addtask do_image_complete after do_image before do_build
+inherit qemuboot
+
+# Based on image.bbclass to make sure we build qemu
+python(){
+ # do_addto_recipe_sysroot doesn't exist for all recipes, but we need it to have
+ # /usr/bin on recipe-sysroot (qemu) populated
+ # The do_addto_recipe_sysroot dependency is coming from EXTRA_IMAGEDEPENDS now,
+ # we just need to add the logic to add its dependency to do_image.
+ def extraimage_getdepends(task):
+ deps = ""
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
+ # Make sure we only add it for qemu
+ if 'qemu' in dep:
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
+ return deps
+ d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot'))
+}
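
[Editorial illustration] A hedged sketch of a recipe consuming this class;
recipe and file names are illustrative, see meta-skeleton for the real
working example:

    inherit baremetal-image

    # Stage the firmware where do_image above expects to find it
    do_install() {
        install -d ${D}/${base_libdir}/firmware
        install -m 0755 ${B}/hello.bin \
            ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin
        install -m 0755 ${B}/hello.elf \
            ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf
    }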
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index f1a3c0e53e..cc81461473 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -10,9 +10,13 @@ inherit utility-tasks
inherit metadata_scm
inherit logging
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license"
+OE_EXTRA_IMPORTS ?= ""
+
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
+PACKAGECONFIG_CONFARGS ??= ""
+
def oe_import(d):
import sys
@@ -28,9 +32,11 @@ def oe_import(d):
import oe.data
for toimport in oe.data.typed_value("OE_IMPORTS", d):
- imported = __import__(toimport)
- inject(toimport.split(".", 1)[0], imported)
-
+ try:
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
+ except AttributeError as e:
+ bb.error("Error importing OE modules: %s" % str(e))
return ""
# We need the oe module name space early (before INHERITs get added)
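
[Editorial illustration] OE_EXTRA_IMPORTS lets a layer expose its own Python
modules to all recipes without overriding OE_IMPORTS; the module name below
is hypothetical and would live in the layer's lib/ directory:

    # in the custom layer's conf/layer.conf
    OE_EXTRA_IMPORTS += "mylayer.utils"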
@@ -60,18 +66,18 @@ oe_runmake() {
}
-def base_dep_prepend(d):
+def get_base_dep(d):
if d.getVar('INHIBIT_DEFAULT_DEPS', False):
return ""
return "${BASE_DEFAULT_DEPS}"
-BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
+BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"
BASEDEPENDS = ""
-BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
-BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
+BASEDEPENDS:class-target = "${@get_base_dep(d)}"
+BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"
-DEPENDS_prepend="${BASEDEPENDS} "
+DEPENDS:prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with imediate expansion as it has to run
@@ -85,7 +91,7 @@ def extra_path_elements(d):
path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
return path
-PATH_prepend = "${@extra_path_elements(d)}"
+PATH:prepend = "${@extra_path_elements(d)}"
def get_lic_checksum_file_list(d):
filelist = []
@@ -121,6 +127,9 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
for tool in tools:
desttool = os.path.join(dest, tool)
if not os.path.exists(desttool):
+ # clean up dead symlink
+ if os.path.islink(desttool):
+ os.unlink(desttool)
srctool = bb.utils.which(path, tool, executable=True)
# gcc/g++ may link to ccache on some hosts, e.g.,
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
@@ -132,11 +141,6 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
os.symlink(srctool, desttool)
else:
notfound.append(tool)
- # Force "python" -> "python2"
- desttool = os.path.join(dest, "python")
- if not os.path.exists(desttool):
- srctool = "python2"
- os.symlink(srctool, desttool)
if notfound and fatal:
bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
@@ -146,17 +150,18 @@ do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
+do_fetch[network] = "1"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
addtask unpack after do_fetch
@@ -166,16 +171,54 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
+}
+
+SSTATETASKS += "do_deploy_source_date_epoch"
+
+do_deploy_source_date_epoch () {
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ else
+ echo "${SDE_FILE} not found!"
+ fi
}
+python do_deploy_source_date_epoch_setscene () {
+ sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ target = d.getVar('SDE_FILE')
+ bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
+ bb.utils.rename(sde_file, target)
+ else:
+ bb.debug(1, "%s not found!" % sde_file)
+}
+
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
+addtask do_deploy_source_date_epoch_setscene
+addtask do_deploy_source_date_epoch before do_configure after do_patch
+
+python create_source_date_epoch_stamp() {
+ source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
+ oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
+}
+do_unpack[postfuncs] += "create_source_date_epoch_stamp"
+
+def get_source_date_epoch_value(d):
+ return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)
+
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS") or "").split()
layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
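
[Editorial illustration] A hedged sketch of consuming the epoch helper added
above, along the lines of the pattern used by reproducible_build.bbclass:

    export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"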
@@ -220,14 +263,21 @@ def buildcfg_neededvars(d):
bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
import bb.runqueue
if isinstance(e, bb.event.ConfigParsed):
if not d.getVar("NATIVELSBSTRING", False):
d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
+ d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
d.setVar('BB_VERSION', bb.__version__)
+
+ # There might be no bb.event.ConfigParsed event if bitbake server is
+ # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
+ # exists.
+ if isinstance(e, bb.event.ConfigParsed) or \
+ (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
# Works with the line in layer.conf which changes PATH to point here
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
@@ -268,18 +318,6 @@ python base_eventhandler() {
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
- if isinstance(e, bb.runqueue.sceneQueueComplete):
- completions = d.expand("${STAGING_DIR}/sstatecompletions")
- if os.path.exists(completions):
- cmds = set()
- with open(completions, "r") as f:
- cmds = set(f)
- d.setVar("completion_function", "\n".join(cmds))
- d.setVarFlag("completion_function", "func", "1")
- bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
- bb.build.exec_func("completion_function", d)
- os.remove(completions)
-
if isinstance(e, bb.event.RecipeParsed):
#
# If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
@@ -291,9 +329,9 @@ python base_eventhandler() {
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
if not source_mirror_fetch:
provs = (d.getVar("PROVIDES") or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
for p in provs:
- if p.startswith("virtual/") and p not in multiwhitelist:
+ if p.startswith("virtual/") and p not in multiprovidersallowed:
profprov = d.getVar("PREFERRED_PROVIDER_" + p)
if profprov and pn != profprov:
raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
@@ -304,7 +342,6 @@ CLEANBROKEN = "0"
addtask configure after do_patch
do_configure[dirs] = "${B}"
-do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
base_do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
@@ -391,12 +428,30 @@ python () {
oe.utils.features_backfill("DISTRO_FEATURES", d)
oe.utils.features_backfill("MACHINE_FEATURES", d)
+ if d.getVar("S")[-1] == '/':
+ bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
+ if d.getVar("B")[-1] == '/':
+ bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))
+
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
+ if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
+ d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")
+
+ # To add a recipe to the skip list, set:
+ # SKIP_RECIPE[pn] = "message"
+ pn = d.getVar('PN')
+ skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
+ if skip_msg:
+ bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
+ raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))
+
# Handle PACKAGECONFIG
#
# These take the form:
#
# PACKAGECONFIG ??= "<default options>"
- # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
if pkgconfigflags:
pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
@@ -443,8 +498,8 @@ python () {
for flag, flagval in sorted(pkgconfigflags.items()):
items = flagval.split(",")
num = len(items)
- if num > 5:
- bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend can be specified!"
+ if num > 6:
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
% (d.getVar('PN'), flag))
if flag in pkgconfig:
@@ -458,9 +513,23 @@ python () {
extraconf.append(items[0])
elif num >= 2 and items[1]:
extraconf.append(items[1])
+
+ if num >= 6 and items[5]:
+ conflicts = set(items[5].split())
+ invalid = conflicts.difference(set(pkgconfigflags.keys()))
+ if invalid:
+ bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
+ % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
+
+ if flag in pkgconfig:
+ intersec = conflicts.intersection(set(pkgconfig))
+ if intersec:
+ bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
+ % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
+
appendVar('DEPENDS', extradeps)
- appendVar('RDEPENDS_${PN}', extrardeps)
- appendVar('RRECOMMENDS_${PN}', extrarrecs)
+ appendVar('RDEPENDS:${PN}', extrardeps)
+ appendVar('RRECOMMENDS:${PN}', extrarrecs)
appendVar('PACKAGECONFIG_CONFARGS', extraconf)
pn = d.getVar('PN')
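
[Editorial illustration] A minimal sketch of the extended six-field
PACKAGECONFIG syntax validated above; the flag names are hypothetical:

    PACKAGECONFIG ??= "gnutls"
    PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls,,,openssl"
    PACKAGECONFIG[openssl] = "--with-openssl,--without-openssl,openssl,,,gnutls"

    # PACKAGECONFIG = "gnutls openssl" now fails at parse time because each
    # flag names the other in the sixth (conflict_packageconfig) field.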
@@ -473,9 +542,9 @@ python () {
unmatched_license_flags = check_license_flags(d)
if unmatched_license_flags:
if len(unmatched_license_flags) == 1:
- message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
+ message = "because it has a restricted license '{0}'. Which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
else:
- message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
+ message = "because it has restricted licenses {0}. Which are not listed in LICENSE_FLAGS_ACCEPTED".format(
", ".join("'{0}'".format(f) for f in unmatched_license_flags))
bb.debug(1, "Skipping %s %s" % (pn, message))
raise bb.parse.SkipRecipe(message)
@@ -483,22 +552,18 @@ python () {
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- d.setVarFlag('do_unpack', 'umask', '022')
- d.setVarFlag('do_configure', 'umask', '022')
- d.setVarFlag('do_compile', 'umask', '022')
+ d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_install', 'fakeroot', '1')
- d.setVarFlag('do_install', 'umask', '022')
d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package', 'fakeroot', '1')
- d.setVarFlag('do_package', 'umask', '022')
d.setVarFlag('do_package_setscene', 'fakeroot', '1')
d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_devshell', 'fakeroot', '1')
d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
need_machine = d.getVar('COMPATIBLE_MACHINE')
- if need_machine:
+ if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
import re
compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
for m in compat_machines:
@@ -507,7 +572,7 @@ python () {
else:
raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
- source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
if not source_mirror_fetch:
need_host = d.getVar('COMPATIBLE_HOST')
if need_host:
@@ -530,99 +595,92 @@ python () {
if check_license and bad_licenses:
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- whitelist = []
- incompatwl = []
- for lic in bad_licenses:
- spdx_license = return_spdx(d, lic)
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
- '''
- We need to track what we are whitelisting and why. If pn is
- incompatible we need to be able to note that the image that
- is created may infact contain incompatible licenses despite
- INCOMPATIBLE_LICENSE being set.
- '''
- incompatwl.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- incompatwl.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
-
- if not pn in whitelist:
- pkgs = d.getVar('PACKAGES').split()
- skipped_pkgs = []
- unskipped_pkgs = []
- for pkg in pkgs:
- if incompatible_license(d, bad_licenses, pkg):
- skipped_pkgs.append(pkg)
- else:
- unskipped_pkgs.append(pkg)
- all_skipped = skipped_pkgs and not unskipped_pkgs
- if unskipped_pkgs:
- for pkg in skipped_pkgs:
- bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
- mlprefix = d.getVar('MLPREFIX')
- d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
- for pkg in unskipped_pkgs:
- bb.debug(1, "INCLUDING the package " + pkg)
- elif all_skipped or incompatible_license(d, bad_licenses):
- bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
- raise bb.parse.SkipRecipe("it has an incompatible license: %s" % license)
- elif pn in whitelist:
- if pn in incompatwl:
- bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
-
- # Try to verify per-package (LICENSE_<pkg>) values. LICENSE should be a
- # superset of all per-package licenses. We do not do advanced (pattern)
- # matching of license expressions - just check that all license strings
- # in LICENSE_<pkg> are found in LICENSE.
- license_set = oe.license.list_licenses(license)
- for pkg in d.getVar('PACKAGES').split():
- pkg_license = d.getVar('LICENSE_' + pkg)
- if pkg_license:
- unlisted = oe.license.list_licenses(pkg_license) - license_set
- if unlisted:
- bb.warn("LICENSE_%s includes licenses (%s) that are not "
- "listed in LICENSE" % (pkg, ' '.join(unlisted)))
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
+
+ for lic_exception in exceptions:
+ if ":" in lic_exception:
+ lic_exception = lic_exception.split(":")[1]
+ if lic_exception in oe.license.obsolete_license_list():
+ bb.fatal("Invalid license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)
+
+ pkgs = d.getVar('PACKAGES').split()
+ skipped_pkgs = {}
+ unskipped_pkgs = []
+ for pkg in pkgs:
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+
+ incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
+ if incompatible_lic:
+ skipped_pkgs[pkg] = incompatible_lic
+ else:
+ unskipped_pkgs.append(pkg)
+
+ if unskipped_pkgs:
+ for pkg in skipped_pkgs:
+ bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
+ d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
+ for pkg in unskipped_pkgs:
+ bb.debug(1, "Including the package %s" % pkg)
+ else:
+ incompatible_lic = incompatible_license(d, bad_licenses)
+ for pkg in skipped_pkgs:
+ incompatible_lic += skipped_pkgs[pkg]
+ incompatible_lic = sorted(list(set(incompatible_lic)))
+
+ if incompatible_lic:
+ bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
+ raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
needsrcrev = False
srcuri = d.getVar('SRC_URI')
- for uri in srcuri.split():
- (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
+ for uri_string in srcuri.split():
+ uri = bb.fetch.URI(uri_string)
+ # Also check downloadfilename as the URL path might not be useful for sniffing
+ path = uri.params.get("downloadfilename", uri.path)
# HTTP/FTP use the wget fetcher
- if scheme in ("http", "https", "ftp"):
+ if uri.scheme in ("http", "https", "ftp"):
d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
# Svn packages should DEPEND on subversion-native
- if scheme == "svn":
+ if uri.scheme == "svn":
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
# Git packages should DEPEND on git-native
- elif scheme in ("git", "gitsm"):
+ elif uri.scheme in ("git", "gitsm"):
needsrcrev = True
d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
# Mercurial packages should DEPEND on mercurial-native
- elif scheme == "hg":
+ elif uri.scheme == "hg":
needsrcrev = True
+ d.appendVar("EXTRANATIVEPATH", ' python3-native ')
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# Perforce packages support SRCREV = "${AUTOREV}"
- elif scheme == "p4":
+ elif uri.scheme == "p4":
needsrcrev = True
# OSC packages should DEPEND on osc-native
- elif scheme == "osc":
+ elif uri.scheme == "osc":
d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
- elif scheme == "npm":
+ elif uri.scheme == "npm":
d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
+ elif uri.scheme == "repo":
+ needsrcrev = True
+ d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')
+
# *.lz4 should DEPEND on lz4-native for unpacking
if path.endswith('.lz4'):
d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+ # *.zst should DEPEND on zstd-native for unpacking
+ elif path.endswith('.zst'):
+ d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')
+
# *.lz should DEPEND on lzip-native for unpacking
elif path.endswith('.lz'):
d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
@@ -646,6 +704,18 @@ python () {
if needsrcrev:
d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
+ # Gather all named SRCREVs to add to the sstate hash calculation
+ # This anonymous python snippet is called multiple times so we
+ # need to be careful to not double up the appends here and cause
+ # the base hash to mismatch the task hash
+ for uri in srcuri.split():
+ parm = bb.fetch.decodeurl(uri)[5]
+ uri_names = parm.get("name", "").split(",")
+ for uri_name in filter(None, uri_names):
+ srcrev_name = "SRCREV_{}".format(uri_name)
+ if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
+ d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
+
set_packagetriplet(d)
# 'multimachine' handling
@@ -669,7 +739,7 @@ python () {
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
- if len(paths) != 0:
+ if paths:
for s in srcuri.split():
if not s.startswith("file://"):
continue
@@ -702,7 +772,7 @@ do_cleansstate[nostamp] = "1"
python do_cleanall() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
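
[Editorial illustration] A hedged local.conf sketch of the license-exception
handling added above; exceptions take the pkg:license form consumed by
oe.license.apply_pkg_license_exception, and the names are examples:

    INCOMPATIBLE_LICENSE = "GPL-3.0* LGPL-3.0*"
    INCOMPATIBLE_LICENSE_EXCEPTIONS = "bash:GPL-3.0-or-later"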
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
index 80ee9b4874..803b2cae4d 100644
--- a/meta/classes/bash-completion.bbclass
+++ b/meta/classes/bash-completion.bbclass
@@ -1,7 +1,7 @@
-DEPENDS_append_class-target = " bash-completion"
+DEPENDS:append:class-target = " bash-completion"
PACKAGES += "${PN}-bash-completion"
-FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+FILES:${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-RDEPENDS_${PN}-bash-completion = "bash-completion"
+RDEPENDS:${PN}-bash-completion = "bash-completion"
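
[Editorial illustration] Usage is unchanged by the override-syntax rename: a
recipe that installs completion files under ${datadir}/bash-completion just
adds

    inherit bash-completion

and those files are split into the ${PN}-bash-completion package.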
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c3aca20443 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -34,6 +34,6 @@ bin_package_do_install () {
| tar --no-same-owner -xpf - -C ${D}
}
-FILES_${PN} = "/"
+FILES:${PN} = "/"
EXPORT_FUNCTIONS do_install
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
index 096b670e12..e8ac41b2d4 100644
--- a/meta/classes/binconfig-disabled.bbclass
+++ b/meta/classes/binconfig-disabled.bbclass
@@ -5,9 +5,9 @@
# The list of scripts which should be disabled.
BINCONFIG ?= ""
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
-do_install_append () {
+do_install:append () {
for x in ${BINCONFIG}; do
# Make the disabled script emit invalid parameters for those configure
# scripts which call it without checking the return code.
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index 133b9537cf..6e0c88269a 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -1,4 +1,4 @@
-FILES_${PN}-dev += "${bindir}/*-config"
+FILES:${PN}-dev += "${bindir}/*-config"
# The namespaces can clash here hence the two step replace
def get_binconfig_mangle(d):
@@ -40,15 +40,6 @@ binconfig_package_preprocess () {
-e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$config
done
- for lafile in `find ${PKGD} -type f -name "*.la"` ; do
- sed -i \
- -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
- -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${STAGING_INCDIR}:${includedir}:g;' \
- -e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
- $lafile
- done
}
SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
deleted file mode 100644
index dc794228ff..0000000000
--- a/meta/classes/blacklist.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# anonymous support class from originally from angstrom
-#
-# To use the blacklist, a distribution should include this
-# class in the INHERIT_DISTRO
-#
-# No longer use ANGSTROM_BLACKLIST, instead use a table of
-# recipes in PNBLACKLIST
-#
-# Features:
-#
-# * To add a package to the blacklist, set:
-# PNBLACKLIST[pn] = "message"
-#
-
-python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
-
- if blacklist:
- raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
-}
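
[Editorial illustration] The PNBLACKLIST mechanism removed here is replaced
by the SKIP_RECIPE support added to base.bbclass above; the migration is a
straight rename (recipe name hypothetical):

    # old: PNBLACKLIST[hello-world] = "example message"
    SKIP_RECIPE[hello-world] = "example message"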
diff --git a/meta/classes/bluetooth.bbclass b/meta/classes/bluetooth.bbclass
deleted file mode 100644
index f88b4ae5b8..0000000000
--- a/meta/classes/bluetooth.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
-# Avoid code duplication in bluetooth-dependent recipes.
-
-# Define a variable that expands to the recipe (package) providing core
-# bluetooth support on the platform:
-# "" if bluetooth is not in DISTRO_FEATURES
-# else "bluez5" if bluez5 is in DISTRO_FEATURES
-# else "bluez4"
-
-# Use this with:
-# inherit bluetooth
-# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}
-# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
-
-BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 796f68cf8f..8db79a4829 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -7,6 +7,8 @@
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
+inherit image-artifact-names
+
BUILDHISTORY_FEATURES ?= "image package sdk"
BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
@@ -29,7 +31,7 @@ BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}
# of failed builds.
#
# The expected usage is via auto.conf, but passing via the command line also works
-# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+# with: BB_ENV_PASSTHROUGH_ADDITIONS=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
BUILDHISTORY_RESET ?= ""
BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
@@ -40,15 +42,17 @@ BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf con
BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
+BUILDHISTORY_TAG ?= "build"
+BUILDHISTORY_PATH_PREFIX_STRIP ?= ""
-SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
+SSTATEPOSTINSTFUNCS:append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# Similarly for our function that gets the output signatures
-SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
@@ -60,21 +64,46 @@ SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
# When extending build history, derive your class from buildhistory.bbclass
# and extend this list here with the additional files created by the derived
# class.
-BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+BUILDHISTORY_PRESERVE = "latest latest_srcrev sysroot"
PATCH_GIT_USER_EMAIL ?= "buildhistory@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
#
+# Write out the contents of the sysroot
+#
+buildhistory_emit_sysroot() {
+ mkdir --parents ${BUILDHISTORY_DIR_PACKAGE}
+ case ${CLASSOVERRIDE} in
+ class-native|class-cross|class-crosssdk)
+ BASE=${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}
+ ;;
+ *)
+ BASE=${SYSROOT_DESTDIR}
+ ;;
+ esac
+ buildhistory_list_files_no_owners $BASE ${BUILDHISTORY_DIR_PACKAGE}/sysroot
+}
+
+#
# Write out metadata about this package for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
- if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
+ if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
+ bb.build.exec_func("buildhistory_emit_sysroot", d)
return 0
if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
+ if d.getVar('BB_CURRENTTASK') in ['package', 'package_setscene']:
+ # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
+ bb.build.exec_func("buildhistory_list_pkg_files", d)
+ return 0
+
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
+ return 0
+
import re
import json
import shlex
@@ -93,6 +122,9 @@ python buildhistory_emit_pkghistory() {
self.packages = ""
self.srcrev = ""
self.layer = ""
+ self.license = ""
+ self.config = ""
+ self.src_uri = ""
class PackageInfo:
@@ -194,6 +226,7 @@ python buildhistory_emit_pkghistory() {
pv = d.getVar('PV')
pr = d.getVar('PR')
layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
+ license = d.getVar('LICENSE')
pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
@@ -234,22 +267,20 @@ python buildhistory_emit_pkghistory() {
rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
rcpinfo.layer = layer
+ rcpinfo.license = license
+ rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or ""))
+ rcpinfo.src_uri = oe.utils.squashspaces(d.getVar('SRC_URI') or "")
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST')
+ bb.build.exec_func("read_subpackage_metadata", d)
+
for pkg in packagelist:
- pkgdata = {}
- with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
- for line in f.readlines():
- item = line.rstrip('\n').split(': ', 1)
- key = item[0]
- if key.endswith('_' + pkg):
- key = key[:-len(pkg)-1]
- pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
-
- pkge = pkgdata.get('PKGE', '0')
- pkgv = pkgdata['PKGV']
- pkgr = pkgdata['PKGR']
+ localdata = d.createCopy()
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ pkge = localdata.getVar("PKGE") or '0'
+ pkgv = localdata.getVar("PKGV")
+ pkgr = localdata.getVar("PKGR")
#
# Find out what the last version was
# Make sure the version did not decrease
@@ -261,41 +292,40 @@ python buildhistory_emit_pkghistory() {
last_pkgr = lastversion.pkgr
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
- msg = "Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
- package_qa_handle_error("version-going-backwards", msg, d)
+ msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
+ oe.qa.handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
# Apparently the version can be different on a per-package basis (see Python)
- pkginfo.pe = pkgdata.get('PE', '0')
- pkginfo.pv = pkgdata['PV']
- pkginfo.pr = pkgdata['PR']
- pkginfo.pkg = pkgdata['PKG']
+ pkginfo.pe = localdata.getVar("PE") or '0'
+ pkginfo.pv = localdata.getVar("PV")
+ pkginfo.pr = localdata.getVar("PR")
+ pkginfo.pkg = localdata.getVar("PKG")
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or ""))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or ""))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or ""))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or ""))
+            pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or ""))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or ""))
+ pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "")
for filevar in pkginfo.filevars:
- pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
+ pkginfo.filevars[filevar] = localdata.getVar(filevar) or ""
# Gather information about packaged files
- val = pkgdata.get('FILES_INFO', '')
+ val = localdata.getVar('FILES_INFO') or ''
dictval = json.loads(val)
filelist = list(dictval.keys())
filelist.sort()
pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
- pkginfo.size = int(pkgdata['PKGSIZE'])
+ pkginfo.size = int(localdata.getVar('PKGSIZE') or '0')
write_pkghistory(pkginfo, d)
- # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
- bb.build.exec_func("buildhistory_list_pkg_files", d)
+ oe.qa.exit_if_errors(d)
}
python buildhistory_emit_outputsigs() {
@@ -348,6 +378,9 @@ def write_recipehistory(rcpinfo, d):
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ f.write(u"LICENSE = %s\n" % rcpinfo.license)
+ f.write(u"CONFIG = %s\n" % rcpinfo.config)
+ f.write(u"SRC_URI = %s\n" % rcpinfo.src_uri)
write_latest_srcrev(d, pkghistdir)
@@ -406,19 +439,24 @@ def buildhistory_list_installed(d, rootfs_type="image"):
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- process_list = [('file', 'bh_installed_pkgs.txt'),\
- ('deps', 'bh_installed_pkgs_deps.txt')]
+ process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
+ ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]
if rootfs_type == "image":
pkgs = image_list_installed_packages(d)
else:
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+ if rootfs_type == "sdk_host":
+ pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
+ else:
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+
for output_type, output_file in process_list:
output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
- output.write(format_pkg_list(pkgs, output_type))
+ output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))
python buildhistory_list_installed_image() {
buildhistory_list_installed(d)
@@ -437,9 +475,10 @@ buildhistory_get_installed() {
# Get list of installed packages
pkgcache="$1/installed-packages.tmp"
- cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+ cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt
cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+
if [ -s $pkgcache ] ; then
cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
else
@@ -448,8 +487,8 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
- rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
+ rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
# Remove lines with rpmlib(...) and config(...) dependencies, change the
# delimiter from pipe to "->", set the style for recommend lines and
# turn versioned dependencies into edge labels.
@@ -458,6 +497,8 @@ buildhistory_get_installed() {
-e 's:|: -> :' \
-e 's:"\[REC\]":[style=dotted]:' \
-e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
+ -e 's:"\[RPROVIDES\]":[style=dashed]:' \
$1/depends.tmp
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
@@ -465,11 +506,22 @@ buildhistory_get_installed() {
echo "}" >> $1/depends.dot
rm $1/depends.tmp
+ # Set correct pkgdatadir
+ pkgdatadir=${PKGDATA_DIR}
+ if [ "$2" == "sdk" ] && [ "$3" == "host" ]; then
+ pkgdatadir="${PKGDATA_DIR_SDK}"
+ fi
+
# Produce installed package sizes list
- oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+ oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
+ # Produce package info: runtime_name, buildtime_name, recipe, version, size
+ oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
+ cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
+ rm $1/installed-package-info.tmp
+
# We're now done with the cache, delete it
rm $pkgcache
@@ -506,7 +558,7 @@ buildhistory_get_sdk_installed() {
return
fi
- buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
+ buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
}
buildhistory_get_sdk_installed_host() {
@@ -529,6 +581,20 @@ buildhistory_list_files() {
fi | sort -k5 | sed 's/ * -> $//' > $2 )
}
+buildhistory_list_files_no_owners() {
+ # List the files in the specified directory, but exclude date/time etc.
+ # Also don't output the ownership data, but instead output just - - so
+ # that the same parsing code as for _list_files works.
+	# This is somewhat messy, but handles the case where the size is not printed for device files under pseudo
+ ( cd $1
+ find_cmd='find . ! -path . -printf "%M - - %10s %p -> %l\n"'
+ if [ "$3" = "fakeroot" ] ; then
+ eval ${FAKEROOTENV} ${FAKEROOTCMD} "$find_cmd"
+ else
+ eval "$find_cmd"
+ fi | sort -k5 | sed 's/ * -> $//' > $2 )
+}
+
buildhistory_list_pkg_files() {
# Create individual files-in-package for each recipe's package
for pkgdir in $(find ${PKGDEST}/* -maxdepth 0 -type d); do
@@ -633,16 +699,19 @@ IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;
IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target;"
POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host;"
POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
-SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
python buildhistory_write_sigs() {
if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -652,7 +721,7 @@ python buildhistory_write_sigs() {
if hasattr(bb.parse.siggen, 'dump_siglist'):
taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
bb.utils.mkdirhier(taskoutdir)
- bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'))
+ bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'), d.getVar("BUILDHISTORY_PATH_PREFIX_STRIP"))
}
def buildhistory_get_build_id(d):
@@ -720,11 +789,11 @@ def buildhistory_get_imagevars(d):
def buildhistory_get_sdkvars(d):
if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
- sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
- sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
- listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
+ sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
+ listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
return outputvars(sdkvars, listvars, d)
@@ -791,9 +860,9 @@ END
if [ ! -e .git ] ; then
git init -q
else
- git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
- git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
- git tag -f build-minus-1 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-3 ${BUILDHISTORY_TAG}-minus-2 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-2 ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
fi
check_git_config
@@ -817,7 +886,7 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
reset = e.data.getVar("BUILDHISTORY_RESET")
olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
@@ -827,10 +896,11 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
- os.rename(os.path.join(rootdir, entry),
+ bb.utils.rename(os.path.join(rootdir, entry),
os.path.join(olddir, entry))
elif isinstance(e, bb.event.BuildCompleted):
if reset:
@@ -839,11 +909,15 @@ python buildhistory_eventhandler() {
if e.data.getVar("BUILDHISTORY_COMMIT") == "1":
bb.note("Writing buildhistory")
bb.build.exec_func("buildhistory_write_sigs", d)
+ import time
+ start=time.time()
localdata = bb.data.createCopy(e.data)
localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
interrupted = getattr(e, '_interrupted', 0)
localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
bb.build.exec_func("buildhistory_commit", localdata)
+ stop=time.time()
+ bb.note("Writing buildhistory took: %s seconds" % round(stop-start))
else:
bb.note("No commit since BUILDHISTORY_COMMIT != '1'")
}
@@ -865,22 +939,12 @@ def _get_srcrev_values(d):
if urldata[u].method.supports_srcrev():
scms.append(u)
- autoinc_templ = 'AUTOINC+'
dict_srcrevs = {}
dict_tag_srcrevs = {}
for scm in scms:
ud = urldata[scm]
for name in ud.names:
- try:
- rev = ud.method.sortable_revision(ud, d, name)
- except TypeError:
- # support old bitbake versions
- rev = ud.method.sortable_revision(scm, ud, d, name)
- # Clean this up when we next bump bitbake version
- if type(rev) != str:
- autoinc, rev = rev
- elif rev.startswith(autoinc_templ):
- rev = rev[len(autoinc_templ):]
+ autoinc, rev = ud.method.sortable_revision(ud, d, name)
dict_srcrevs[name] = rev
if 'tag' in ud.parm:
tag = ud.parm['tag'];
@@ -911,23 +975,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
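
The pkgdata-file parsing removed above is replaced by having read_subpackage_metadata load the per-package values into the datastore, then resolving them by appending the package name to OVERRIDES. The pattern in isolation (function name is illustrative):

    python example_read_pkg_vars () {
        for pkg in (d.getVar('PACKAGES') or "").split():
            localdata = d.createCopy()
            localdata.setVar('OVERRIDES', d.getVar('OVERRIDES', False) + ':' + pkg)
            # PKGV, FILES, RDEPENDS etc. now resolve with the per-package override
            bb.note("%s packaged version: %s" % (pkg, localdata.getVar('PKGV')))
    }
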
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
index 960653c704..0de605200a 100644
--- a/meta/classes/buildstats.bbclass
+++ b/meta/classes/buildstats.bbclass
@@ -80,8 +80,6 @@ def get_buildtimedata(var, d):
return timediff, cpuperc
def write_task_data(status, logfile, e, d):
- bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
with open(os.path.join(logfile), "a") as f:
elapsedtime = get_timedata("__timedata_task", d, e.time)
if elapsedtime:
@@ -100,20 +98,96 @@ def write_task_data(status, logfile, e, d):
f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
for i in rusages:
f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
- if status is "passed":
+ if status == "passed":
f.write("Status: PASSED \n")
else:
f.write("Status: FAILED \n")
f.write("Ended: %0.2f \n" % e.time)
+def write_host_data(logfile, e, d, type):
+ import subprocess, os, datetime
+ # minimum time allowed for each command to run, in seconds
+ time_threshold = 0.5
+ limit = 10
+ # the total number of commands
+ num_cmds = 0
+ msg = ""
+ if type == "interval":
+ # interval at which data will be logged
+ interval = d.getVar("BB_HEARTBEAT_EVENT", False)
+ if interval is None:
+ bb.warn("buildstats: Collecting host data at intervals failed. Set BB_HEARTBEAT_EVENT=\"<interval>\" in conf/local.conf for the interval at which host data will be logged.")
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ return
+ interval = int(interval)
+ cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_INTERVAL')
+ msg = "Host Stats: Collecting data at %d second intervals.\n" % interval
+ if cmds is None:
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ bb.warn("buildstats: Collecting host data at intervals failed. Set BB_LOG_HOST_STAT_CMDS_INTERVAL=\"command1 ; command2 ; ... \" in conf/local.conf.")
+ return
+ if type == "failure":
+ cmds = d.getVar('BB_LOG_HOST_STAT_CMDS_FAILURE')
+ msg = "Host Stats: Collecting data on failure.\n"
+ msg += "Failed at task: " + e.task + "\n"
+ if cmds is None:
+ d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
+ bb.warn("buildstats: Collecting host data on failure failed. Set BB_LOG_HOST_STAT_CMDS_FAILURE=\"command1 ; command2 ; ... \" in conf/local.conf.")
+ return
+ c_san = []
+ for cmd in cmds.split(";"):
+ if len(cmd) == 0:
+ continue
+ num_cmds += 1
+ c_san.append(cmd)
+ if num_cmds == 0:
+ if type == "interval":
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ if type == "failure":
+ d.setVar("BB_LOG_HOST_STAT_ON_FAILURE", "0")
+ return
+
+ # return if the interval is not enough to run all commands within the specified BB_HEARTBEAT_EVENT interval
+ if type == "interval":
+ limit = interval / num_cmds
+ if limit <= time_threshold:
+ d.setVar("BB_LOG_HOST_STAT_ON_INTERVAL", "0")
+ bb.warn("buildstats: Collecting host data failed. BB_HEARTBEAT_EVENT interval not enough to run the specified commands. Increase value of BB_HEARTBEAT_EVENT in conf/local.conf.")
+ return
+
+ # set the environment variables
+ path = d.getVar("PATH")
+ opath = d.getVar("BB_ORIGENV", False).getVar("PATH")
+ ospath = os.environ['PATH']
+ os.environ['PATH'] = path + ":" + opath + ":" + ospath
+ with open(logfile, "a") as f:
+ f.write("Event Time: %f\nDate: %s\n" % (e.time, datetime.datetime.now()))
+ f.write("%s" % msg)
+ for c in c_san:
+ try:
+ output = subprocess.check_output(c.split(), stderr=subprocess.STDOUT, timeout=limit).decode('utf-8')
+ except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
+ output = "Error running command: %s\n%s\n" % (c, err)
+ f.write("%s\n%s\n" % (c, output))
+ # reset the environment
+ os.environ['PATH'] = ospath
+
python run_buildstats () {
import bb.build
import bb.event
import time, subprocess, platform
bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF'))
+ ########################################################################
+ # bitbake fires HeartbeatEvent even before a build has been
+ # triggered, causing BUILDNAME to be None
+ ########################################################################
+ if bn is not None:
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ taskdir = os.path.join(bsdir, d.getVar('PF'))
+ if isinstance(e, bb.event.HeartbeatEvent) and bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_INTERVAL")):
+ bb.utils.mkdirhier(bsdir)
+ write_host_data(os.path.join(bsdir, "host_stats_interval"), e, d, "interval")
if isinstance(e, bb.event.BuildStarted):
########################################################################
@@ -138,7 +212,7 @@ python run_buildstats () {
if x:
f.write(x + " ")
f.write("\n")
- f.write("Build Started: %0.2f \n" % time.time())
+ f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
elif isinstance(e, bb.event.BuildCompleted):
build_time = os.path.join(bsdir, "build_stats")
@@ -188,10 +262,12 @@ python run_buildstats () {
build_status = os.path.join(bsdir, "build_stats")
with open(build_status, "a") as f:
f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
+ if bb.utils.to_boolean(d.getVar("BB_LOG_HOST_STAT_ON_FAILURE")):
+ write_host_data(os.path.join(bsdir, "host_stats_%s_failure" % e.task), e, d, "failure")
}
addhandler run_buildstats
-run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
+run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.event.HeartbeatEvent bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
python runqueue_stats () {
import buildstats
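
The new host-data hooks are driven purely by configuration. A plausible local.conf fragment (interval and command list are illustrative; note that write_host_data runs each command without a shell, so pipes and redirection will not work):

    BB_HEARTBEAT_EVENT = "60"
    BB_LOG_HOST_STAT_ON_INTERVAL = "1"
    BB_LOG_HOST_STAT_CMDS_INTERVAL = "uptime; free -m; df -h"
    BB_LOG_HOST_STAT_ON_FAILURE = "1"
    BB_LOG_HOST_STAT_CMDS_FAILURE = "uptime; free -m"
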
diff --git a/meta/classes/cargo.bbclass b/meta/classes/cargo.bbclass
new file mode 100644
index 0000000000..4a780a501f
--- /dev/null
+++ b/meta/classes/cargo.bbclass
@@ -0,0 +1,90 @@
+##
+## Purpose:
+## This class is used by any recipes that are built using
+## Cargo.
+
+inherit cargo_common
+
+# the binary we will use
+CARGO = "cargo"
+
+# We need cargo to compile for the target
+BASEDEPENDS:append = " cargo-native"
+
+# Ensure we get the right rust variant
+DEPENDS:append:class-target = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-nativesdk = " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+DEPENDS:append:class-native = " rust-native"
+
+# Enable build separation
+B = "${WORKDIR}/build"
+
+# In case something fails in the build process, give a bit more feedback on
+# where the issue occurred
+export RUST_BACKTRACE = "1"
+
+# The directory containing the Cargo.toml, relative to the root directory. By
+# default, assume the Cargo.toml sits directly in the root directory.
+CARGO_SRC_DIR ??= ""
+
+# The actual path to the Cargo.toml
+MANIFEST_PATH ??= "${S}/${CARGO_SRC_DIR}/Cargo.toml"
+
+RUSTFLAGS ??= ""
+BUILD_MODE = "${@['--release', ''][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_BUILD_FLAGS = "-v --target ${HOST_SYS} ${BUILD_MODE} --manifest-path=${MANIFEST_PATH}"
+
+# This is based on the content of CARGO_BUILD_FLAGS and generally will need to
+# change if CARGO_BUILD_FLAGS changes.
+BUILD_DIR = "${@['release', 'debug'][d.getVar('DEBUG_BUILD') == '1']}"
+CARGO_TARGET_SUBDIR="${HOST_SYS}/${BUILD_DIR}"
+oe_cargo_build () {
+ export RUSTFLAGS="${RUSTFLAGS}"
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote "cargo = $(which ${CARGO})"
+ bbnote "rustc = $(which ${RUSTC})"
+ bbnote "${CARGO} build ${CARGO_BUILD_FLAGS} $@"
+ "${CARGO}" build ${CARGO_BUILD_FLAGS} "$@"
+}
+
+do_compile[progress] = "outof:\s+(\d+)/(\d+)"
+cargo_do_compile () {
+ oe_cargo_fix_env
+ oe_cargo_build
+}
+
+cargo_do_install () {
+ local have_installed=false
+ for tgt in "${B}/target/${CARGO_TARGET_SUBDIR}/"*; do
+ case $tgt in
+ *.so|*.rlib)
+ install -d "${D}${rustlibdir}"
+ install -m755 "$tgt" "${D}${rustlibdir}"
+ have_installed=true
+ ;;
+ *examples)
+ if [ -d "$tgt" ]; then
+ for example in "$tgt/"*; do
+ if [ -f "$example" ] && [ -x "$example" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$example" "${D}${bindir}"
+ have_installed=true
+ fi
+ done
+ fi
+ ;;
+ *)
+ if [ -f "$tgt" ] && [ -x "$tgt" ]; then
+ install -d "${D}${bindir}"
+ install -m755 "$tgt" "${D}${bindir}"
+ have_installed=true
+ fi
+ ;;
+ esac
+ done
+ if ! $have_installed; then
+ die "Did not find anything to install"
+ fi
+}
+
+EXPORT_FUNCTIONS do_compile do_install
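
A minimal consumer of this class might look like the following hypothetical recipe (name and URL are illustrative, and LIC_FILES_CHKSUM is omitted for brevity):

    SUMMARY = "Hypothetical Rust hello-world built with cargo"
    LICENSE = "MIT"

    inherit cargo

    SRC_URI = "git://example.com/hello-rust.git;protocol=https;branch=main"
    SRCREV = "..."
    S = "${WORKDIR}/git"

    # Cargo.toml sits in the repository root, so CARGO_SRC_DIR keeps its default
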
diff --git a/meta/classes/cargo_common.bbclass b/meta/classes/cargo_common.bbclass
new file mode 100644
index 0000000000..90fad75415
--- /dev/null
+++ b/meta/classes/cargo_common.bbclass
@@ -0,0 +1,124 @@
+##
+## Purpose:
+## This class supports building with cargo. It must be
+## separate from cargo.bbclass because Rust itself now
+## builds with Cargo but cannot use cargo.bbclass, which
+## depends on and assumes that Rust & Cargo are already
+## installed. It is therefore used both by cargo.bbclass
+## and by the Rust recipes themselves.
+##
+
+# add crate fetch support
+inherit rust-common
+
+# Where we download our registry and dependencies to
+export CARGO_HOME = "${WORKDIR}/cargo_home"
+
+# The pkg-config-rs library used by cargo build scripts disables itself when
+# cross compiling unless this is defined. We set up pkg-config appropriately
+# for cross compilation, so tell it we know better than it.
+export PKG_CONFIG_ALLOW_CROSS = "1"
+
+# Don't instruct cargo to use crates downloaded by bitbake. Some rust packages,
+# for example the rust compiler itself, come with their own vendored sources.
+# Specifying two [source.crates-io] sections will not work.
+CARGO_DISABLE_BITBAKE_VENDORING ?= "0"
+
+# Used by libstd-rs to point to the vendor dir included in rustc src
+CARGO_VENDORING_DIRECTORY ?= "${CARGO_HOME}/bitbake"
+
+CARGO_RUST_TARGET_CCLD ?= "${RUST_TARGET_CCLD}"
+cargo_common_do_configure () {
+ mkdir -p ${CARGO_HOME}/bitbake
+
+ cat <<- EOF > ${CARGO_HOME}/config
+ # EXTRA_OECARGO_PATHS
+ paths = [
+ $(for p in ${EXTRA_OECARGO_PATHS}; do echo \"$p\",; done)
+ ]
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # Local mirror vendored by bitbake
+ [source.bitbake]
+ directory = "${CARGO_VENDORING_DIRECTORY}"
+ EOF
+
+ if [ -z "${EXTERNALSRC}" ] && [ ${CARGO_DISABLE_BITBAKE_VENDORING} = "0" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [source.crates-io]
+ replace-with = "bitbake"
+ local-registry = "/nonexistant"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [http]
+ # Multiplexing can't be enabled because http2 can't be enabled
+ # in curl-native without dependency loops
+ multiplexing = false
+
+	# Ignore the hard-coded and incorrect path to certificates
+ cainfo = "${STAGING_ETCDIR_NATIVE}/ssl/certs/ca-certificates.crt"
+
+ EOF
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # HOST_SYS
+ [target.${HOST_SYS}]
+ linker = "${CARGO_RUST_TARGET_CCLD}"
+ EOF
+
+ if [ "${HOST_SYS}" != "${BUILD_SYS}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ # BUILD_SYS
+ [target.${BUILD_SYS}]
+ linker = "${RUST_BUILD_CCLD}"
+ EOF
+ fi
+
+	# Put the build output in the build directory preferred by bitbake
+	# instead of in the source directory, unless the two are the same
+ if [ "${B}" != "${S}" ]; then
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [build]
+	# Use an out-of-tree build destination to avoid polluting the source tree
+ target-dir = "${B}/target"
+ EOF
+ fi
+
+ cat <<- EOF >> ${CARGO_HOME}/config
+
+ [term]
+ progress.when = 'always'
+ progress.width = 80
+ EOF
+}
+
+oe_cargo_fix_env () {
+ export CC="${RUST_TARGET_CC}"
+ export CXX="${RUST_TARGET_CXX}"
+ export CFLAGS="${CFLAGS}"
+ export CXXFLAGS="${CXXFLAGS}"
+ export AR="${AR}"
+ export TARGET_CC="${RUST_TARGET_CC}"
+ export TARGET_CXX="${RUST_TARGET_CXX}"
+ export TARGET_CFLAGS="${CFLAGS}"
+ export TARGET_CXXFLAGS="${CXXFLAGS}"
+ export TARGET_AR="${AR}"
+ export HOST_CC="${RUST_BUILD_CC}"
+ export HOST_CXX="${RUST_BUILD_CXX}"
+ export HOST_CFLAGS="${BUILD_CFLAGS}"
+ export HOST_CXXFLAGS="${BUILD_CXXFLAGS}"
+ export HOST_AR="${BUILD_AR}"
+}
+
+EXTRA_OECARGO_PATHS ??= ""
+
+EXPORT_FUNCTIONS do_configure
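
As the comments above note, recipes that ship their own vendored crates must opt out of the crates-io replacement; the opt-out in a hypothetical recipe:

    # this source tree already carries a vendor/ directory with all crates,
    # so do not redirect [source.crates-io] to the bitbake-managed mirror
    CARGO_DISABLE_BITBAKE_VENDORING = "1"
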
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
index b5457359ca..4532894c57 100644
--- a/meta/classes/ccache.bbclass
+++ b/meta/classes/ccache.bbclass
@@ -33,6 +33,10 @@ export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
+# Fixes errors such as:
+# ccache: error: Failed to create directory /run/user/0/ccache-tmp: Permission denied
+export CCACHE_TEMPDIR ?= "${CCACHE_DIR}/tmp"
+
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
# its hash. Without this the cache would be invalidated every time
@@ -45,7 +49,7 @@ python() {
"""
pn = d.getVar('PN')
# quilt-native doesn't need ccache since no c files
- if not (pn in ('ccache-native', 'quilt-native') or
+ if not (bb.data.inherits_class("native", d) or
bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
d.appendVar('DEPENDS', ' ccache-native')
d.setVar('CCACHE', 'ccache ')
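
ccache support is typically enabled globally and then disabled per recipe where it misbehaves; an illustrative local.conf fragment (the recipe name is hypothetical):

    INHERIT += "ccache"
    # opt a single recipe out of ccache
    CCACHE_DISABLE:pn-example-recipe = "1"
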
diff --git a/meta/classes/ccmake.bbclass b/meta/classes/ccmake.bbclass
new file mode 100644
index 0000000000..df5134a108
--- /dev/null
+++ b/meta/classes/ccmake.bbclass
@@ -0,0 +1,97 @@
+inherit terminal
+
+python do_ccmake() {
+ import shutil
+
+ # copy current config for diffing
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config):
+ shutil.copy(config, config + ".orig")
+
+ oe_terminal(d.expand("ccmake ${OECMAKE_GENERATOR_ARGS} ${OECMAKE_SOURCEPATH} -Wno-dev"),
+ d.getVar("PN") + " - ccmake", d)
+
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+ # the cmake class uses cmake --build, which will by default
+            # the cmake class uses cmake --build, which by default
+            # regenerates the configuration; simply mark the compile step
+            # as tainted to ensure it is re-run
+ bb.build.write_taint('do_compile', d)
+
+}
+do_ccmake[depends] += "cmake-native:do_populate_sysroot"
+do_ccmake[nostamp] = "1"
+do_ccmake[dirs] = "${B}"
+addtask ccmake after do_configure
+
+def cmake_parse_config_cache(path):
+ with open(path, "r") as f:
+ for i in f:
+ i = i.rstrip("\n")
+ if len(i) == 0 or i.startswith("//") or i.startswith("#"):
+ continue # empty or comment
+ key, value = i.split("=", 1)
+ key, keytype = key.split(":")
+ if keytype in ["INTERNAL", "STATIC"]:
+ continue # skip internal and static config options
+ yield key, keytype, value
+
+def cmake_diff_config_vars(a, b):
+ removed, added = [], []
+
+ for ak, akt, av in a:
+ found = False
+ for bk, bkt, bv in b:
+ if bk == ak:
+ found = True
+ if bkt != akt or bv != av: # changed
+ removed.append((ak, akt, av))
+ added.append((bk, bkt, bv))
+ break
+ # remove any missing from b
+ if not found:
+ removed.append((ak, akt, av))
+
+ # add any missing from a
+ for bk, bkt, bv in b:
+ if not any(bk == ak for ak, akt, av in a):
+ added.append((bk, bkt, bv))
+
+ return removed, added
+
+python do_ccmake_diffconfig() {
+ import shutil
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+ # scan the changed options
+ old = list(cmake_parse_config_cache(config + ".orig"))
+ new = list(cmake_parse_config_cache(config))
+ _, added = cmake_diff_config_vars(old, new)
+
+ if len(added) != 0:
+ with open(d.expand("${WORKDIR}/configuration.inc"), "w") as f:
+ f.write("EXTRA_OECMAKE += \" \\\n")
+ for k, kt, v in added:
+ escaped = v if " " not in v else "\"{0}\"".format(v)
+ f.write(" -D{0}:{1}={2} \\\n".format(k, kt, escaped))
+ f.write(" \"\n")
+ bb.plain("Configuration recipe fragment written to: {0}".format(d.expand("${WORKDIR}/configuration.inc")))
+
+ with open(d.expand("${WORKDIR}/site-file.cmake"), "w") as f:
+ for k, kt, v in added:
+ f.write("SET({0} \"{1}\" CACHE {2} \"\")\n".format(k, v, kt))
+ bb.plain("Configuration cmake fragment written to: {0}".format(d.expand("${WORKDIR}/site-file.cmake")))
+
+ # restore the original config
+ shutil.copy(config + ".orig", config)
+ else:
+ bb.plain("No configuration differences, skipping configuration fragment generation.")
+ else:
+ bb.fatal("No config files found. Did you run ccmake?")
+}
+do_ccmake_diffconfig[nostamp] = "1"
+do_ccmake_diffconfig[dirs] = "${B}"
+addtask ccmake_diffconfig
+
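
The intended workflow for these two tasks is interactive: adjust the CMake cache, then export the delta as reusable fragments. Assuming a recipe named example-recipe:

    $ bitbake -c ccmake example-recipe             # edit options interactively
    $ bitbake -c ccmake_diffconfig example-recipe  # write configuration.inc and site-file.cmake
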
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index ad3c3975a5..26b984c4db 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -1,17 +1,20 @@
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
-def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
- import subprocess as sub
-
- p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- out, err = p.communicate()
- # If returned successfully, process stdout for results
- if p.returncode != 0:
+def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
+ import subprocess, oe.qa
+
+ with oe.qa.ELFFile(fpath) as elf:
+ try:
+ elf.open()
+ except oe.qa.NotELFFileError:
+ return
+
+ try:
+ out = subprocess.check_output([cmd, "-l", fpath], universal_newlines=True)
+ except subprocess.CalledProcessError:
return
- out = out.decode('utf-8')
-
# Handle RUNPATH as well as RPATH
out = out.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
@@ -39,14 +42,18 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
# if we have modified some rpaths call chrpath to update the binary
if modified:
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath, args))
- p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- out, err = p.communicate()
- if p.returncode != 0:
- bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN'), p.returncode, out, err))
+ try:
+ subprocess.check_output([cmd, "-r", args, fpath],
+ stderr=subprocess.PIPE, universal_newlines=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("chrpath command failed with exit code %d:\n%s\n%s" % (e.returncode, e.stdout, e.stderr))
-def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
@@ -61,11 +68,18 @@ def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
if baseprefix not in rpath:
continue
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
-def process_dir (rootdir, directory, d):
+def process_dir(rootdir, directory, d, break_hardlinks = False):
+ bb.debug(2, "Checking %s for binaries to process" % directory)
+ if not os.path.exists(directory):
+ return
+
import stat
rootdir = os.path.normpath(rootdir)
@@ -74,10 +88,6 @@ def process_dir (rootdir, directory, d):
baseprefix = os.path.normpath(d.expand('${base_prefix}'))
hostos = d.getVar("HOST_OS")
- #bb.debug("Checking %s for binaries to process" % directory)
- if not os.path.exists(directory):
- return
-
if "linux" in hostos:
process_file = process_file_linux
elif "darwin" in hostos:
@@ -95,7 +105,7 @@ def process_dir (rootdir, directory, d):
continue
if os.path.isdir(fpath):
- process_dir(rootdir, fpath, d)
+ process_dir(rootdir, fpath, d, break_hardlinks = break_hardlinks)
else:
#bb.note("Testing %s for relocatability" % fpath)
@@ -108,8 +118,9 @@ def process_dir (rootdir, directory, d):
else:
# Temporarily make the file writeable so we can chrpath it
os.chmod(fpath, perms|stat.S_IRWXU)
- process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
-
+
+ process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = break_hardlinks)
+
if perms:
os.chmod(fpath, perms)
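
Callers of process_dir can now request that hardlinks be broken before a binary is rewritten, so an rpath edit does not leak into other links to the same inode. A sketch of a caller in a class inheriting chrpath (the task name is illustrative):

    python relocate_staged_binaries () {
        rootdir = d.expand('${SYSROOT_DESTDIR}')
        # rewrite RPATH/RUNPATH entries under rootdir, copying hardlinked
        # files first so inodes shared outside this tree stay untouched
        process_dir(rootdir, rootdir, d, break_hardlinks = True)
    }
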
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
deleted file mode 100644
index 5edab0e55d..0000000000
--- a/meta/classes/clutter.bbclass
+++ /dev/null
@@ -1,17 +0,0 @@
-def get_minor_dir(v):
- import re
- m = re.match(r"^([0-9]+)\.([0-9]+)", v)
- return "%s.%s" % (m.group(1), m.group(2))
-
-def get_real_name(n):
- import re
- m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
- return "%s" % (m.group(1))
-
-VERMINOR = "${@get_minor_dir("${PV}")}"
-REALNAME = "${@get_real_name("${BPN}")}"
-
-SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
-S = "${WORKDIR}/${REALNAME}-${PV}"
-
-inherit autotools pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index fa7f68c99b..d9bcddbdbb 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,7 +1,7 @@
# Path to the CMake file to process.
OECMAKE_SOURCEPATH ??= "${S}"
-DEPENDS_prepend = "cmake-native "
+DEPENDS:prepend = "cmake-native "
B = "${WORKDIR}/build"
# What CMake generator to use.
@@ -10,31 +10,17 @@ OECMAKE_GENERATOR ?= "Ninja"
python() {
generator = d.getVar("OECMAKE_GENERATOR")
- if generator == "Unix Makefiles":
- args = "-G 'Unix Makefiles' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
+ if "Unix Makefiles" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
d.setVar("OECMAKE_GENERATOR_ARGS", args)
d.setVarFlag("do_compile", "progress", "percent")
- elif generator == "Ninja":
+ elif "Ninja" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
d.appendVar("DEPENDS", " ninja-native")
- d.setVar("OECMAKE_GENERATOR_ARGS", "-G Ninja -DCMAKE_MAKE_PROGRAM=ninja")
+ d.setVar("OECMAKE_GENERATOR_ARGS", args)
d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
else:
bb.fatal("Unknown CMake Generator %s" % generator)
-
- # C/C++ Compiler (without cpu arch/tune arguments)
- if not d.getVar('OECMAKE_C_COMPILER'):
- cc_list = d.getVar('CC').split()
- if cc_list[0] == 'ccache':
- d.setVar('OECMAKE_C_COMPILER', '%s %s' % (cc_list[0], cc_list[1]))
- else:
- d.setVar('OECMAKE_C_COMPILER', cc_list[0])
-
- if not d.getVar('OECMAKE_CXX_COMPILER'):
- cxx_list = d.getVar('CXX').split()
- if cxx_list[0] == 'ccache':
- d.setVar('OECMAKE_CXX_COMPILER', '%s %s' % (cxx_list[0], cxx_list[1]))
- else:
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
}
OECMAKE_AR ?= "${AR}"
@@ -45,33 +31,59 @@ OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
-CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
+
+def oecmake_map_compiler(compiler, d):
+ args = d.getVar(compiler).split()
+ if args[0] == "ccache":
+ return args[1], args[0]
+ return args[0], ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}"
+OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}"
+OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}"
+OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}"
+
+# clear compiler vars for allarch to avoid sig hash difference
+OECMAKE_C_COMPILER_allarch = ""
+OECMAKE_C_COMPILER_LAUNCHER_allarch = ""
+OECMAKE_CXX_COMPILER_allarch = ""
+OECMAKE_CXX_COMPILER_LAUNCHER_allarch = ""
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
+OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM:class-native = "BOTH"
-EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OECMAKE:append = " ${PACKAGECONFIG_CONFARGS}"
-EXTRA_OECMAKE_BUILD_prepend_task-compile = "${PARALLEL_MAKE} "
-EXTRA_OECMAKE_BUILD_prepend_task-install = "${PARALLEL_MAKEINST} "
+export CMAKE_BUILD_PARALLEL_LEVEL
+CMAKE_BUILD_PARALLEL_LEVEL:task-compile = "${@oe.utils.parallel_make(d, False)}"
+CMAKE_BUILD_PARALLEL_LEVEL:task-install = "${@oe.utils.parallel_make(d, True)}"
OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
+def map_host_os_to_system_name(host_os):
+ if host_os.startswith('mingw'):
+ return 'Windows'
+ if host_os.startswith('linux'):
+ return 'Linux'
+ return host_os
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
-def map_target_arch_to_uname_arch(target_arch):
- if target_arch == "powerpc":
+def map_host_arch_to_uname_arch(host_arch):
+ if host_arch == "powerpc":
return "ppc"
- if target_arch == "powerpc64":
+ if host_arch == "powerpc64le":
+ return "ppc64le"
+ if host_arch == "powerpc64":
return "ppc64"
- return target_arch
+ return host_arch
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
@@ -81,12 +93,15 @@ cmake_do_generate_toolchain_file() {
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
$cmake_crosscompiling
-set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
+set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
+set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
+find_program( CMAKE_AR ${OECMAKE_AR} DOC "Archiver" REQUIRED )
+
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
@@ -98,11 +113,12 @@ set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
# only search in the paths provided so cmake doesnt pick
# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+set( CMAKE_PROGRAM_PATH "/" )
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
@@ -111,7 +127,10 @@ set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
# directory as rpath by default
set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
-# Use native cmake modules
+# Use RPATHs relative to build directory for reproducibility
+set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
+
+# Use our cmake modules
list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
# add for non /usr/lib libdir, e.g. /usr/lib64
@@ -128,16 +147,14 @@ addtask generate_toolchain_file after do_patch before do_configure
CONFIGURE_FILES = "CMakeLists.txt"
+do_configure[cleandirs] = "${@d.getVar('B') if d.getVar('S') != d.getVar('B') else ''}"
+
cmake_do_configure() {
if [ "${OECMAKE_BUILDPATH}" ]; then
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
fi
- if [ "${S}" != "${B}" ]; then
- rm -rf ${B}
- mkdir -p ${B}
- cd ${B}
- else
+ if [ "${S}" = "${B}" ]; then
find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
fi
@@ -153,26 +170,40 @@ cmake_do_configure() {
$oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
-	  -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix'))} \
+	  -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
+ -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
+ -DPython_EXECUTABLE:PATH=${PYTHON} \
+ -DPython3_EXECUTABLE:PATH=${PYTHON} \
+ -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
- -DCMAKE_VERBOSE_MAKEFILE=1 \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
+ -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON \
+ -DFETCHCONTENT_FULLY_DISCONNECTED=ON \
${EXTRA_OECMAKE} \
-Wno-dev
}
+# To disable verbose cmake logs for a given recipe or globally, add the
+# following to the configuration metadata (e.g. local.conf):
+#
+# CMAKE_VERBOSE = ""
+#
+
+CMAKE_VERBOSE ??= "VERBOSE=1"
+
+# Then run do_compile again
cmake_runcmake_build() {
- bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
- eval ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
}
cmake_do_compile() {
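
Matching on substrings rather than exact generator names lets a recipe select CMake's multi-config variants without further class changes, and the ccache split is visible in the generated toolchain file. Illustrative recipe lines:

    # any generator containing "Ninja" is accepted, e.g. the multi-config one
    OECMAKE_GENERATOR = "Ninja Multi-Config"

    # with CC = "ccache arm-poky-linux-gnueabi-gcc", oecmake_map_compiler yields
    #   OECMAKE_C_COMPILER          = "arm-poky-linux-gnueabi-gcc"
    #   OECMAKE_C_COMPILER_LAUNCHER = "ccache"
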
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 7f6df4011b..d319d66ab2 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -1,7 +1,17 @@
+# returns all the elements from SRC_URI that are .cfg files
+def find_cfgs(d):
+    sources = src_patches(d, True)
+    sources_list = []
+ for s in sources:
+ if s.endswith('.cfg'):
+ sources_list.append(s)
+
+ return sources_list
+
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake oldconfig
+ yes '' | oe_runmake oldconfig
}
EXPORT_FUNCTIONS do_configure
@@ -17,22 +27,34 @@ CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
KCONFIG_CONFIG_COMMAND ??= "menuconfig"
+KCONFIG_CONFIG_ROOTDIR ??= "${B}"
python do_menuconfig() {
import shutil
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+
try:
- mtime = os.path.getmtime(".config")
- shutil.copy(".config", ".config.orig")
+ mtime = os.path.getmtime(config)
+ shutil.copy(config, configorig)
except OSError:
mtime = 0
- oe_terminal("${SHELL} -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+    # setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+ d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+ d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+ d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+    # ensure that environment variables are overwritten with this task's 'd' values
+ d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
+ oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
if hasattr(bb.build, 'write_taint'):
try:
- newmtime = os.path.getmtime(".config")
+ newmtime = os.path.getmtime(config)
except OSError:
newmtime = 0
@@ -42,7 +64,7 @@ python do_menuconfig() {
}
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
do_menuconfig[nostamp] = "1"
-do_menuconfig[dirs] = "${B}"
+do_menuconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask menuconfig after do_configure
python do_diffconfig() {
@@ -51,8 +73,8 @@ python do_diffconfig() {
workdir = d.getVar('WORKDIR')
fragment = workdir + '/fragment.cfg'
- configorig = '.config.orig'
- config = '.config'
+ configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig")
+ config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config")
try:
md5newconfig = bb.utils.md5_file(configorig)
@@ -75,5 +97,5 @@ python do_diffconfig() {
}
do_diffconfig[nostamp] = "1"
-do_diffconfig[dirs] = "${B}"
+do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}"
addtask diffconfig
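
find_cfgs pairs naturally with the kernel-style merge_config.sh helper to fold .cfg fragments from SRC_URI into the generated .config. A sketch for a kconfig-based recipe whose sources ship that helper:

    do_configure:append () {
        # merge any .cfg fragments listed in SRC_URI into the base .config
        ${S}/scripts/kconfig/merge_config.sh -m .config ${@" ".join(find_cfgs(d))}
    }
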
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
index d6d11fad26..379b6c169e 100644
--- a/meta/classes/compress_doc.bbclass
+++ b/meta/classes/compress_doc.bbclass
@@ -8,7 +8,7 @@
#
# 3. It is easy to add a new type compression by editing
# local.conf, such as:
-# DOC_COMPRESS_LIST_append = ' abc'
+# DOC_COMPRESS_LIST:append = ' abc'
# DOC_COMPRESS = 'abc'
# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
@@ -225,7 +225,7 @@ python compress_doc_updatealternatives () {
infodir = d.getVar("infodir")
compress_mode = d.getVar('DOC_COMPRESS')
for pkg in (d.getVar('PACKAGES') or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
+ old_names = (d.getVar('ALTERNATIVE:%s' % pkg) or "").split()
new_names = []
for old_name in old_names:
old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
@@ -258,6 +258,6 @@ python compress_doc_updatealternatives () {
new_names.append(new_name)
if new_names:
- d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
+ d.setVar('ALTERNATIVE:%s' % pkg, ' '.join(new_names))
}
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
index a9a2cec68f..84fd3eeb38 100644
--- a/meta/classes/core-image.bbclass
+++ b/meta/classes/core-image.bbclass
@@ -9,6 +9,7 @@
#
# Available IMAGE_FEATURES:
#
+# - weston - Weston Wayland compositor
# - x11 - X server
# - x11-base - X server with minimal environment
# - x11-sato - OpenedHand Sato environment
@@ -26,14 +27,20 @@
# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
# - empty-root-password
# - allow-empty-password
+# - allow-root-login
# - post-install-logging
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
+# - lic-pkgs - license packages for all installed packages in the rootfs, requires
+# LICENSE_CREATE_PACKAGE="1" to be set when building packages too
# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
+# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
# - splash - bootup splash screen
#
+FEATURE_PACKAGES_weston = "packagegroup-core-weston"
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
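
Image features are selected per image recipe or in local.conf; the new
feature maps straight onto its package group, so for example:

    IMAGE_FEATURES += "weston"
    # pulls in FEATURE_PACKAGES_weston, i.e. packagegroup-core-weston
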
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index 867edf8707..93d11e1bee 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,10 +2,10 @@
# cpan-base provides various Perl-related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
+FILES:${PN} += "${libdir}/perl5 ${datadir}/perl5"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
-RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
+RDEPENDS:${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
inherit perl-version
@@ -15,4 +15,13 @@ def is_target(d):
return "no"
PERLLIBDIRS = "${libdir}/perl5"
-PERLLIBDIRS_class-native = "${libdir}/perl5"
+PERLLIBDIRS:class-native = "${libdir}/perl5"
+
+def cpan_upstream_check_pattern(d):
+ for x in (d.getVar('SRC_URI') or '').split(' '):
+ if x.startswith("https://cpan.metacpan.org"):
+ _pattern = x.split('/')[-1].replace(d.getVar('PV'), r'(?P<pver>\d+.\d+)')
+ return _pattern
+ return ''
+
+UPSTREAM_CHECK_REGEX ?= "${@cpan_upstream_check_pattern(d)}"
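
cpan_upstream_check_pattern() derives the release-monitoring regex from the
metacpan tarball name by substituting a version capture group for PV. A
worked sketch of the transformation, using hypothetical recipe values:

    # assuming SRC_URI points at a metacpan tarball and PV = "2.46"
    src = "https://cpan.metacpan.org/authors/id/T/TO/TODDR/XML-Parser-2.46.tar.gz"
    pattern = src.split('/')[-1].replace("2.46", r'(?P<pver>\d+.\d+)')
    # pattern == 'XML-Parser-(?P<pver>\d+.\d+).tar.gz'
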
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index e9908ae4b8..18f1b9d575 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -41,12 +41,12 @@ cpan_do_configure () {
fi
}
-do_configure_append_class-target() {
+do_configure:append:class-target() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
-do_configure_append_class-nativesdk() {
+do_configure:append:class-nativesdk() {
find . -name Makefile | xargs sed -E -i \
-e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
}
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..1a4804a7c5
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,1022 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_INCLUDE_PACKAGED ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents created using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+do_image_complete[depends] = "virtual/kernel:do_create_spdx"
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(b'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
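+# After this transformation a known identifier can be looked up directly,
+# e.g. (illustrative, assuming the stock spdx-licenses.json layout):
+#   d.getVar("SPDX_LICENSE_DATA")["licenses"]["MIT"]["licenseId"] == "MIT"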
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ avail_licenses = available_licenses(d)
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ elif name in avail_licenses:
+ # This license can be found in COMMON_LICENSE_DIR or LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # Error out, as the license was in avail_licenses so should
+ # be on disk somewhere.
+ bb.error("Cannot find text for license %s" % name)
+ else:
+ # If it's not SPDX, or PD, or in avail_licenses, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
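+# Illustrative examples of the conversion (not exhaustive):
+#   "GPL-2.0-only & MIT" -> "GPL-2.0-only AND MIT"
+#   "GPL-2.0-only | MIT" -> "GPL-2.0-only OR MIT"
+#   "CLOSED"             -> "NONE"
+# Anything without an SPDX mapping becomes "LicenseRef-<name>", backed by an
+# entry in the document's hasExtractedLicensingInfos list.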
+
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+ bb.debug(1, 'spdx: There is a bug in the scan of %s, doing nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if filepath.is_file() and not filepath.is_symlink():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
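+# The verification code computed above follows the SPDX package verification
+# algorithm: the SHA1 of the sorted, concatenated SHA1s of every file in the
+# package, so a change to any single file alters the package-level code.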
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
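+# BB_TASKDEPDATA is excluded from the task signature above because its
+# contents vary from build to build; including it would needlessly
+# invalidate the task hash whenever unrelated dependencies change.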
+
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import bb.compress.zstd
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_packaged = d.getVar("SPDX_INCLUDE_PACKAGED") == "1"
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.packageSupplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ for s in d.getVar('SRC_URI').split():
+ if not s.startswith("file://"):
+ recipe.downloadLocation = s
+ break
+ else:
+ recipe.downloadLocation = "NOASSERTION"
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+ # save the CVEs fixed by patches to the source information field of the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes")
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages")
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies in order to archive the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy)
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+ Save the patched source of the recipe in SPDXWORK.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR to make do_unpack do_patch run in another dir.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+ # The changed 'WORKDIR' also changes 'B', so create the 'B' directory
+ # in case any of the following tasks need it to exist (some recipes'
+ # do_patch, for example, requires 'B').
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_kernel_result = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_kernel_result = " + cmd_copy_kernel_result)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+ shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland recipes have no source.
+ if not os.path.exists(spdx_workdir):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages)
+
+ if image_link_name:
+ image_spdx_path = imgdeploydir / (image_name + ".spdx.json")
+ image_spdx_link = imgdeploydir / (image_link_name + ".spdx.json")
+ image_spdx_link.symlink_to(os.path.relpath(image_spdx_path, image_spdx_link.parent))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
+ make_image_link(spdx_tar_path, ".spdx.tar.zst")
+ spdx_index_path = imgdeploydir / (image_name + ".spdx.index.json")
+ make_image_link(spdx_index_path, ".spdx.index.json")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages)
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import bb.compress.zstd
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.packageSupplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = rootfs_deploydir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True)
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
+ with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(index, sort_keys=True).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
+
+ spdx_index_path = rootfs_deploydir / (rootfs_name + ".spdx.index.json")
+ with spdx_index_path.open("w") as f:
+ json.dump(index, f, sort_keys=True)
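
Enabling the class globally is enough to get per-recipe, per-package, image
and SDK documents; a minimal local.conf sketch using only the knobs declared
above:

    INHERIT += "create-spdx"
    # optional: describe sources and ship archives alongside the documents
    SPDX_INCLUDE_SOURCES = "1"
    SPDX_ARCHIVE_SOURCES = "1"
    SPDX_ARCHIVE_PACKAGED = "1"
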
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index f5c9f61595..a0e9d23836 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -36,10 +36,12 @@ python () {
return
tos = d.getVar("TARGET_OS")
- whitelist = []
+ tos_known = ["mingw32"]
extralibcs = [""]
if "musl" in d.getVar("BASECANADIANEXTRAOS"):
extralibcs.append("musl")
+ if "android" in tos:
+ extralibcs.append("android")
for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
for libc in extralibcs:
entry = "linux"
@@ -49,8 +51,8 @@ python () {
entry = entry + "-gnu" + variant
elif libc:
entry = entry + "-" + libc
- whitelist.append(entry)
- if tos not in whitelist:
+ tos_known.append(entry)
+ if tos not in tos_known:
bb.fatal("Building cross-candian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
for n in ["PROVIDES", "DEPENDS"]:
@@ -104,7 +106,7 @@ STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
-PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
+PATH:append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
HOST_ARCH = "${SDK_ARCH}"
@@ -129,7 +131,7 @@ LDFLAGS = "${BUILDSDK_LDFLAGS} \
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
# Path mangling needed by the cross packaging
@@ -153,9 +155,9 @@ base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-FILES_${PN} = "${prefix}"
+FILES:${PN} = "${prefix}"
-export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${exec_prefix}/lib/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
do_populate_sysroot[stamp-extra-info] = ""
@@ -167,7 +169,7 @@ USE_NLS = "${SDKUSE_NLS}"
# and not any particular tune that is enabled.
TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
# If MLPREFIX is set by multilib code, shlibs
# points to the wrong place so force it
SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index f832561daf..9d951076a7 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -7,7 +7,7 @@ EXCLUDE_FROM_WORLD = "1"
CLASSOVERRIDE = "class-cross"
PACKAGES = ""
PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
+PACKAGES_DYNAMIC:class-native = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
@@ -70,13 +70,8 @@ libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
-do_install () {
- oe_runmake 'DESTDIR=${D}' install
-}
-
USE_NLS = "no"
export CC = "${BUILD_CC}"
@@ -97,3 +92,6 @@ python do_addto_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
+
+PATH:prepend = "${COREBASE}/scripts/cross-intercept:"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
index c0c0bfee16..04aecb694e 100644
--- a/meta/classes/crosssdk.bbclass
+++ b/meta/classes/crosssdk.bbclass
@@ -5,9 +5,15 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
MACHINEOVERRIDES = ""
PACKAGE_ARCH = "${SDK_ARCH}"
+
python () {
# set TUNE_PKGARCH to SDK_ARCH
d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
+ # Set features here to prevent appends and distro features backfill
+ # from modifying nativesdk distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
}
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
@@ -37,7 +43,6 @@ target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
baselib = "lib"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
# Need to force this to ensure consistency across architectures
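
The anonymous python above pins the nativesdk DISTRO_FEATURES to
DISTRO_FEATURES_NATIVESDK plus whatever the filter lets through from the
target features. A worked sketch of the set arithmetic, with hypothetical
values:

    # DISTRO_FEATURES_NATIVESDK        = "x11"
    # DISTRO_FEATURES_FILTER_NATIVESDK = "api-documentation"
    # DISTRO_FEATURES (target)         = "systemd api-documentation ipv6"
    features = {"x11"}
    filtered = {"api-documentation"}   # bb.utils.filter() keeps only listed items
    # DISTRO_FEATURES becomes "api-documentation x11"
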
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 743bc08a4f..dfad10c22b 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,56 +20,105 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses; defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
+CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
+CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
+CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
+CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
-# Whitelist for packages (PN)
-CVE_CHECK_PN_WHITELIST = "\
- glibc-locale \
-"
+CVE_CHECK_REPORT_PATCHED ??= "1"
-# Whitelist for CVE and version of package
-CVE_CHECK_CVE_WHITELIST = "{\
- 'CVE-2014-2524': ('6.3','5.2',), \
-}"
+# Skip CVE Check for packages (PN)
+CVE_CHECK_SKIP_RECIPE ?= ""
+
+# Ignore the check for a given list of CVEs. If a CVE is found,
+# then it is considered patched. The value is a string containing
+# space separated CVE values:
+#
+# CVE_CHECK_IGNORE = 'CVE-2014-2524 CVE-2018-1234'
+#
+CVE_CHECK_IGNORE ?= ""
+
+# Layers to be excluded
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# set to "alphabetical" for version using single alphabetical character as increment release
+CVE_VERSION_SUFFIX ??= ""
+
+python cve_save_summary_handler () {
+ import shutil
+ import datetime
+
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+
+ cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp))
+
+ if os.path.exists(cve_tmp_file):
+ shutil.copyfile(cve_tmp_file, cve_summary_file)
+
+ if cve_summary_file and os.path.exists(cve_summary_file):
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+
+ if os.path.exists(os.path.realpath(cvefile_link)):
+ os.remove(cvefile_link)
+ os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+}
+
+addhandler cve_save_summary_handler
+cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
-
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
- patched_cves = get_patches_cves(d)
- patched, unpatched = check_cves(d, patched_cves)
+ from oe.cve_check import get_patched_cves
+
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, cve_data)
+ cve_write_data(d, patched, unpatched, ignored, cve_data)
else:
- bb.note("Failed to update CVE database, skipping CVE check")
+ bb.note("No CVE database found, skipping CVE check")
+
}
-addtask cve_check after do_unpack before do_build
-do_cve_check[depends] = "cve-check-tool-native:do_populate_sysroot cve-check-tool-native:do_populate_cve_db"
+addtask cve_check before do_build after do_fetch
+do_cve_check[depends] = "cve-update-db-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
"""
Delete the file used to gather all the CVE information.
"""
-
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
}
@@ -84,7 +133,7 @@ python cve_check_write_rootfs_manifest () {
import shutil
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
@@ -95,6 +144,7 @@ python cve_check_write_rootfs_manifest () {
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+ bb.utils.mkdirhier(os.path.dirname(manifest_name))
shutil.copyfile(cve_tmp_file, manifest_name)
if manifest_name and os.path.exists(manifest_name):
@@ -106,188 +156,193 @@ python cve_check_write_rootfs_manifest () {
bb.plain("Image CVE report stored in: %s" % manifest_name)
}
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-def get_patches_cves(d):
+def check_cves(d, patched_cves):
"""
- Get patches that solve CVEs using the "CVE: " tag.
+ Connect to the NVD database and find unpatched CVEs.
"""
-
- import re
+ from oe.cve_check import Version
pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
+ real_pv = d.getVar("PV")
+ suffix = d.getVar("CVE_VERSION_SUFFIX")
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+ cves_unpatched = []
+ # CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
+ products = d.getVar("CVE_PRODUCT").split()
+ # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
+ if not products:
+ return ([], [], [])
+ pv = d.getVar("CVE_VERSION").split("+git")[0]
- return patched_cves
+ # If the recipe has been skipped/ignored we return empty lists
+ if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
+ bb.note("Recipe has been skipped by cve-check")
+ return ([], [], [])
-def check_cves(d, patched_cves):
- """
- Run cve-check-tool looking for patched and unpatched CVEs.
- """
+ cve_ignore = d.getVar("CVE_CHECK_IGNORE").split()
- import ast, csv, tempfile, subprocess, io
+ import sqlite3
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
- cves_patched = []
- cves_unpatched = []
- bpn = d.getVar("CVE_PRODUCT")
- # If this has been unset then we're not scanning for CVEs here (for example, image recipes)
- if not bpn:
- return ([], [])
- pv = d.getVar("CVE_VERSION").split("+git")[0]
- cves = " ".join(patched_cves)
- cve_db_dir = d.getVar("CVE_CHECK_DB_DIR")
- cve_whitelist = ast.literal_eval(d.getVar("CVE_CHECK_CVE_WHITELIST"))
- cve_cmd = "cve-check-tool"
- cmd = [cve_cmd, "--no-html", "--skip-update", "--csv", "--not-affected", "-t", "faux", "-d", cve_db_dir]
-
- # If the recipe has been whitlisted we return empty lists
- if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
- bb.note("Recipe has been whitelisted, skipping check")
- return ([], [])
-
- try:
- # Write the faux CSV file to be used with cve-check-tool
- fd, faux = tempfile.mkstemp(prefix="cve-faux-")
- with os.fdopen(fd, "w") as f:
- for pn in bpn.split():
- f.write("%s,%s,%s,\n" % (pn, pv, cves))
- cmd.append(faux)
-
- output = subprocess.check_output(cmd).decode("utf-8")
- bb.debug(2, "Output of command %s:\n%s" % ("\n".join(cmd), output))
- except subprocess.CalledProcessError as e:
- bb.warn("Couldn't check for CVEs: %s (output %s)" % (e, e.output))
- finally:
- os.remove(faux)
-
- for row in csv.reader(io.StringIO(output)):
- # Third row has the unpatched CVEs
- if row[2]:
- for cve in row[2].split():
- # Skip if the CVE has been whitlisted for the current version
- if pv in cve_whitelist.get(cve,[]):
- bb.note("%s-%s has been whitelisted for %s" % (bpn, pv, cve))
+ # For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
+ for product in products:
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "%"
+
+ # Find all relevant CVE IDs.
+ for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve = cverow[0]
+
+ if cve in cve_ignore:
+ bb.note("%s-%s has been ignored for %s" % (product, pv, cve))
+ # TODO: this should be in the report as 'ignored'
+ patched_cves.add(cve)
+ continue
+ elif cve in patched_cves:
+ bb.note("%s has been patched" % (cve))
+ continue
+
+ vulnerable = False
+ for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ (_, _, _, version_start, operator_start, version_end, operator_end) = row
+ #bb.debug(2, "Evaluating row " + str(row))
+
+ if (operator_start == '=' and pv == version_start) or version_start == '-':
+ vulnerable = True
else:
+ if operator_start:
+ try:
+ vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
+ vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
+ except:
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
+ (product, pv, operator_start, version_start, cve))
+ vulnerable_start = False
+ else:
+ vulnerable_start = False
+
+ if operator_end:
+ try:
+ vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
+ vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
+ except:
+ bb.warn("%s: Failed to compare %s %s %s for %s" %
+ (product, pv, operator_end, version_end, cve))
+ vulnerable_end = False
+ else:
+ vulnerable_end = False
+
+ if operator_start and operator_end:
+ vulnerable = vulnerable_start and vulnerable_end
+ else:
+ vulnerable = vulnerable_start or vulnerable_end
+
+ if vulnerable:
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
cves_unpatched.append(cve)
- bb.debug(2, "%s-%s is not patched for %s" % (bpn, pv, cve))
- # Fourth row has patched CVEs
- if row[3]:
- for cve in row[3].split():
- cves_patched.append(cve)
- bb.debug(2, "%s-%s is patched for %s" % (bpn, pv, cve))
+ break
+
+ if not vulnerable:
+ bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
+ # TODO: not patched but not vulnerable
+ patched_cves.add(cve)
+
+ conn.close()
- return (cves_patched, cves_unpatched)
+ return (list(cve_ignore), list(patched_cves), cves_unpatched)
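+# A worked example of the range logic above: for a product row with
+# version_start = "1.0" (operator ">=") and version_end = "2.0"
+# (operator "<"), a recipe at pv "1.5" passes both comparisons and the CVE
+# is reported as unpatched, while pv "2.0" fails the "< 2.0" test and the
+# recipe is treated as not vulnerable.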
def get_cve_info(d, cves):
"""
- Get CVE information from the database used by cve-check-tool.
-
- Unfortunately the only way to get CVE info is set the output to
- html (hard to parse) or query directly the database.
+ Get CVE information from the database.
"""
- try:
- import sqlite3
- except ImportError:
- from pysqlite2 import dbapi2 as sqlite3
+ import sqlite3
cve_data = {}
- db_file = d.getVar("CVE_CHECK_DB_FILE")
- placeholder = ",".join("?" * len(cves))
- query = "SELECT * FROM NVD WHERE id IN (%s)" % placeholder
- conn = sqlite3.connect(db_file)
- cur = conn.cursor()
- for row in cur.execute(query, tuple(cves)):
- cve_data[row[0]] = {}
- cve_data[row[0]]["summary"] = row[1]
- cve_data[row[0]]["score"] = row[2]
- cve_data[row[0]]["modified"] = row[3]
- cve_data[row[0]]["vector"] = row[4]
- conn.close()
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
+
+ for cve in cves:
+ for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cve_data[row[0]] = {}
+ cve_data[row[0]]["summary"] = row[1]
+ cve_data[row[0]]["scorev2"] = row[2]
+ cve_data[row[0]]["scorev3"] = row[3]
+ cve_data[row[0]]["modified"] = row[4]
+ cve_data[row[0]]["vector"] = row[5]
+ conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, cve_data):
+def cve_write_data(d, patched, unpatched, ignored, cve_data):
"""
Write CVE information to WORKDIR, to CVE_CHECK_DIR, and to the
CVE manifest if enabled.
"""
+
cve_file = d.getVar("CVE_CHECK_LOG")
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
+ is_patched = cve in patched
+ if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ continue
+ write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
+ write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in patched:
+ if cve in ignored:
+ write_string += "CVE STATUS: Ignored\n"
+ elif is_patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
write_string += "CVE STATUS: Unpatched\n"
write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
- write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["score"]
+ write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"]
+ write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"]
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
if unpatched_cves:
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
- with open(cve_file, "w") as f:
- bb.note("Writing file %s with CVE information" % cve_file)
- f.write(write_string)
-
- if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR")
- bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN"))
- with open(deploy_file, "w") as f:
+ if write_string:
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
f.write(write_string)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
- with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
- f.write("%s" % write_string)
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
+ f.write("%s" % write_string)
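
The URI-style connection string with mode=ro above opens the database strictly read-only, so a do_cve_check task can never corrupt it or take a write lock that blocks parallel tasks. A minimal standalone sketch of the same access pattern, with a made-up database path and CVE ID:

    import sqlite3

    # mode=ro makes sqlite refuse writes; uri=True tells it to parse the query string
    conn = sqlite3.connect("file:/path/to/nvd.db?mode=ro", uri=True)
    cve_data = {}
    for cve in ("CVE-2020-0001",):  # placeholder ID
        # one parameterised query per CVE, mirroring the class code above
        for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
            cve_data[row[0]] = {"summary": row[1], "scorev2": row[2],
                                "scorev3": row[3], "modified": row[4],
                                "vector": row[5]}
    conn.close()
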
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index 6f8a599ccb..8367be9f37 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -4,7 +4,7 @@
# depends are correct
#
# Custom library package names can be defined setting
-# DEBIANNAME_ + pkgname to the desired name.
+# DEBIANNAME: + pkgname to the desired name.
#
# Better expressed as: ensure all RDEPENDS packages are built before we package
# This means we can't have circular RDEPENDS/RRECOMMENDS
@@ -14,6 +14,10 @@ AUTO_LIBNAME_PKGS = "${PACKAGES}"
inherit package
DEBIANRDEP = "do_packagedata"
+do_package_write_ipk[deptask] = "${DEBIANRDEP}"
+do_package_write_deb[deptask] = "${DEBIANRDEP}"
+do_package_write_tar[deptask] = "${DEBIANRDEP}"
+do_package_write_rpm[deptask] = "${DEBIANRDEP}"
do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
@@ -51,11 +55,11 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg)
+ newpkg = d.getVar('PKG:' + pkg)
if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
+ provs = (d.getVar('RPROVIDES:' + pkg) or "").split()
if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
+ d.appendVar('RPROVIDES:' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
def auto_libname(packages, orig_pkg):
p = lambda var: pathlib.PurePath(d.getVar(var))
@@ -110,10 +114,10 @@ python debian_package_name_hook () {
if soname_result:
(pkgname, devname) = soname_result
for pkg in packages.split():
- if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
+ if (d.getVar('PKG:' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME:' + pkg, False)):
add_rprovides(pkg, d)
continue
- debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
+ debian_pn = d.getVar('DEBIANNAME:' + pkg, False)
if debian_pn:
newpkg = debian_pn
elif pkg == orig_pkg:
@@ -126,7 +130,7 @@ python debian_package_name_hook () {
newpkg = mlpre + newpkg
if newpkg != pkg:
bb.note("debian: renaming %s to %s" % (pkg, newpkg))
- d.setVar('PKG_' + pkg, newpkg)
+ d.setVar('PKG:' + pkg, newpkg)
add_rprovides(pkg, d)
else:
add_rprovides(orig_pkg, d)
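
The same override-syntax conversion applies to the knobs recipes use to steer the renaming; a hedged example with hypothetical package and library names:

    # keep the main package's name untouched
    DEBIAN_NOAUTONAME:${PN} = "1"
    # force a specific Debian-style name for another package
    DEBIANNAME:${PN}-extra = "libfoo-extra1"
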
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
index 6d52908783..7fbffe996b 100644
--- a/meta/classes/deploy.bbclass
+++ b/meta/classes/deploy.bbclass
@@ -7,5 +7,6 @@ python do_deploy_setscene () {
sstate_setscene(d)
}
addtask do_deploy_setscene
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+do_deploy[dirs] = "${B}"
+do_deploy[cleandirs] = "${DEPLOYDIR}"
do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
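
Moving ${DEPLOYDIR} from [dirs] to [cleandirs] means the directory is emptied before do_deploy runs, so stale artifacts from a previous run can no longer be swept into DEPLOY_DIR_IMAGE. Consumers are unchanged; a minimal sketch of a deploying recipe, with a hypothetical file name:

    inherit deploy

    do_deploy() {
        install -m 0644 ${B}/firmware.bin ${DEPLOYDIR}/
    }
    addtask deploy after do_compile before do_build
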
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
index 8fe5a5ed79..2a62ae7bc8 100644
--- a/meta/classes/devicetree.bbclass
+++ b/meta/classes/devicetree.bbclass
@@ -15,10 +15,10 @@
SECTION ?= "bsp"
# The default inclusion of kernel device tree includes and headers means that
-# device trees built with them are at least GPLv2 (and in some cases dual
-# licensed). Default to GPLv2 if the recipe does not specify a license.
-LICENSE ?= "GPLv2"
-LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
+# device trees built with them are at least GPL-2.0-only (and in some cases dual
+# licensed). Default to GPL-2.0-only if the recipe does not specify a license.
+LICENSE ?= "GPL-2.0-only"
+LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0-only;md5=801f80980d171dd6425610833a22dbe6"
INHIBIT_DEFAULT_DEPS = "1"
DEPENDS += "dtc-native"
@@ -27,10 +27,12 @@ inherit deploy kernel-arch
COMPATIBLE_MACHINE ?= "^$"
+PROVIDES = "virtual/dtb"
+
PACKAGE_ARCH = "${MACHINE_ARCH}"
SYSROOT_DIRS += "/boot/devicetree"
-FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
+FILES:${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
S = "${WORKDIR}"
B = "${WORKDIR}/build"
@@ -57,7 +59,7 @@ DT_BOOT_CPU ??= "0"
DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
-DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE}"
+DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
DTC_OFLAGS ?= "-p 0 -@ -H epapr"
python () {
@@ -114,15 +116,18 @@ def devicetree_compile(dtspath, includes, d):
dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
bb.note("Running {0}".format(" ".join(dtcargs)))
- subprocess.run(dtcargs, check = True)
+ subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
python devicetree_do_compile() {
includes = expand_includes("DT_INCLUDE", d)
listpath = d.getVar("DT_FILES_PATH")
for dts in os.listdir(listpath):
- if not dts.endswith(".dts"):
- continue # skip non-.dts files
dtspath = os.path.join(listpath, dts)
+ try:
+ if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
+ continue # skip non-.dts files and non-overlay files
+ except:
+ continue # skip if can't determine if overlay
devicetree_compile(dtspath, includes, d)
}
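
The compile loop now also accepts overlay sources that do not end in .dts. The devicetree_source_is_overlay helper it calls is not part of this hunk; a minimal sketch of what such a check could look like, assuming overlays are identified by the /plugin/ directive dtc requires near the top of the source:

    def devicetree_source_is_overlay(dtspath):
        # scan the source for the overlay marker understood by dtc
        with open(dtspath, "r") as f:
            for line in f:
                if line.lstrip().startswith("/plugin/"):
                    return True
        return False
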
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index fdf7dc100f..62dc958d9a 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -34,7 +34,7 @@ python () {
d.delVarFlag("do_devshell", "fakeroot")
}
-def devpyshell(d):
+def pydevshell(d):
import code
import select
@@ -128,6 +128,7 @@ def devpyshell(d):
more = i.runsource(source, "<pyshell>")
if not more:
buf = []
+ sys.stderr.flush()
prompt(more)
except KeyboardInterrupt:
i.write("\nKeyboardInterrupt\n")
@@ -139,17 +140,17 @@ def devpyshell(d):
os.kill(child, signal.SIGTERM)
break
-python do_devpyshell() {
+python do_pydevshell() {
import signal
try:
- devpyshell(d)
+ pydevshell(d)
except SystemExit:
# Stop the SIGTERM above causing an error exit code
return
finally:
return
}
-addtask devpyshell after do_patch
+addtask pydevshell after do_patch
-do_devpyshell[nostamp] = "1"
+do_pydevshell[nostamp] = "1"
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 1372e32c9e..41900e651f 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -97,15 +97,15 @@ python devtool_post_unpack() {
local_files = oe.recipeutils.get_recipe_local_files(d)
if is_kernel_yocto:
- for key in local_files.copy():
- if key.endswith('scc'):
- sccfile = open(local_files[key], 'r')
+ for key in [f for f in local_files if f.endswith('scc')]:
+ with open(local_files[key], 'r') as sccfile:
for l in sccfile:
line = l.split()
if line and line[0] in ('kconf', 'patch'):
- local_files[line[-1]] = os.path.join(os.path.dirname(local_files[key]), line[-1])
- shutil.copy2(os.path.join(os.path.dirname(local_files[key]), line[-1]), workdir)
- sccfile.close()
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if cfg not in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
# Ignore local files with subdir={BP}
srcabspath = os.path.abspath(srcsubdir)
@@ -199,6 +199,7 @@ python devtool_post_patch() {
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
@@ -216,7 +217,8 @@ python devtool_post_patch() {
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
- localdata.appendVar('OVERRIDES', ':%s' % override)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
index 7780c5482c..ba6dc4136c 100644
--- a/meta/classes/devupstream.bbclass
+++ b/meta/classes/devupstream.bbclass
@@ -4,8 +4,8 @@
#
# Usage:
# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
-# SRCREV_class-devupstream = "abcdef"
+# SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
+# SRCREV:class-devupstream = "abcdef"
#
# If the first entry in SRC_URI is a git: URL then S is rewritten to
# WORKDIR/git.
@@ -16,8 +16,6 @@
# - If the fetcher requires native tools (such as subversion-native) then
# bitbake won't be able to add them automatically.
-CLASSOVERRIDE .= ":class-devupstream"
-
python devupstream_virtclass_handler () {
# Do nothing if this is inherited, as it's for BBCLASSEXTEND
if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
@@ -25,23 +23,32 @@ python devupstream_virtclass_handler () {
return
variant = d.getVar("BBEXTENDVARIANT")
- if variant not in ("target"):
- bb.error("Pass the variant when using devupstream, for example devupstream:target")
+ if variant not in ("target", "native"):
+ bb.error("Unsupported variant %s. Pass the variant when using devupstream, for example devupstream:target" % variant)
return
    # Development releases are never preferred by default
d.setVar("DEFAULT_PREFERENCE", "-1")
- uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
+ src_uri = d.getVar("SRC_URI:class-devupstream") or d.getVar("SRC_URI")
+ uri = bb.fetch2.URI(src_uri.split()[0])
- if uri.scheme == "git":
+ if uri.scheme == "git" and not d.getVar("S:class-devupstream"):
d.setVar("S", "${WORKDIR}/git")
# Modify the PV if the recipe hasn't already overridden it
pv = d.getVar("PV")
proto_marker = "+" + uri.scheme
- if proto_marker not in pv:
+ if proto_marker not in pv and not d.getVar("PV:class-devupstream"):
d.setVar("PV", pv + proto_marker + "${SRCPV}")
+
+ if variant == "native":
+ pn = d.getVar("PN")
+ d.setVar("PN", "%s-native" % (pn))
+ fn = d.getVar("FILE")
+ bb.parse.BBHandler.inherit("native", fn, 0, d)
+
+ d.appendVar("CLASSOVERRIDE", ":class-devupstream")
}
addhandler devupstream_virtclass_handler
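
With the native variant accepted, a recipe can track upstream for its -native counterpart as well; a hedged example following the usage shown in the class comments (URL and revision are placeholders):

    BBCLASSEXTEND = "devupstream:native"
    SRC_URI:class-devupstream = "git://git.example.com/example;branch=master"
    SRCREV:class-devupstream = "abcdef"
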
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
index eeaa3b44cb..8124a8ca27 100644
--- a/meta/classes/distro_features_check.bbclass
+++ b/meta/classes/distro_features_check.bbclass
@@ -1,32 +1,7 @@
-# Allow checking of required and conflicting DISTRO_FEATURES
-#
-# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
-# in DISTRO_FEATURES.
-# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
-# in DISTRO_FEATURES.
-# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
-# DISTRO_FEATURES.
-#
-# Copyright 2013 (C) O.S. Systems Software LTDA.
+# Temporarily provide fallback to the old name of the class
-python () {
- # Assume at least one var is set.
- distro_features = set((d.getVar('DISTRO_FEATURES') or '').split())
-
- any_of_distro_features = set((d.getVar('ANY_OF_DISTRO_FEATURES') or '').split())
- if any_of_distro_features:
- if set.isdisjoint(any_of_distro_features, distro_features):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % ' '.join(any_of_distro_features))
-
- required_distro_features = set((d.getVar('REQUIRED_DISTRO_FEATURES') or '').split())
- if required_distro_features:
- missing = set.difference(required_distro_features, distro_features)
- if missing:
- raise bb.parse.SkipRecipe("missing required distro feature%s '%s' (not in DISTRO_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
-
- conflict_distro_features = set((d.getVar('CONFLICT_DISTRO_FEATURES') or '').split())
- if conflict_distro_features:
- conflicts = set.intersection(conflict_distro_features, distro_features)
- if conflicts:
- raise bb.parse.SkipRecipe("conflicting distro feature%s '%s' (in DISTRO_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+python __anonymous() {
+ bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
}
+
+inherit features_check
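
Existing users only need to swap the inherit to silence the deprecation warning; the DISTRO_FEATURES variables keep working unchanged. For example:

    # before
    inherit distro_features_check
    # after
    inherit features_check

    REQUIRED_DISTRO_FEATURES = "x11"
    CONFLICT_DISTRO_FEATURES = "wayland"
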
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
index 9f4db0d771..bf3a2b2090 100644
--- a/meta/classes/distrooverrides.bbclass
+++ b/meta/classes/distrooverrides.bbclass
@@ -6,7 +6,7 @@
# This makes it simpler to write .bbappends that only change the
# task signatures of the recipe if the change is really enabled,
# for example with:
-# do_install_append_df-my-feature () { ... }
+# do_install:append:df-my-feature () { ... }
# where "my-feature" is a DISTRO_FEATURE.
#
# The class is meant to be used in a layer.conf or distro
@@ -22,8 +22,8 @@ DISTRO_FEATURES_OVERRIDES ?= ""
DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
-DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
-DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
# signature because of this line, then the task dependency on
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
deleted file mode 100644
index 9f398d7051..0000000000
--- a/meta/classes/distutils-base.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base pythonnative
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
deleted file mode 100644
index 9862731493..0000000000
--- a/meta/classes/distutils.bbclass
+++ /dev/null
@@ -1,92 +0,0 @@
-inherit distutils-base
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-distutils_do_configure() {
- if [ "${CLEANBROKEN}" != "1" ] ; then
- NO_FETCH_BUILD=1 \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
- fi
-}
-
-distutils_do_compile() {
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-
-distutils_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
-}
-
-distutils_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
-}
-
-distutils_do_install() {
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- # only modify file if it contains path and recompile it
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; -exec ${STAGING_BINDIR_NATIVE}/python-native/python -mcompileall {} \;
-
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
- fi
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- if [ -e ${D}${sbindir} ]; then
- for i in ${D}${sbindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
- fi
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-
- # Fix backport modules
- if [ -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py ] && [ -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py ]; then
- rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py;
- rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.pyc;
- fi
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
deleted file mode 100644
index 7dbf07ac4b..0000000000
--- a/meta/classes/distutils3-base.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base python3native
-
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
deleted file mode 100644
index 834e322474..0000000000
--- a/meta/classes/distutils3.bbclass
+++ /dev/null
@@ -1,86 +0,0 @@
-inherit distutils3-base
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_BUILD_EXT_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-distutils3_do_configure() {
- if [ "${CLEANBROKEN}" != "1" ] ; then
- NO_FETCH_BUILD=1 \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
- fi
-}
-
-distutils3_do_compile() {
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
- build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-distutils3_do_compile[vardepsexclude] = "MACHINE"
-
-distutils3_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
-}
-distutils3_stage_headers[vardepsexclude] = "MACHINE"
-
-distutils3_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
-}
-distutils3_stage_all[vardepsexclude] = "MACHINE"
-
-distutils3_do_install() {
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
-
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-}
-distutils3_do_install[vardepsexclude] = "MACHINE"
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index 3618b99a86..abfe24bace 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -13,7 +13,7 @@
# called "myrecipe" you would do:
#
# INHERIT += "externalsrc"
-# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
#
# In order to make this class work for both target and native versions (or with
# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
@@ -21,7 +21,7 @@
# the default, but the build directory can be set to the source directory if
# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
#
-# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
#
SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
@@ -45,11 +45,11 @@ python () {
if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
+ d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
@@ -68,14 +68,14 @@ python () {
url_data = fetch.ud[url]
parm = url_data.parm
if (url_data.type == 'file' or
+ url_data.type == 'npmsw' or
'type' in parm and parm['type'] == 'kmeta'):
local_srcuri.append(url)
d.setVar('SRC_URI', ' '.join(local_srcuri))
- if '{SRCPV}' in d.getVar('PV', False):
- # Dummy value because the default function can't be called with blank SRC_URI
- d.setVar('SRCPV', '999')
+ # Dummy value because the default function can't be called with blank SRC_URI
+ d.setVar('SRCPV', '999')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -86,7 +86,7 @@ python () {
if task.endswith("_setscene"):
# sstate is never going to work for external source trees, disable it
bb.build.deltask(task, d)
- else:
+ elif os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
@@ -109,6 +109,15 @@ python () {
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if task == 'do_unpack':
+ # The reproducible build create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+                # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -191,6 +200,7 @@ def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
+ import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
@@ -198,12 +208,16 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ if git_dir == top_git_dir:
+ git_dir = None
except subprocess.CalledProcessError:
pass
ret = " "
if git_dir is not None:
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
@@ -211,7 +225,17 @@ def srctree_hash_files(d, srcdir=None):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
+ sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 32569e97db..a8ef660b30 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -14,10 +14,10 @@
inherit useradd_base
-PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
+PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
-ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
+ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group;"
# Image level user / group settings
set_user_group () {
@@ -46,6 +46,9 @@ set_user_group () {
usermod)
perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
+ passwd-expire)
+ perform_passwd_expire "${IMAGE_ROOTFS}" "$opts"
+ ;;
groupmod)
perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
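
The new passwd-expire action marks an account's password as expired so it must be changed on first login. A hedged example of combining it with the other actions in an image recipe (the user name is a placeholder):

    inherit extrausers
    EXTRA_USERS_PARAMS = "\
        useradd -p '' tester; \
        passwd-expire tester; \
    "
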
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
new file mode 100644
index 0000000000..3ef6b35baa
--- /dev/null
+++ b/meta/classes/features_check.bbclass
@@ -0,0 +1,54 @@
+# Allow checking of required and conflicting features
+#
+# xxx = [DISTRO,MACHINE,COMBINED,IMAGE]
+#
+# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
+# in xxx_FEATURES.
+# REQUIRED_xxx_FEATURES: ensure every item on this list is included
+# in xxx_FEATURES.
+# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
+# xxx_FEATURES.
+#
+# Copyright 2019 (C) Texas Instruments Inc.
+# Copyright 2013 (C) O.S. Systems Software LTDA.
+
+python () {
+ if d.getVar('PARSE_ALL_RECIPES', False):
+ return
+
+ unused = True
+
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED', 'IMAGE']:
+ if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and not d.hasOverrides('ANY_OF_' + kind + '_FEATURES') and \
+ d.getVar('REQUIRED_' + kind + '_FEATURES') is None and not d.hasOverrides('REQUIRED_' + kind + '_FEATURES') and \
+ d.getVar('CONFLICT_' + kind + '_FEATURES') is None and not d.hasOverrides('CONFLICT_' + kind + '_FEATURES'):
+ continue
+
+ unused = False
+
+ # Assume at least one var is set.
+ features = set((d.getVar(kind + '_FEATURES') or '').split())
+
+ any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
+ if any_of_features:
+ if set.isdisjoint(any_of_features, features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
+ % (' '.join(any_of_features), kind))
+
+ required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
+ if required_features:
+ missing = set.difference(required_features, features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
+ % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
+
+ conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
+ if conflict_features:
+ conflicts = set.intersection(conflict_features, features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
+ % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
+
+ if unused:
+ bb.warn("Recipe inherits features_check but doesn't use it")
+}
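
Beyond the DISTRO checks carried over from the old class, the generalised version can gate a recipe on machine, combined, or image features too; for example:

    inherit features_check
    REQUIRED_MACHINE_FEATURES = "screen"
    ANY_OF_COMBINED_FEATURES = "alsa pulseaudio"
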
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
index 13f9df1592..442bfc7392 100644
--- a/meta/classes/fontcache.bbclass
+++ b/meta/classes/fontcache.bbclass
@@ -7,7 +7,7 @@ PACKAGE_WRITE_DEPS += "qemu-native"
inherit qemu
FONT_PACKAGES ??= "${PN}"
-FONT_EXTRA_RDEPENDS ?= "fontconfig-utils"
+FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
FONTCONFIG_CACHE_PARAMS ?= "-v"
# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues,
@@ -20,7 +20,7 @@ if [ -n "$D" ] ; then
$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
'bindir="${bindir}"' \
'libdir="${libdir}"' \
- 'libexecdir="${libexecdir}"' \
+ 'libexecdir="${libexecdir}"' \
'base_libdir="${base_libdir}"' \
'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
@@ -35,23 +35,23 @@ python () {
deps = d.getVar("FONT_EXTRA_RDEPENDS")
for pkg in font_pkgs:
- if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
+ if deps: d.appendVar('RDEPENDS:' + pkg, ' '+deps)
}
python add_fontcache_postinsts() {
for pkg in d.getVar('FONT_PACKAGES').split():
bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('fontcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('fontcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
PACKAGEFUNCS =+ "add_fontcache_postinsts"
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
index 3e3c509d5f..9d3668edd3 100644
--- a/meta/classes/gconf.bbclass
+++ b/meta/classes/gconf.bbclass
@@ -41,7 +41,7 @@ for SCHEMA in ${SCHEMA_FILES}; do
done
}
-python populate_packages_append () {
+python populate_packages:append () {
import re
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -57,15 +57,15 @@ python populate_packages_append () {
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gconf_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += d.getVar('gconf_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
- d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
+ d.appendVar("RDEPENDS:%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
index be2ef3b311..f11cb04456 100644
--- a/meta/classes/gettext.bbclass
+++ b/meta/classes/gettext.bbclass
@@ -13,10 +13,10 @@ def gettext_oeconf(d):
return '--disable-nls'
return "--enable-nls"
-BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
-EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
+BASEDEPENDS:append = " ${@gettext_dependencies(d)}"
+EXTRA_OECONF:append = " ${@gettext_oeconf(d)}"
# Without this, msgfmt from gettext-native will not find ITS files
# provided by target recipes (for example, polkit.its).
-GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
+GETTEXTDATADIRS:append:class-target = ":${STAGING_DATADIR}/gettext"
export GETTEXTDATADIRS
diff --git a/meta/classes/gi-docgen.bbclass b/meta/classes/gi-docgen.bbclass
new file mode 100644
index 0000000000..15581ca127
--- /dev/null
+++ b/meta/classes/gi-docgen.bbclass
@@ -0,0 +1,24 @@
+# gi-docgen is a new gnome documentation generator, which
+# seems to be a successor to gtk-doc:
+# https://gitlab.gnome.org/GNOME/gi-docgen
+
+# This variable is set to True if api-documentation is in
+# DISTRO_FEATURES, and False otherwise.
+GIDOCGEN_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'True', 'False', d)}"
+# When building native recipes, disable gi-docgen, as it is not necessary,
+# pulls in additional dependencies, and makes build times longer
+GIDOCGEN_ENABLED:class-native = "False"
+GIDOCGEN_ENABLED:class-nativesdk = "False"
+
+# meson: default option name to enable/disable gi-docgen. This matches most
+# projects' configuration. If in doubt, check meson_options.txt in the
+# project's source tree.
+GIDOCGEN_MESON_OPTION ?= 'gtk_doc'
+GIDOCGEN_MESON_ENABLE_FLAG ?= 'true'
+GIDOCGEN_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GIDOCGEN_ENABLED
+EXTRA_OEMESON:prepend = "-D${GIDOCGEN_MESON_OPTION}=${@bb.utils.contains('GIDOCGEN_ENABLED', 'True', '${GIDOCGEN_MESON_ENABLE_FLAG}', '${GIDOCGEN_MESON_DISABLE_FLAG}', d)} "
+
+DEPENDS:append = "${@' gi-docgen-native gi-docgen' if d.getVar('GIDOCGEN_ENABLED') == 'True' else ''}"
+
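
Recipes whose meson_options.txt uses a different option name, or a meson feature instead of a boolean, can override the knobs the class exposes; a hedged example (the option name and values depend entirely on the project):

    GIDOCGEN_MESON_OPTION = "documentation"
    GIDOCGEN_MESON_ENABLE_FLAG = "enabled"
    GIDOCGEN_MESON_DISABLE_FLAG = "disabled"
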
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
index e429bd3197..021eeb1cf8 100644
--- a/meta/classes/gio-module-cache.bbclass
+++ b/meta/classes/gio-module-cache.bbclass
@@ -17,22 +17,22 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('GIO_MODULE_PACKAGES').split()
for pkg in packages:
bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
index db421745bd..2db4ac6846 100644
--- a/meta/classes/glide.bbclass
+++ b/meta/classes/glide.bbclass
@@ -2,8 +2,8 @@
#
# Copyright 2018 (C) O.S. Systems Software LTDA.
-DEPENDS_append = " glide-native"
+DEPENDS:append = " glide-native"
-do_compile_prepend() {
+do_compile:prepend() {
( cd ${B}/src/${GO_IMPORT} && glide install )
}
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
deleted file mode 100644
index c6202bbb75..0000000000
--- a/meta/classes/gnome.bbclass
+++ /dev/null
@@ -1 +0,0 @@
-inherit gnomebase gtk-icon-cache gconf mime
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
index efcb6caae1..9a5bd9a232 100644
--- a/meta/classes/gnomebase.bbclass
+++ b/meta/classes/gnomebase.bbclass
@@ -1,12 +1,13 @@
def gnome_verdir(v):
- return oe.utils.trim_version(v, 2)
+ return ".".join(v.split(".")[:-1])
+
GNOME_COMPRESS_TYPE ?= "xz"
SECTION ?= "x11/gnome"
GNOMEBN ?= "${BPN}"
SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
-FILES_${PN} += "${datadir}/application-registry \
+FILES:${PN} += "${datadir}/application-registry \
${datadir}/mime-info \
${datadir}/mime/packages \
${datadir}/mime/application \
@@ -18,12 +19,12 @@ FILES_${PN} += "${datadir}/application-registry \
${datadir}/icons \
"
-FILES_${PN}-doc += "${datadir}/devhelp"
+FILES:${PN}-doc += "${datadir}/devhelp"
GNOMEBASEBUILDCLASS ??= "autotools"
inherit ${GNOMEBASEBUILDCLASS} pkgconfig
-do_install_append() {
+do_install:append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
rm -rf ${D}${localstatedir}/scrollkeeper/*
rm -f ${D}${datadir}/applications/*.cache
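
The new gnome_verdir simply drops the last version component, which also handles GNOME's post-40 versioning where trim_version's fixed two-component cut no longer matches the mirror layout. A quick check of the behaviour:

    def gnome_verdir(v):
        return ".".join(v.split(".")[:-1])

    assert gnome_verdir("3.34.1") == "3.34"
    assert gnome_verdir("40.1") == "40"  # trim_version("40.1", 2) would keep "40.1"
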
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
new file mode 100644
index 0000000000..674d2434e0
--- /dev/null
+++ b/meta/classes/go-mod.bbclass
@@ -0,0 +1,20 @@
+# Handle Go Modules support
+#
+# When using Go Modules, the current working directory MUST be at or below
+# the location of the 'go.mod' file when the go tool is used, and there is no
+# way to tell it to look elsewhere. It will automatically look upwards for the
+# file, but not downwards.
+#
+# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
+# to `GO_IMPORT` but allows for easy override.
+#
+# Copyright 2020 (C) O.S. Systems Software LTDA.
+
+# The '-modcacherw' option ensures we have write access to the cached objects,
+# avoiding errors during the clean task as well as when removing TMPDIR.
+GOBUILDFLAGS:append = " -modcacherw"
+
+inherit go
+
+GO_WORKDIR ?= "${GO_IMPORT}"
+do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
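
A recipe whose go.mod lives below the import root can point GO_WORKDIR at the right subdirectory; a hedged sketch with placeholder paths:

    inherit go-mod
    GO_IMPORT = "example.com/org/project"
    # hypothetical: the module sits in a subdirectory of the checkout
    GO_WORKDIR = "${GO_IMPORT}/subdir"
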
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
new file mode 100644
index 0000000000..b282ff7374
--- /dev/null
+++ b/meta/classes/go-ptest.bbclass
@@ -0,0 +1,54 @@
+inherit go ptest
+
+do_compile_ptest_base() {
+ export TMPDIR="${GOTMPDIR}"
+ rm -f ${B}/.go_compiled_tests.list
+ go_list_package_tests | while read pkg; do
+ cd ${B}/src/$pkg
+ ${GO} test ${GOPTESTBUILDFLAGS} $pkg
+ find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
+ sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
+ done
+ do_compile_ptest
+}
+
+do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
+
+go_make_ptest_wrapper() {
+ cat >${D}${PTEST_PATH}/run-ptest <<EOF
+#!/bin/sh
+RC=0
+run_test() (
+ cd "\$1"
+ ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
+ exit \$?)
+EOF
+
+}
+
+do_install_ptest_base() {
+ test -f "${B}/.go_compiled_tests.list" || exit 0
+ install -d ${D}${PTEST_PATH}
+ go_stage_testdata
+ go_make_ptest_wrapper
+ havetests=""
+ while read test; do
+ testdir=`dirname $test`
+ testprog=`basename $test`
+ install -d ${D}${PTEST_PATH}/$testdir
+ install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
+ echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
+ havetests="yes"
+ done < ${B}/.go_compiled_tests.list
+ if [ -n "$havetests" ]; then
+ echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
+ chmod +x ${D}${PTEST_PATH}/run-ptest
+ else
+ rm -rf ${D}${PTEST_PATH}
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+INSANE_SKIP:${PN}-ptest += "ldflags"
+
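
Recipes that previously got ptest support implicitly from go.bbclass now need the split class; the flag variables are unchanged. A hedged example (the extra flag is illustrative — GOPTESTFLAGS is passed straight to the compiled test binaries by the run-ptest wrapper above):

    inherit go-ptest
    GOPTESTFLAGS:append = " -test.v"
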
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index af331f8018..9c4c92bffd 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -1,37 +1,59 @@
-inherit goarch ptest
+inherit goarch
GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
-GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
-GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
+export GODEBUG = "gocachehash=1"
+
+GOROOT:class-native = "${STAGING_LIBDIR_NATIVE}/go"
+GOROOT:class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
GOROOT = "${STAGING_LIBDIR}/go"
export GOROOT
export GOROOT_FINAL = "${libdir}/go"
+export GOCACHE = "${B}/.cache"
+
+export GOARCH = "${TARGET_GOARCH}"
+export GOOS = "${TARGET_GOOS}"
+export GOHOSTARCH="${BUILD_GOARCH}"
+export GOHOSTOS="${BUILD_GOOS}"
+
+GOARM[export] = "0"
+GOARM:arm:class-target = "${TARGET_GOARM}"
+GOARM:arm:class-target[export] = "1"
+
+GO386[export] = "0"
+GO386:x86:class-target = "${TARGET_GO386}"
+GO386:x86:class-target[export] = "1"
-DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_GOLANG_class-native = "go-native"
-DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+GOMIPS[export] = "0"
+GOMIPS:mips:class-target = "${TARGET_GOMIPS}"
+GOMIPS:mips:class-target[export] = "1"
-DEPENDS_append = " ${DEPENDS_GOLANG}"
+DEPENDS_GOLANG:class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG:class-native = "go-native"
+DEPENDS_GOLANG:class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
+
+DEPENDS:append = " ${DEPENDS_GOLANG}"
GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH:class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
+GO_RPATH_LINK:class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
GO_LINKMODE ?= ""
-GO_LINKMODE_class-nativesdk = "--linkmode=external"
-GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS}"
+GO_LINKMODE:class-nativesdk = "--linkmode=external"
+GO_LINKMODE:class-native = "--linkmode=external"
+GO_EXTRA_LDFLAGS ?= ""
+GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} ${GO_EXTRA_LDFLAGS} -extldflags '${GO_EXTLDFLAGS}'"'
+export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
export GOPATH_OMIT_IN_ACTIONID ?= "1"
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
export GOPTESTFLAGS ?= ""
-GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
+GOBUILDFLAGS:prepend:task-compile = "${GO_PARALLEL_BUILD} "
export GO = "${HOST_PREFIX}go"
GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
-GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
+GOTOOLDIR:class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
export CGO_ENABLED ?= "1"
@@ -45,8 +67,7 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
-export GOCACHE = "off"
-export GOTMPDIR ?= "${WORKDIR}/go-tmp"
+export GOTMPDIR ?= "${WORKDIR}/build-tmp"
GOTMPDIR[vardepvalue] = ""
python go_do_unpack() {
@@ -54,17 +75,13 @@ python go_do_unpack() {
if len(src_uri) == 0:
return
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- for url in fetcher.urls:
- if fetcher.ud[url].type == 'git':
- if fetcher.ud[url].parm.get('destsuffix') is None:
- s_dirname = os.path.basename(d.getVar('S'))
- fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src',
- d.getVar('GO_IMPORT')) + '/'
- fetcher.unpack(d.getVar('WORKDIR'))
- except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ for url in fetcher.urls:
+ if fetcher.ud[url].type == 'git':
+ if fetcher.ud[url].parm.get('destsuffix') is None:
+ s_dirname = os.path.basename(d.getVar('S'))
+ fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
+ fetcher.unpack(d.getVar('WORKDIR'))
}
go_list_packages() {
@@ -97,24 +114,12 @@ go_do_compile() {
do_compile[dirs] =+ "${GOTMPDIR}"
do_compile[cleandirs] = "${B}/bin ${B}/pkg"
-do_compile_ptest_base() {
- export TMPDIR="${GOTMPDIR}"
- rm -f ${B}/.go_compiled_tests.list
- go_list_package_tests | while read pkg; do
- cd ${B}/src/$pkg
- ${GO} test ${GOPTESTBUILDFLAGS} $pkg
- find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
- sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
- done
- do_compile_ptest
-}
-do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
-
go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
+ tar -C ${D}${libdir}/go --no-same-owner -xf -
if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
install -d ${D}${bindir}
@@ -122,18 +127,6 @@ go_do_install() {
fi
}
-go_make_ptest_wrapper() {
- cat >${D}${PTEST_PATH}/run-ptest <<EOF
-#!/bin/sh
-RC=0
-run_test() (
- cd "\$1"
- ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
- exit \$?)
-EOF
-
-}
-
go_stage_testdata() {
oldwd="$PWD"
cd ${S}/src
@@ -148,44 +141,19 @@ go_stage_testdata() {
cd "$oldwd"
}
-do_install_ptest_base() {
- test -f "${B}/.go_compiled_tests.list" || exit 0
- install -d ${D}${PTEST_PATH}
- go_stage_testdata
- go_make_ptest_wrapper
- havetests=""
- while read test; do
- testdir=`dirname $test`
- testprog=`basename $test`
- install -d ${D}${PTEST_PATH}/$testdir
- install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
- echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
- havetests="yes"
- done < ${B}/.go_compiled_tests.list
- if [ -n "$havetests" ]; then
- echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
- chmod +x ${D}${PTEST_PATH}/run-ptest
- else
- rm -rf ${D}${PTEST_PATH}
- fi
- do_install_ptest
- chown -R root:root ${D}${PTEST_PATH}
-}
-
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
-FILES_${PN}-dev = "${libdir}/go/src"
-FILES_${PN}-staticdev = "${libdir}/go/pkg"
+FILES:${PN}-dev = "${libdir}/go/src"
+FILES:${PN}-staticdev = "${libdir}/go/pkg"
-INSANE_SKIP_${PN} += "ldflags"
-INSANE_SKIP_${PN}-ptest += "ldflags"
+INSANE_SKIP:${PN} += "ldflags"
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
-# doesn't support -buildmode=pie, so skip the QA checking for mips and its
+# doesn't support -buildmode=pie, so skip the QA checking for mips/rv32 and their
# variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH'):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv32' in d.getVar('TARGET_ARCH'):
+ d.appendVar('INSANE_SKIP:%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
}
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
index 39fea5e3d1..92fec16b82 100644
--- a/meta/classes/goarch.bbclass
+++ b/meta/classes/goarch.bbclass
@@ -3,43 +3,57 @@ BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
-HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOARM:class-native = "7"
+HOST_GO386:class-native = "sse2"
+HOST_GOMIPS:class-native = "hardfloat"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
-TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOARM:class-native = "7"
+TARGET_GO386:class-native = "sse2"
+TARGET_GOMIPS:class-native = "hardfloat"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
+# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
+# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
+BASE_GOARM = ''
+BASE_GOARM:armv7ve = '7'
+BASE_GOARM:armv7a = '7'
+BASE_GOARM:armv6 = '6'
+BASE_GOARM:armv5 = '5'
+
# Go supports dynamic linking on a limited set of architectures.
# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
GO_DYNLINK = ""
-GO_DYNLINK_arm = "1"
-GO_DYNLINK_aarch64 = "1"
-GO_DYNLINK_x86 = "1"
-GO_DYNLINK_x86-64 = "1"
-GO_DYNLINK_powerpc64 = "1"
-GO_DYNLINK_class-native = ""
-GO_DYNLINK_class-nativesdk = ""
+GO_DYNLINK:arm ?= "1"
+GO_DYNLINK:aarch64 ?= "1"
+GO_DYNLINK:x86 ?= "1"
+GO_DYNLINK:x86-64 ?= "1"
+GO_DYNLINK:powerpc64 ?= "1"
+GO_DYNLINK:powerpc64le ?= "1"
+GO_DYNLINK:class-native ?= ""
+GO_DYNLINK:class-nativesdk = ""
# define here because everybody inherits this class
#
-COMPATIBLE_HOST_linux-gnux32 = "null"
-COMPATIBLE_HOST_linux-muslx32 = "null"
-COMPATIBLE_HOST_powerpc = "null"
-COMPATIBLE_HOST_powerpc64 = "null"
-COMPATIBLE_HOST_mipsarchn32 = "null"
+COMPATIBLE_HOST:linux-gnux32 = "null"
+COMPATIBLE_HOST:linux-muslx32 = "null"
+COMPATIBLE_HOST:powerpc = "null"
+COMPATIBLE_HOST:powerpc64 = "null"
+COMPATIBLE_HOST:mipsarchn32 = "null"
-ARM_INSTRUCTION_SET_armv4 = "arm"
-ARM_INSTRUCTION_SET_armv5 = "arm"
-ARM_INSTRUCTION_SET_armv6 = "arm"
+ARM_INSTRUCTION_SET:armv4 = "arm"
+ARM_INSTRUCTION_SET:armv5 = "arm"
+ARM_INSTRUCTION_SET:armv6 = "arm"
-TUNE_CCARGS_remove = "-march=mips32r2"
-SECURITY_CFLAGS_mipsarch = "${SECURITY_NOPIE_CFLAGS}"
+TUNE_CCARGS:remove = "-march=mips32r2"
SECURITY_NOPIE_CFLAGS ??= ""
# go can't be built with ccache:
@@ -64,22 +78,18 @@ def go_map_arch(a, d):
return 'mips'
elif a == 'mipsel':
return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
elif re.match('p(pc|owerpc)(64)', a):
return 'ppc64'
- elif re.match('p(pc|owerpc)(64el)', a):
- return 'ppc64le'
+ elif a == 'riscv64':
+ return 'riscv64'
else:
raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
-def go_map_arm(a, f, d):
- import re
- if re.match('arm.*', a):
- if 'armv7' in f:
- return '7'
- elif 'armv6' in f:
- return '6'
- elif 'armv5' in f:
- return '5'
+def go_map_arm(a, d):
+ if a.startswith("arm"):
+ return d.getVar('BASE_GOARM')
return ''
def go_map_386(a, f, d):
@@ -88,7 +98,7 @@ def go_map_386(a, f, d):
if ('core2' in f) or ('corei7' in f):
return 'sse2'
else:
- return '387'
+ return 'softfloat'
return ''
def go_map_mips(a, f, d):
@@ -104,5 +114,3 @@ def go_map_os(o, d):
if o.startswith('linux'):
return 'linux'
return o
-
-
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
index 4ceb0c68b1..7bf9feb0d6 100644
--- a/meta/classes/gobject-introspection.bbclass
+++ b/meta/classes/gobject-introspection.bbclass
@@ -6,28 +6,40 @@
# This also sets up autoconf-based recipes to build introspection data (or not),
# depending on distro and machine features (see gobject-introspection-data class).
inherit python3native gobject-introspection-data
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+# meson: default option name to enable/disable introspection. This matches most
+# project's configuration. In doubts - check meson_options.txt in project's
+# source path.
+GIR_MESON_OPTION ?= 'introspection'
+GIR_MESON_ENABLE_FLAG ?= 'true'
+GIR_MESON_DISABLE_FLAG ?= 'false'
+
+# Define g-i options such that they can be disabled completely when GIR_MESON_OPTION is empty
+GIRMESONTARGET = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
+GIRMESONBUILD = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
+# Auto enable/disable based on GI_DATA_ENABLED
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+EXTRA_OEMESON:prepend:class-target = "${@['', '${GIRMESONTARGET}'][d.getVar('GIR_MESON_OPTION') != '']}"
# When building native recipes, disable introspection, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-introspection "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-
-UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
+EXTRA_OECONF:prepend:class-native = "--disable-introspection "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-introspection "
+EXTRA_OEMESON:prepend:class-native = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
+EXTRA_OEMESON:prepend:class-nativesdk = "${@['', '${GIRMESONBUILD}'][d.getVar('GIR_MESON_OPTION') != '']}"
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
+DEPENDS:append:class-target = " gobject-introspection gobject-introspection-native qemu-native"
# Even though introspection is disabled on -native, gobject-introspection package is still
# needed for m4 macros.
-DEPENDS_append_class-native = " gobject-introspection-native"
-DEPENDS_append_class-nativesdk = " gobject-introspection-native"
+DEPENDS:append:class-native = " gobject-introspection-native"
+DEPENDS:append:class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
-do_configure_prepend_class-target () {
+do_configure:prepend:class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
# have our fixes
mkdir -p ${S}/m4
@@ -36,8 +48,8 @@ do_configure_prepend_class-target () {
# .typelib files are needed at runtime and so they go to the main package (so
# they'll be together with libraries they support).
-FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
+FILES:${PN}:append = " ${libdir}/girepository-*/*.typelib"
# .gir files go to dev package, as they're needed for developing (but not for
# running) things that depend on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
+FILES:${PN}-dev:append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
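
For projects whose meson_options.txt names the introspection option differently, a recipe can override the new knobs; a minimal sketch, assuming a hypothetical option named "gir" that takes feature-style values rather than booleans:

    GIR_MESON_OPTION = "gir"
    GIR_MESON_ENABLE_FLAG = "enabled"
    GIR_MESON_DISABLE_FLAG = "disabled"

Setting GIR_MESON_OPTION to "" skips the -D argument entirely, per the conditional EXTRA_OEMESON expressions above.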
diff --git a/meta/classes/godep.bbclass b/meta/classes/godep.bbclass
deleted file mode 100644
index c82401c313..0000000000
--- a/meta/classes/godep.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
-DEPENDS_append = " go-dep-native"
-
-do_compile_prepend() {
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
- ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
-}
-
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
index 5eeee6c2e3..ea21b3de3d 100644
--- a/meta/classes/grub-efi-cfg.bbclass
+++ b/meta/classes/grub-efi-cfg.bbclass
@@ -23,10 +23,12 @@ GRUB_TIMEOUT ?= "10"
#FIXME: build this from the machine config
GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-EFIDIR = "/EFI/BOOT"
GRUB_ROOT ?= "${ROOT}"
APPEND ?= ""
+# Uses MACHINE-specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
# Need UUID utility code.
inherit fs-uuid
@@ -86,6 +88,12 @@ python build_efi_cfg() {
for label in labels.split():
localdata = d.createCopy()
+ overrides = localdata.getVar('OVERRIDES')
+ if not overrides:
+ bb.fatal('OVERRIDES not defined')
+
+ localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
+
for btype in btypes:
cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
lb = label
@@ -112,3 +120,4 @@ python build_efi_cfg() {
cfgfile.close()
}
+build_efi_cfg[vardepsexclude] += "OVERRIDES"
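
The per-label OVERRIDES entry added above lets configuration provide label-specific values when build_efi_cfg expands variables; a hedged sketch using the new override syntax (label name and kernel arguments are illustrative):

    LABELS = "boot install"
    APPEND:grub_install = "console=ttyS0,115200"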
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
index 90badc03a0..8fc6999e52 100644
--- a/meta/classes/grub-efi.bbclass
+++ b/meta/classes/grub-efi.bbclass
@@ -1,39 +1,8 @@
inherit grub-efi-cfg
+require conf/image-uefi.conf
efi_populate() {
- # DEST must be the root of the image so that EFIDIR is not
- # nested under a top level directory.
- DEST=$1
-
- install -d ${DEST}${EFIDIR}
-
- GRUB_IMAGE="grub-efi-bootia32.efi"
- DEST_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- GRUB_IMAGE="grub-efi-bootx64.efi"
- DEST_IMAGE="bootx64.efi"
- fi
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}/${DEST_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_IMAGE" >${DEST}/startup.nsh
+ efi_populate_common "$1" grub-efi
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- # Build a EFI directory to create efi.img
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
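
With the boot-image population factored into efi_populate_common (provided alongside conf/image-uefi.conf), a loader class only layers its own configuration on top; a sketch of the pattern, assuming the two-argument helper form used above (the loader name and MY_LOADER_CFG variable are hypothetical):

    efi_populate() {
        efi_populate_common "$1" my-loader
        install -m 0644 ${MY_LOADER_CFG} ${DEST}${EFIDIR}/loader.cfg
    }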
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
index 33afc96a9c..3fa5bd40b3 100644
--- a/meta/classes/gsettings.bbclass
+++ b/meta/classes/gsettings.bbclass
@@ -13,30 +13,30 @@ python __anonymous() {
pkg = d.getVar("GSETTINGS_PACKAGE")
if pkg:
d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
- d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
- d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
+ d.appendVar("RDEPENDS:" + pkg, " ${MLPREFIX}glib-2.0-utils")
+ d.appendVar("FILES:" + pkg, " ${datadir}/glib-2.0/schemas")
}
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
-python populate_packages_append () {
+python populate_packages:append () {
pkg = d.getVar('GSETTINGS_PACKAGE')
if pkg:
bb.note("adding gsettings postinst scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note("adding gsettings postrm scripts to %s" % pkg)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
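
The postinst/postrm hooks above are attached only when GSETTINGS_PACKAGE resolves to a package, so a recipe can redirect or disable the behaviour; a minimal sketch (the sub-package name is illustrative):

    inherit gsettings
    # ship schemas in a dedicated package instead of the default:
    GSETTINGS_PACKAGE = "${PN}-settings"
    # or opt out entirely:
    #GSETTINGS_PACKAGE = ""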
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
index bedb36ec8b..07b46ac829 100644
--- a/meta/classes/gtk-doc.bbclass
+++ b/meta/classes/gtk-doc.bbclass
@@ -7,20 +7,32 @@
#
# It should be used in recipes to determine whether gtk-doc based documentation should be built,
# so that qemu use can be avoided when necessary.
+GTKDOC_ENABLED:class-native = "False"
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
+# meson: default option name to enable/disable gtk-doc. This matches most
+# projects' configuration. If in doubt, check meson_options.txt in the project's
+# source tree.
+GTKDOC_MESON_OPTION ?= 'docs'
+GTKDOC_MESON_ENABLE_FLAG ?= 'true'
+GTKDOC_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GTKDOC_ENABLED
+EXTRA_OECONF:prepend:class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
+EXTRA_OEMESON:prepend:class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable gtkdoc, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
+EXTRA_OECONF:prepend:class-native = "--disable-gtk-doc "
+EXTRA_OECONF:prepend:class-nativesdk = "--disable-gtk-doc "
+EXTRA_OEMESON:prepend:class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON:prepend:class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
-DEPENDS_append = " gtk-doc-native"
+DEPENDS:append = " gtk-doc-native"
# The documentation directory, where the infrastructure will be copied.
# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
@@ -29,15 +41,15 @@ GTKDOC_DOCDIR ?= "${S}"
export STAGING_DIR_HOST
inherit python3native pkgconfig qemu
-DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-do_configure_prepend () {
+do_configure:prepend () {
# Need to use ||true as this is only needed if configure.ac both exists
# and uses GTK_DOC_CHECK.
gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
}
-do_compile_prepend_class-target () {
+do_compile:prepend:class-target () {
if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
@@ -51,6 +63,9 @@ export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
+# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
if [ -d ".libs" ]; then
$qemu_binary ".libs/\$@"
else
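
As with the introspection class, recipes whose meson option is not called "docs" can override the knob per recipe; a minimal sketch (the option name is illustrative of common GNOME usage, not taken from any particular project):

    GTKDOC_MESON_OPTION = "gtk_doc"
    GTKDOC_MESON_ENABLE_FLAG = "true"
    GTKDOC_MESON_DISABLE_FLAG = "false"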
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index d87167aec0..6808339b90 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -1,12 +1,26 @@
-FILES_${PN} += "${datadir}/icons/hicolor"
+FILES:${PN} += "${datadir}/icons/hicolor"
-DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-icon-utils-native"
+GTKIC_VERSION ??= '3'
-PACKAGE_WRITE_DEPS += "gtk-icon-utils-native gdk-pixbuf-native"
+GTKPN = "${@ 'gtk4' if d.getVar('GTKIC_VERSION') == '4' else 'gtk+3' }"
+GTKIC_CMD = "${@ 'gtk4-update-icon-cache' if d.getVar('GTKIC_VERSION') == '4' else 'gtk-update-icon-cache-3.0.0' }"
+
+# gtk+3/gtk4 require GTK3DISTROFEATURES; depending on them makes all
+# recipes that inherit this class require GTK3DISTROFEATURES
+inherit features_check
+ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}"
+
+DEPENDS +=" ${@ '' if d.getVar('BPN') == 'hicolor-icon-theme' else 'hicolor-icon-theme' } \
+ ${@ '' if d.getVar('BPN') == 'gdk-pixbuf' else 'gdk-pixbuf' } \
+ ${@ '' if d.getVar('BPN') == d.getVar('GTKPN') else d.getVar('GTKPN') } \
+ ${GTKPN}-native \
+"
+
+PACKAGE_WRITE_DEPS += "${GTKPN}-native gdk-pixbuf-native"
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir_native=${libdir_native}
else
@@ -16,7 +30,7 @@ else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
+ ${GTKIC_CMD} -fqt $icondir
fi
done
fi
@@ -24,19 +38,19 @@ fi
gtk_icon_cache_postrm() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir=${libdir}
else
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -qt $icondir
+ ${GTKIC_CMD} -qt $icondir
fi
done
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
@@ -47,20 +61,29 @@ python populate_packages_append () {
bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
-
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
+
+ # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3/gtk4
+ bb.note("adding gdk-pixbuf dependency to %s" % pkg)
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
+
+ bb.note("adding %s dependency to %s" % (d.getVar('GTKPN'), pkg))
+ rdepends = ' ' + d.getVar('MLPREFIX', False) + d.getVar('GTKPN')
+ d.appendVar('RDEPENDS:%s' % pkg, rdepends)
+
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_icon_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_icon_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
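
A recipe shipping GTK4-themed icons selects the matching tooling through the new knob, which switches GTKPN and GTKIC_CMD to the gtk4 package and gtk4-update-icon-cache respectively:

    inherit gtk-icon-cache
    GTKIC_VERSION = "4"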
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
index 9bb0af8b26..2107517540 100644
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ b/meta/classes/gtk-immodules-cache.bbclass
@@ -22,6 +22,7 @@ else
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
fi
if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ mkdir -p ${libdir}/gtk-3.0/3.0.0
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
fi
fi
@@ -46,23 +47,23 @@ else
fi
}
-python populate_packages_append () {
+python populate_packages:append () {
gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
for pkg in gtkimmodules_pkgs:
bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_immodule_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_immodule_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
python __anonymous() {
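
The scripts above are attached to each package listed in GTKIMMODULES_PACKAGES; a typical recipe points it at the package that actually ships the input-method modules (the sub-package name is illustrative):

    GTKIMMODULES_PACKAGES = "${PN}-immodules"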
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index aebcc44667..9b912a3083 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -19,22 +19,21 @@
# or the default one provided by icecc-create-env.bb will be used
# (NOTE that this is a modified version of the script that we need, and *not the one that comes with icecc*)
#
-# User can specify if specific packages or packages belonging to class should not use icecc to distribute
-# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
-# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages
-# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
+# The user can specify that specific recipes, or recipes belonging to a class, should not use icecc to
+# distribute compile jobs to remote machines but handle them locally, by defining ICECC_CLASS_DISABLE and
+# ICECC_RECIPE_DISABLE with the appropriate values in local.conf. In addition, the user can force icecc on
+# for recipes which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
#
#########################################################################################
#Error checking is kept to a minimum, so double-check any parameters you pass to the class
###########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
- ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
+ ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
- ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
- ICECC_REMOTE_CPP \
+ ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -47,7 +46,7 @@ HOSTTOOLS_NONFATAL += "icecc patchelf"
#
# A useful thing to do for testing Icecream changes locally is to add a
# subversion in local.conf:
-# ICECC_ENV_VERSION_append = "-my-ver-1"
+# ICECC_ENV_VERSION:append = "-my-ver-1"
ICECC_ENV_VERSION = "2"
# Default to disabling the caret workaround, If set to "1" in local.conf, icecc
@@ -57,7 +56,7 @@ ICECC_ENV_VERSION = "2"
# See: https://github.com/icecc/icecream/issues/190
export ICECC_CARET_WORKAROUND ??= "0"
-export ICECC_REMOTE_CPP ??= "1"
+export ICECC_REMOTE_CPP ??= "0"
ICECC_CFLAGS = ""
CFLAGS += "${ICECC_CFLAGS}"
@@ -66,32 +65,38 @@ CXXFLAGS += "${ICECC_CFLAGS}"
# Debug flags when generating environments
ICECC_ENV_DEBUG ??= ""
-# "system" recipe blacklist contains a list of packages that can not distribute
+# The recipe disable list contains recipes that cannot distribute
# compile tasks for one reason or another. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later, e.g. when
# there is a new version
#
# libgcc-initial - fails with CPP sanity check error if host sysroot contains
# cross gcc built for another target tune/variant
+# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
+# pragma omp threadprivate(prng_state)
+# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
+# inline assembly
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
-ICECC_SYSTEM_PACKAGE_BL += "\
+ICECC_RECIPE_DISABLE += "\
libgcc-initial \
+ pixman \
+ systemtap \
target-sdk-provides-dummy \
"
-# "system" classes that should be blacklisted. When adding new entry, please
+# Classes that should not use icecc. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later
#
# image - Images aren't compiled, but the testing framework for images captures
# PARALLEL_MAKE as part of the test environment. Many tests won't use
# icecream, but leaving the high level of parallelism can cause them to
# consume an unnecessary amount of resources.
-ICECC_SYSTEM_CLASS_BL += "\
+ICECC_CLASS_DISABLE += "\
image \
"
-def icecc_dep_prepend(d):
+def get_icecc_dep(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
@@ -99,7 +104,7 @@ def icecc_dep_prepend(d):
return "icecc-create-env-native"
return ""
-DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
+DEPENDS:prepend = "${@get_icecc_dep(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
@@ -135,32 +140,28 @@ def use_icecc(bb,d):
pn = d.getVar('PN')
bpn = d.getVar('BPN')
- # Blacklist/whitelist checks are made against BPN, because there is a good
+ # Enable/disable checks are made against BPN, because there is a good
# chance that if icecc should be skipped for a recipe, it should be skipped
# for all the variants of that recipe. PN is still checked in case a user
# specified a more specific recipe.
check_pn = set([pn, bpn])
- system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
- package_class_blacklist = system_class_blacklist + user_class_blacklist
+ class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
- for black in package_class_blacklist:
- if bb.data.inherits_class(black, d):
- bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
+ for bbclass in class_disable:
+ if bb.data.inherits_class(bbclass, d):
+ bb.debug(1, "%s: bbclass %s found in disable list, disable icecc" % (pn, bbclass))
return "no"
- system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split()
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
- package_blacklist = system_package_blacklist + user_package_blacklist
+ disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
+ enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
- if check_pn & set(package_blacklist):
- bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
+ if check_pn & set(disabled_recipes):
+ bb.debug(1, "%s: found in disable list, disable icecc" % pn)
return "no"
- if check_pn & set(user_package_whitelist):
- bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
+ if check_pn & set(enabled_recipes):
+ bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
@@ -243,7 +244,11 @@ def icecc_get_external_tool(bb, d, tool):
def icecc_get_tool_link(tool, d):
import subprocess
- return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+ try:
+ return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+ except subprocess.CalledProcessError as e:
+ bb.note("icecc: one of the tools probably disappeared during recipe parsing, cmd readlink -f %s returned %d:\n%s" % (tool, e.returncode, e.output.decode("utf-8")))
+ return tool
def icecc_get_path_tool(tool, d):
# This is a little ugly, but we want to make sure we add an actual
@@ -299,7 +304,7 @@ wait_for_file() {
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
- TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
+ TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
@@ -312,6 +317,7 @@ def set_icecc_env():
# dummy python version of set_icecc_env
return
+set_icecc_env[vardepsexclude] += "KERNEL_CC"
set_icecc_env() {
if [ "${@use_icecc(bb, d)}" = "no" ]
then
@@ -341,47 +347,56 @@ set_icecc_env() {
return
fi
- # Create symlinks to icecc in the recipe-sysroot directory
- mkdir -p ${ICE_PATH}
- if [ -n "${KERNEL_CC}" ]; then
- compilers="${@get_cross_kernel_cc(bb,d)}"
- else
- compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
- fi
- for compiler in $compilers; do
- ln -sf ${ICECC_BIN} ${ICE_PATH}/$compiler
- done
-
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
# cannot use icecc_get_and_check_tool here because it assumes as without target_sys prefix
ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
- bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
+ bbnote "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
return
fi
- ICE_VERSION=`$ICECC_CC -dumpversion`
- ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ ICE_VERSION="$($ICECC_CC -dumpversion)"
+ ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
return
fi
- ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ # Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
+ mkdir -p $ICE_PATH/symlinks
+ if [ -n "${KERNEL_CC}" ]; then
+ compilers="${@get_cross_kernel_cc(bb,d)}"
+ else
+ compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
+ fi
+ for compiler in $compilers; do
+ ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
+ rm -f $ICE_PATH/$compiler
+ cat <<-__EOF__ > $ICE_PATH/$compiler
+ #!/bin/sh -e
+ export ICECC_VERSION=$ICECC_VERSION
+ export ICECC_CC=$ICECC_CC
+ export ICECC_CXX=$ICECC_CXX
+ $ICE_PATH/symlinks/$compiler "\$@"
+ __EOF__
+ chmod 775 $ICE_PATH/$compiler
+ done
+
+ ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
# and just "as" for native; if it returns "as" in the current directory (for whatever reason), use "as" from PATH
- if [ "`dirname "${ICECC_AS}"`" = "." ]
+ if [ "$(dirname "${ICECC_AS}")" = "." ]
then
ICECC_AS="${ICECC_WHICH_AS}"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
- mkdir -p "`dirname "${ICECC_VERSION}"`"
+ mkdir -p "$(dirname "${ICECC_VERSION}")"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
@@ -402,34 +417,38 @@ set_icecc_env() {
export CCACHE_PATH="$PATH"
export CCACHE_DISABLE="1"
- export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
- bbnote "Using icecc"
+ bbnote "Using icecc path: $ICE_PATH"
+ bbnote "Using icecc tarball: $ICECC_VERSION"
}
-do_configure_prepend() {
+do_configure[network] = "1"
+do_configure:prepend() {
set_icecc_env
}
-do_compile_prepend() {
+do_compile[network] = "1"
+do_compile:prepend() {
set_icecc_env
}
-do_compile_kernelmodules_prepend() {
+do_compile_kernelmodules[network] = "1"
+do_compile_kernelmodules:prepend() {
set_icecc_env
}
-do_install_prepend() {
+do_install[network] = "1"
+do_install:prepend() {
set_icecc_env
}
# IceCream is not (currently) supported in the extensible SDK
ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
-ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
# Don't include IceCream in uninative tarball
-ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
# Add the toolchain scripts to the SDK
-TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
+TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
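
Under the renamed variables, the old *_BL/*_WL settings in local.conf translate directly; an illustrative local.conf fragment (recipe names are hypothetical):

    ICECC_CLASS_DISABLE += "image"
    ICECC_RECIPE_DISABLE += "my-fragile-recipe"
    ICECC_RECIPE_ENABLE += "my-serial-build-recipe"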
diff --git a/meta/classes/image-artifact-names.bbclass b/meta/classes/image-artifact-names.bbclass
new file mode 100644
index 0000000000..f5769e520f
--- /dev/null
+++ b/meta/classes/image-artifact-names.bbclass
@@ -0,0 +1,22 @@
+##################################################################
+# Specific image creation and rootfs population info.
+##################################################################
+
+IMAGE_BASENAME ?= "${PN}"
+IMAGE_VERSION_SUFFIX ?= "-${DATETIME}"
+IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME SOURCE_DATE_EPOCH"
+IMAGE_NAME ?= "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+IMAGE_LINK_NAME ?= "${IMAGE_BASENAME}-${MACHINE}"
+
+# IMAGE_NAME is the base name for everything produced when building images.
+# The actual image that contains the rootfs has an additional suffix (.rootfs
+# by default) followed by additional suffixes which describe the format (.ext4,
+# .ext4.xz, etc.).
+IMAGE_NAME_SUFFIX ??= ".rootfs"
+
+python () {
+ if bb.data.inherits_class('deploy', d) and d.getVar("IMAGE_VERSION_SUFFIX") == "-${DATETIME}":
+ import datetime
+ d.setVar("IMAGE_VERSION_SUFFIX", "-" + datetime.datetime.fromtimestamp(int(d.getVar("SOURCE_DATE_EPOCH")), datetime.timezone.utc).strftime('%Y%m%d%H%M%S'))
+ d.setVarFlag("IMAGE_VERSION_SUFFIX", "vardepvalue", "")
+}
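
With the defaults above, deployed artifacts follow a predictable pattern; for example (names illustrative, the timestamp coming from DATETIME or, for deploy-class recipes, SOURCE_DATE_EPOCH):

    core-image-minimal-qemux86-64-20220101120000.rootfs.ext4   (IMAGE_NAME + IMAGE_NAME_SUFFIX + fstype)
    core-image-minimal-qemux86-64.ext4                          (IMAGE_LINK_NAME symlink)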
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
index f4772f7ea1..e5dc61f857 100644
--- a/meta/classes/image-combined-dbg.bbclass
+++ b/meta/classes/image-combined-dbg.bbclass
@@ -1,4 +1,4 @@
-IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
+IMAGE_PREPROCESS_COMMAND:append = " combine_dbg_image; "
combine_dbg_image () {
if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
index f002858bd2..3d1993576a 100644
--- a/meta/classes/image-container.bbclass
+++ b/meta/classes/image-container.bbclass
@@ -1,6 +1,6 @@
ROOTFS_BOOTSTRAP_INSTALL = ""
IMAGE_TYPES_MASKED += "container"
-IMAGE_TYPEDEP_container = "tar.bz2"
+IMAGE_TYPEDEP:container = "tar.bz2"
python __anonymous() {
if "container" in d.getVar("IMAGE_FSTYPES") and \
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
index af71be5093..2c948190cf 100644
--- a/meta/classes/image-live.bbclass
+++ b/meta/classes/image-live.bbclass
@@ -22,7 +22,7 @@
# ${HDDIMG_ID} - FAT image volume-id
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-inherit live-vm-common
+inherit live-vm-common image-artifact-names
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
@@ -30,21 +30,21 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
virtual/kernel:do_deploy \
${MLPREFIX}syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
- ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
+ ${@'%s:do_image_%s' % (d.getVar('PN'), d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')) if d.getVar('ROOTFS') else ''} \
"
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:live = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:iso = "${LIVE_ROOTFS_TYPE}"
+IMAGE_TYPEDEP:hddimg = "${LIVE_ROOTFS_TYPE}"
IMAGE_TYPES_MASKED += "live hddimg iso"
python() {
@@ -234,7 +234,7 @@ build_hddimg() {
bberror "${HDDDIR}/rootfs.img rootfs size is greather than or equal to 4GB,"
bberror "and this doesn't work on a FAT filesystem. You can either:"
bberror "1) Reduce the size of rootfs.img, or,"
- bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
+ bbfatal "2) Use wic, vmdk, vhd, vhdx or vdi instead of hddimg\n"
fi
fi
@@ -261,4 +261,4 @@ python do_bootimg() {
do_bootimg[subimages] = "hddimg iso"
do_bootimg[imgsuffix] = "."
-addtask bootimg before do_image_complete
+addtask bootimg before do_image_complete after do_rootfs
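
Because INITRD_LIVE now derives its suffix from INITRAMFS_FSTYPES, changing the initramfs format propagates to the live image path automatically; a hedged local.conf sketch:

    INITRAMFS_FSTYPES = "cpio.zst"
    # INITRD_LIVE then resolves to ${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.zst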
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
deleted file mode 100644
index 68e11d4365..0000000000
--- a/meta/classes/image-mklibs.bbclass
+++ /dev/null
@@ -1,56 +0,0 @@
-do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
-
-inherit linuxloader
-
-mklibs_optimize_image_doit() {
- rm -rf ${WORKDIR}/mklibs
- mkdir -p ${WORKDIR}/mklibs/dest
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
-
- # Build a list of dynamically linked executable ELF files.
- # Omit libc/libpthread as a special case because it has an interpreter
- # but is primarily what we intend to strip down.
- for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
- file $i | grep -q ELF || continue
- ${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
- echo $i
- done > ${WORKDIR}/mklibs/executables.list
-
- dynamic_loader=${@get_linuxloader(d)}
-
- mklibs -v \
- --ldlib ${dynamic_loader} \
- --libdir ${baselib} \
- --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
- --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
- --root ${IMAGE_ROOTFS} \
- --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
- -d ${WORKDIR}/mklibs/dest \
- `cat ${WORKDIR}/mklibs/executables.list`
-
- cd ${WORKDIR}/mklibs/dest
- for i in *
- do
- cp $i `find ${IMAGE_ROOTFS} -name $i`
- done
-
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
-
- echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
- echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
-}
-
-mklibs_optimize_image() {
- for img in ${MKLIBS_OPTIMIZED_IMAGES}
- do
- if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
- then
- mklibs_optimize_image_doit
- break
- fi
- done
-}
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
deleted file mode 100644
index 04dd57c940..0000000000
--- a/meta/classes/image-prelink.bbclass
+++ /dev/null
@@ -1,64 +0,0 @@
-do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
-
-python prelink_setup () {
- oe.utils.write_ld_so_conf(d)
-}
-
-inherit linuxloader
-
-prelink_image () {
-# export PSEUDO_DEBUG=4
-# /bin/env | /bin/grep PSEUDO
-# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-# echo "LD_PRELOAD=$LD_PRELOAD"
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size before prelinking $pre_prelink_size."
-
- # We need a prelink conf on the filesystem, add one if it's missing
- if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
- cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
- ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- dummy_prelink_conf=true;
- else
- dummy_prelink_conf=false;
- fi
-
- # We need a ld.so.conf with pathnames in,prelink conf on the filesystem, add one if it's missing
- ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
- if [ -e $ldsoconf ]; then
- cp $ldsoconf $ldsoconf.prelink
- fi
- cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
-
- dynamic_loader=${@get_linuxloader(d)}
-
- # prelink!
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
- else
- export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
- fi
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- else
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- fi
-
- # Remove the prelink.conf if we had to add it.
- if [ "$dummy_prelink_conf" = "true" ]; then
- rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- fi
-
- if [ -e $ldsoconf.prelink ]; then
- mv $ldsoconf.prelink $ldsoconf
- else
- rm $ldsoconf
- fi
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size after prelinking $pre_prelink_size."
-}
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 11927f39f5..7f1f6f80a4 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -15,6 +15,7 @@ IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-contain
IMGCLASSES += "image_types_wic"
IMGCLASSES += "rootfs-postcommands"
IMGCLASSES += "image-postinst-intercepts"
+IMGCLASSES += "overlayfs-etc"
inherit ${IMGCLASSES}
TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
@@ -26,19 +27,19 @@ PACKAGES = ""
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH:prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs read-only-rootfs-delayed-postinsts stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging overlayfs-etc"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
-# These pacackages will be installed as additional into debug rootfs
+# These packages will additionally be installed into the debug rootfs
IMAGE_INSTALL_DEBUGFS ?= ""
# These packages will be removed from a read-only rootfs after all other
@@ -53,7 +54,7 @@ FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
# Define some very basic feature package groups
FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
-SPLASH ?= "psplash"
+SPLASH ?= "${@bb.utils.contains("MACHINE_FEATURES", "screen", "psplash", "", d)}"
FEATURE_PACKAGES_splash = "${SPLASH}"
IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
@@ -62,10 +63,7 @@ def check_image_features(d):
valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
for var in d:
- if var.startswith("PACKAGE_GROUP_"):
- bb.warn("PACKAGE_GROUP is deprecated, please use FEATURE_PACKAGES instead")
- valid_features.append(var[14:])
- elif var.startswith("FEATURE_PACKAGES_"):
+ if var.startswith("FEATURE_PACKAGES_"):
valid_features.append(var[17:])
valid_features.sort()
@@ -95,7 +93,7 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-musl = ""
+LDCONFIGDEPEND:libc-musl = ""
# This is needed to have depmod data in PKGDATA_DIR,
# but if you're building small initramfs image
@@ -115,7 +113,7 @@ def rootfs_command_variables(d):
'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
python () {
- variables = rootfs_command_variables(d) + sdk_command_variables(d)
+ variables = rootfs_command_variables(d)
for var in variables:
if d.getVar(var, False):
d.setVarFlag(var, 'func', '1')
@@ -124,7 +122,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -141,7 +139,10 @@ python () {
def extraimage_getdepends(task):
deps = ""
for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
- deps += " %s:%s" % (dep, task)
+ if ":" in dep:
+ deps += " %s " % (dep)
+ else:
+ deps += " %s:%s" % (dep, task)
return deps
d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
@@ -176,15 +177,15 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
-python () {
- if not bb.utils.contains('DISTRO_FEATURES', 'libc-charsets libc-locale-code libc-locales', True, False, d):
- d.setVar('IMAGE_LINGUAS', '')
-}
+# By default, create a locale archive
+IMAGE_LOCALES_ARCHIVE ?= '1'
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/intercept_scripts,${WORKDIR}/oe-rootfs-repo,${WORKDIR}/sstate-build-image_complete"
+
PACKAGE_EXCLUDE ??= ""
PACKAGE_EXCLUDE[type] = "list"
@@ -252,8 +253,7 @@ fakeroot python do_rootfs () {
progress_reporter.finish()
}
do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
-do_rootfs[umask] = "022"
+do_rootfs[cleandirs] += "${IMAGE_ROOTFS} ${IMGDEPLOYDIR} ${S}"
do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
addtask rootfs after do_prepare_recipe_sysroot
@@ -266,7 +266,6 @@ fakeroot python do_image () {
execute_pre_post_process(d, pre_process_cmds)
}
do_image[dirs] = "${TOPDIR}"
-do_image[umask] = "022"
addtask do_image after do_rootfs
fakeroot python do_image_complete () {
@@ -277,9 +276,8 @@ fakeroot python do_image_complete () {
execute_pre_post_process(d, post_process_cmds)
}
do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
+SSTATE_SKIP_CREATION:task-image-complete = '1'
do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
@@ -310,11 +308,8 @@ fakeroot python do_image_qa () {
bb.build.exec_func(cmd, d)
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
- except bb.build.FuncFailed as e:
- qamsg = qamsg + '\tImage QA function %s failed' % e.name
- if e.logfile:
- qamsg = qamsg + ' (log file is located at %s)' % e.logfile
- qamsg = qamsg + '\n'
+ except Exception as e:
+ qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -323,7 +318,7 @@ fakeroot python do_image_qa () {
addtask do_image_qa after do_rootfs before do_image
SSTATETASKS += "do_image_qa"
-SSTATE_SKIP_CREATION_task-image-qa = '1'
+SSTATE_SKIP_CREATION:task-image-qa = '1'
do_image_qa[sstate-inputdirs] = ""
do_image_qa[sstate-outputdirs] = ""
python do_image_qa_setscene () {
@@ -333,7 +328,8 @@ addtask do_image_qa_setscene
def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
- d.appendVar('IMAGE_LINK_NAME', '-dbg')
+ if d.getVar('IMAGE_LINK_NAME'):
+ d.appendVar('IMAGE_LINK_NAME', '-dbg')
d.appendVar('IMAGE_NAME','-dbg')
d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
@@ -390,8 +386,8 @@ python () {
if t.startswith("debugfs_"):
t = t[8:]
debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
- vardeps.add('IMAGE_TYPEDEP_' + t)
+ deps = (d.getVar('IMAGE_TYPEDEP:' + t) or "").split()
+ vardeps.add('IMAGE_TYPEDEP:' + t)
if baset not in typedeps:
typedeps[baset] = set()
deps = [debug + dep for dep in deps]
@@ -439,21 +435,22 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ localdata.delVar('IMAGE_VERSION_SUFFIX')
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD:' + realt, 'vardepsexclude', True) or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
image_cmd = localdata.getVar("IMAGE_CMD")
- vardeps.add('IMAGE_CMD_' + realt)
+ vardeps.add('IMAGE_CMD:' + realt)
if image_cmd:
cmds.append("\t" + image_cmd)
else:
bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
- # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
- # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
- d.delVarFlag('IMAGE_CMD_' + realt, 'func')
+ # Since a copy of IMAGE_CMD:xxx will be inlined within do_image_xxx,
+ # prevent a redundant copy of IMAGE_CMD:xxx being emitted as a function.
+ d.delVarFlag('IMAGE_CMD:' + realt, 'func')
rm_tmp_images = set()
def gen_conversion_cmds(bt):
@@ -465,11 +462,10 @@ python () {
# Create input image first.
gen_conversion_cmds(type)
localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
+ cmd = "\t" + localdata.getVar("CONVERSION_CMD:" + ctype)
if cmd not in cmds:
cmds.append(cmd)
- vardeps.add('CONVERSION_CMD_' + ctype)
- vardeps.add('COMPRESS_CMD_' + ctype)
+ vardeps.add('CONVERSION_CMD:' + ctype)
subimage = type + "." + ctype
if subimage not in subimages:
subimages.append(subimage)
@@ -515,7 +511,7 @@ python () {
# Compute the rootfs size
#
def get_rootfs_size(d):
- import subprocess
+ import subprocess, oe.utils
rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
@@ -526,14 +522,12 @@ def get_rootfs_size(d):
initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
- output = subprocess.check_output(['du', '-ks',
- d.getVar('IMAGE_ROOTFS')])
- size_kb = int(output.split()[0])
+ size_kb = oe.utils.directory_size(d.getVar("IMAGE_ROOTFS")) / 1024
base_size = size_kb * overhead_factor
bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
- bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), overhead_factor))
+ bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
base_size = base_size2
if base_size != int(base_size):
@@ -558,14 +552,14 @@ def get_rootfs_size(d):
if rootfs_maxsize:
rootfs_maxsize_int = int(rootfs_maxsize)
if base_size > rootfs_maxsize_int:
- bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+ bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
(base_size, rootfs_maxsize_int))
# Check the initramfs size against INITRAMFS_MAXSIZE (if set)
if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
initramfs_maxsize_int = int(initramfs_maxsize)
if base_size > initramfs_maxsize_int:
- bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+ bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
@@ -616,10 +610,11 @@ do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
+deltask do_populate_lic
deltask do_populate_sysroot
do_package[noexec] = "1"
deltask do_package_qa
-do_packagedata[noexec] = "1"
+deltask do_packagedata
deltask do_package_write_ipk
deltask do_package_write_deb
deltask do_package_write_rpm
@@ -628,20 +623,20 @@ deltask do_package_write_rpm
create_merged_usr_symlinks() {
root="$1"
install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
- lnr $root${base_bindir} $root/bin
- lnr $root${base_sbindir} $root/sbin
- lnr $root${base_libdir} $root/${baselib}
+ ln -rs $root${base_bindir} $root/bin
+ ln -rs $root${base_sbindir} $root/sbin
+ ln -rs $root${base_libdir} $root/${baselib}
if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
install -d $root${nonarch_base_libdir}
- lnr $root${nonarch_base_libdir} $root/lib
+ ln -rs $root${nonarch_base_libdir} $root/lib
fi
# create base links for multilibs
multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
for d in $multi_libdirs; do
install -d $root${exec_prefix}/$d
- lnr $root${exec_prefix}/$d $root/$d
+ ln -rs $root${exec_prefix}/$d $root/$d
done
}
@@ -657,18 +652,23 @@ ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge'
POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
reproducible_final_image_task () {
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
- fi
+ REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
fi
- # Set mtime of all files to a reproducible value
- bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
- find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
fi
+ # Set mtime of all files to a reproducible value
+ bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
+ find ${IMAGE_ROOTFS} -print0 | xargs -0 touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS
}
-IMAGE_PREPROCESS_COMMAND_append = " reproducible_final_image_task; "
+
+systemd_preset_all () {
+ if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
+ systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
+ fi
+}
+
+IMAGE_PREPROCESS_COMMAND:append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
CVE_PRODUCT = ""
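
The extraimage_getdepends() change above allows an explicit task in EXTRA_IMAGEDEPENDS entries instead of always assuming do_populate_sysroot; for example:

    EXTRA_IMAGEDEPENDS += "u-boot"            # implies u-boot:do_populate_sysroot
    EXTRA_IMAGEDEPENDS += "u-boot:do_deploy"  # explicit task, enabled by this change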
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index 70bd315306..f643ed3ce7 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -1,9 +1,3 @@
-# IMAGE_NAME is the base name for everything produced when building images.
-# The actual image that contains the rootfs has an additional suffix (.rootfs
-# by default) followed by additional suffices which describe the format (.ext4,
-# .ext4.xz, etc.).
-IMAGE_NAME_SUFFIX ??= ".rootfs"
-
# The default alignment of the size of the rootfs is set to 1KiB. In case
# you're using the SD card emulation of a QEMU system simulator you may
# set this value to 2048 (2MiB alignment).
@@ -35,7 +29,7 @@ def imagetypes_getdepends(d):
if d.getVar(var) is not None:
deprecated.add(var)
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
+ for typedepends in (d.getVar("IMAGE_TYPEDEP:%s" % basetype) or "").split():
base, rest = split_types(typedepends)
resttypes += rest
@@ -54,16 +48,17 @@ def imagetypes_getdepends(d):
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
-XZ_COMPRESSION_LEVEL ?= "-3"
+XZ_COMPRESSION_LEVEL ?= "-9"
XZ_INTEGRITY_CHECK ?= "crc32"
-XZ_THREADS ?= "-T 0"
ZIP_COMPRESSION_LEVEL ?= "-9"
+ZSTD_COMPRESSION_LEVEL ?= "-3"
+
JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD:cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
oe_mkext234fs () {
fstype=$1
@@ -93,12 +88,12 @@ oe_mkext234fs () {
fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
}
-IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
+IMAGE_CMD:ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
MIN_BTRFS_SIZE ?= "16384"
-IMAGE_CMD_btrfs () {
+IMAGE_CMD:btrfs () {
size=${ROOTFS_SIZE}
if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
size=${MIN_BTRFS_SIZE}
@@ -108,28 +103,24 @@ IMAGE_CMD_btrfs () {
mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
}
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
-IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
-
-# By default, tar from the host is used, which can be quite old. If
-# you need special parameters (like --xattrs) which are only supported
-# by GNU tar upstream >= 1.27, then override that default:
-# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
-# do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
-# EXTRANATIVEPATH += "tar-native"
-#
-# The GNU documentation does not specify whether --xattrs-include is necessary.
-# In practice, it turned out to be not needed when creating archives and
-# required when extracting, but it seems prudent to use it in both cases.
+IMAGE_CMD:squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD:squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD:squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD:squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
+IMAGE_CMD:squashfs-zst = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-zst ${EXTRA_IMAGECMD} -noappend -comp zstd"
+
+IMAGE_CMD:erofs = "mkfs.erofs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4 = "mkfs.erofs -zlz4 ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4 ${IMAGE_ROOTFS}"
+IMAGE_CMD:erofs-lz4hc = "mkfs.erofs -zlz4hc ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.erofs-lz4hc ${IMAGE_ROOTFS}"
+
+
IMAGE_CMD_TAR ?= "tar"
# ignore return code 1 "file changed as we read it" as other tasks(e.g. do_image_wic) may be hardlinking rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
+IMAGE_CMD:tar = "${IMAGE_CMD_TAR} --sort=name --format=posix --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
-IMAGE_CMD_cpio () {
- (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+IMAGE_CMD:cpio () {
+ (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
# about this we also avoid 'touch' below failing, as it
@@ -148,16 +139,18 @@ IMAGE_CMD_cpio () {
}
UBI_VOLNAME ?= "${MACHINE}-rootfs"
+UBI_VOLTYPE ?= "dynamic"
+UBI_IMGTYPE ?= "ubifs"
multiubi_mkfs() {
local mkubifs_args="$1"
local ubinize_args="$2"
-
+
# Added prompt error message for ubi and ubifs image creation.
if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
fi
-
+
if [ -z "$3" ]; then
local vname=""
else
@@ -166,12 +159,14 @@ multiubi_mkfs() {
echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.${UBI_IMGTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
+ echo vol_type=${UBI_VOLTYPE} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
- mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ if [ -n "$vname" ]; then
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+ fi
ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
# Cleanup cfg file
@@ -192,7 +187,7 @@ multiubi_mkfs() {
fi
}
-IMAGE_CMD_multiubi () {
+IMAGE_CMD:multiubi () {
# Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
for name in ${MULTIUBI_BUILD}; do
eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
@@ -202,14 +197,15 @@ IMAGE_CMD_multiubi () {
done
}
-IMAGE_CMD_ubi () {
+IMAGE_CMD:ubi () {
multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
}
+IMAGE_TYPEDEP:ubi = "${UBI_IMGTYPE}"
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD:ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
MIN_F2FS_SIZE ?= "524288"
-IMAGE_CMD_f2fs () {
+IMAGE_CMD:f2fs () {
# We need to add additional smarts here for devices smaller than 1.5G
# Need to scale appropriately between 40M -> 1.5G as the "overprovision
# ratio" goes down as the device gets bigger (70% -> 4.5%), below about
@@ -227,17 +223,18 @@ IMAGE_CMD_f2fs () {
EXTRA_IMAGECMD = ""
-inherit siteinfo kernel-arch
+inherit siteinfo kernel-arch image-artifact-names
+
JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
JFFS2_ERASEBLOCK ?= "0x40000"
-EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
+EXTRA_IMAGECMD:jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
# Change these if you want default mkfs behavior (i.e. create minimal inode number)
-EXTRA_IMAGECMD_ext2 ?= "-i 4096"
-EXTRA_IMAGECMD_ext3 ?= "-i 4096"
-EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_f2fs ?= ""
+EXTRA_IMAGECMD:ext2 ?= "-i 4096"
+EXTRA_IMAGECMD:ext3 ?= "-i 4096"
+EXTRA_IMAGECMD:ext4 ?= "-i 4096"
+EXTRA_IMAGECMD:btrfs ?= "-n 4096 --shrink"
+EXTRA_IMAGECMD:f2fs ?= ""
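
These defaults can still be overridden per image type from local.conf or a machine file without touching IMAGE_CMD; for example (values illustrative):

    # fewer inodes and a volume label for ext4 images
    EXTRA_IMAGECMD:ext4 = "-i 8192 -L rootfs"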
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
@@ -250,10 +247,14 @@ do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
+do_image_squashfs_zst[depends] += "squashfs-tools-native:do_populate_sysroot"
do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
+do_image_erofs[depends] += "erofs-utils-native:do_populate_sysroot"
+do_image_erofs_lz4[depends] += "erofs-utils-native:do_populate_sysroot"
+do_image_erofs_lz4hc[depends] += "erofs-utils-native:do_populate_sysroot"
# This variable lists the values that are suitable for use in IMAGE_FSTYPES
IMAGE_TYPES = " \
@@ -265,13 +266,14 @@ IMAGE_TYPES = " \
btrfs \
iso \
hddimg \
- squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
+ squashfs squashfs-xz squashfs-lzo squashfs-lz4 squashfs-zst \
ubi ubifs multiubi \
- tar tar.gz tar.bz2 tar.xz tar.lz4 \
- cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
- wic wic.gz wic.bz2 wic.lzma \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
+ cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 cpio.zst \
+ wic wic.gz wic.bz2 wic.lzma wic.zst \
container \
f2fs \
+ erofs erofs-lz4 erofs-lz4hc \
"
# Compression is a special case of conversion. The old variable
@@ -280,26 +282,32 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 ${COMPRESSIONTYPES}"
-CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "pigz -f -9 -n -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
-CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
-CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
-CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
-CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vhd vhdx vdi qcow2 base64 gzsync zsync ${COMPRESSIONTYPES}"
+CONVERSION_CMD:lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD:bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD:lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+CONVERSION_CMD:lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
+CONVERSION_CMD:sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+CONVERSION_CMD:md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+CONVERSION_CMD:sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+CONVERSION_CMD:sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+CONVERSION_CMD:sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+CONVERSION_CMD:sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+CONVERSION_CMD:sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
+CONVERSION_CMD:bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
+CONVERSION_CMD:u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
+CONVERSION_CMD:vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
+CONVERSION_CMD:vhdx = "qemu-img convert -O vhdx -o subformat=dynamic ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhdx"
+CONVERSION_CMD:vhd = "qemu-img convert -O vpc -o subformat=fixed ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vhd"
+CONVERSION_CMD:vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
+CONVERSION_CMD:qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSION_CMD:base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
+CONVERSION_CMD:zsync = "zsyncmake_curl ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD:gzsync = "zsyncmake_curl -z ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
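
Conversions chain by suffixing the base type, each stage consuming the previous artifact and the CONVERSION_DEPENDS_* entries below pulling in the native tools. A hedged example of such a chain:

    # build ext4, compress it with zstd, then checksum the compressed artifact
    IMAGE_FSTYPES += "ext4.zst.sha256sum"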
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -307,20 +315,26 @@ CONVERSION_DEPENDS_xz = "xz-native"
CONVERSION_DEPENDS_lz4 = "lz4-native"
CONVERSION_DEPENDS_lzo = "lzop-native"
CONVERSION_DEPENDS_zip = "zip-native"
+CONVERSION_DEPENDS_zst = "zstd-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
CONVERSION_DEPENDS_bmap = "bmap-tools-native"
CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
-CONVERSION_DEPENDS_vmdk = "qemu-native"
-CONVERSION_DEPENDS_vdi = "qemu-native"
-CONVERSION_DEPENDS_qcow2 = "qemu-native"
+CONVERSION_DEPENDS_vmdk = "qemu-system-native"
+CONVERSION_DEPENDS_vdi = "qemu-system-native"
+CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
+CONVERSION_DEPENDS_base64 = "coreutils-native"
+CONVERSION_DEPENDS_vhdx = "qemu-system-native"
+CONVERSION_DEPENDS_vhd = "qemu-system-native"
+CONVERSION_DEPENDS_zsync = "zsync-curl-native"
+CONVERSION_DEPENDS_gzsync = "zsync-curl-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
-DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
+DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vhd, vhdx, vdi, qcow2, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
# bmap requires python3 to be in the PATH
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
index 5b40a9e919..e3863c88a9 100644
--- a/meta/classes/image_types_wic.bbclass
+++ b/meta/classes/image_types_wic.bbclass
@@ -1,9 +1,38 @@
# The WICVARS variable is used to define the list of bitbake variables used in wic code;
# variables from this list are written to the <image>.env file
WICVARS ?= "\
- BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
- IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS"
+ APPEND \
+ ASSUME_PROVIDED \
+ BBLAYERS \
+ DEPLOY_DIR_IMAGE \
+ FAKEROOTCMD \
+ HOSTTOOLS_DIR \
+ IMAGE_BASENAME \
+ IMAGE_BOOT_FILES \
+ IMAGE_EFI_BOOT_FILES \
+ IMAGE_LINK_NAME \
+ IMAGE_ROOTFS \
+ IMGDEPLOYDIR \
+ INITRAMFS_FSTYPES \
+ INITRAMFS_IMAGE \
+ INITRAMFS_IMAGE_BUNDLE \
+ INITRAMFS_LINK_NAME \
+ INITRD \
+ INITRD_LIVE \
+ ISODIR \
+ KERNEL_IMAGETYPE \
+ MACHINE \
+ PSEUDO_IGNORE_PATHS \
+ RECIPE_SYSROOT_NATIVE \
+ ROOTFS_SIZE \
+ STAGING_DATADIR \
+ STAGING_DIR \
+ STAGING_DIR_HOST \
+ STAGING_LIBDIR \
+ TARGET_SYS \
+"
+
+inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
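
Because anything in WICVARS ends up in the deployed .env file, extra variables needed by a custom kickstart template or wic plugin can simply be appended; MY_EXTRA_SPACE below is a hypothetical variable:

    WICVARS:append = " MY_EXTRA_SPACE"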
WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
@@ -22,18 +51,26 @@ def wks_search(files, search_path):
WIC_CREATE_EXTRA_ARGS ?= ""
-IMAGE_CMD_wic () {
+IMAGE_CMD:wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
+ build_wic="${WORKDIR}/build-wic"
+ tmp_wic="${WORKDIR}/tmp-wic"
wks="${WKS_FULL_PATH}"
+ if [ -e "$tmp_wic" ]; then
+ # Ensure we don't have any junk leftover from a previously interrupted
+ # do_image_wic execution
+ rm -rf "$tmp_wic"
+ fi
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
-
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
- mv "$out/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
- rm -rf "$out/"
+ BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" -w "$tmp_wic" ${WIC_CREATE_EXTRA_ARGS}
+ mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+IMAGE_CMD:wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
+
+PSEUDO_IGNORE_PATHS .= ",${WORKDIR}/build-wic"
# Rebuild when the wks file or vars in WICVARS change
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
@@ -43,12 +80,14 @@ do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r
# We ensure all artifacts are deployed (e.g. virtual/bootloader)
do_image_wic[recrdeptask] += "do_deploy"
+do_image_wic[deptask] += "do_image_complete"
-WKS_FILE_DEPENDS_DEFAULT = "syslinux-native bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
+WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
-WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-64 = "syslinux grub-efi systemd-boot os-release"
+WKS_FILE_DEPENDS_BOOTLOADERS:x86-x32 = "syslinux grub-efi"
WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
@@ -73,6 +112,15 @@ python do_write_wks_template () {
wks_file = d.getVar('WKS_FULL_PATH')
with open(wks_file, 'w') as f:
f.write(template_body)
+ f.close()
+ # Copy the finalized wks file to the deploy directory for later use
+ depdir = d.getVar('IMGDEPLOYDIR')
+ basename = d.getVar('IMAGE_BASENAME')
+ bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
+}
+
+do_flush_pseudodb() {
+ ${FAKEROOTENV} ${FAKEROOTCMD} -S
}
python () {
@@ -101,7 +149,7 @@ python () {
# file in process_wks_template as well, so just put it in
# a variable and let the metadata deal with the deps.
d.setVar('_WKS_TEMPLATE', body)
- bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+ bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
}
@@ -123,7 +171,12 @@ python do_rootfs_wicenv () {
value = d.getVar(var)
if value:
envf.write('%s="%s"\n' % (var, value.strip()))
+ envf.close()
+ # Copy .env file to deploy directory for later use with stand alone wic
+ depdir = d.getVar('IMGDEPLOYDIR')
+ bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
}
+addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
addtask do_rootfs_wicenv after do_image before do_image_wic
do_rootfs_wicenv[vardeps] += "${WICVARS}"
do_rootfs_wicenv[prefuncs] = 'set_image_size'
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 6411884f92..0deebdb148 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -18,25 +18,29 @@
# files under exec_prefix
# -Check if the package name is upper case
-QA_SANE = "True"
-
# Elect whether a given type of error is a warning or error, they may
# have been set by other files.
# have been set by other files.
-WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
- textrel already-stripped incompatible-license files-invalid \
- installed-vs-shipped compile-host-path install-host-path \
- pn-overrides infodir build-deps \
- unknown-configure-option symlink-to-sysroot multilib \
- invalid-packageconfig host-user-contaminated uppercase-pn \
+WARN_QA ?= " libdir xorg-driver-abi \
+ textrel incompatible-license files-invalid \
+ infodir build-deps src-uri-bad symlink-to-sysroot multilib \
+ invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
+ mime mime-xdg unlisted-pkg-lics unhandled-features-check \
+ missing-update-alternatives native-last missing-ptest \
+ license-exists license-no-generic license-syntax license-format \
+ license-incompatible license-file-missing obsolete-license \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
version-going-backwards expanded-d invalid-chars \
- license-checksum dev-elf file-rdeps \
+ license-checksum dev-elf file-rdeps configure-unsafe \
+ configure-gettext perllocalpod shebang-size \
+ already-stripped installed-vs-shipped ldflags compile-host-path \
+ install-host-path pn-overrides unknown-configure-option \
+ useless-rpaths rpaths staticdev empty-dirs \
"
# Add usrmerge QA check based on distro feature
-ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
+ERROR_QA:append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
FAKEROOT_QA = "host-user-contaminated"
FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
@@ -44,7 +48,22 @@ enabled tests are listed here, the do_package_qa task will run under fakeroot."
ALL_QA = "${WARN_QA} ${ERROR_QA}"
-UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+UNKNOWN_CONFIGURE_OPT_IGNORE ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
+
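
A recipe whose configure script accepts options that --help does not advertise can extend the ignore list instead of skipping the whole check; --enable-frobnicate is a hypothetical flag:

    UNKNOWN_CONFIGURE_OPT_IGNORE:append = " --enable-frobnicate"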
+# This is a list of directories that are expected to be empty.
+QA_EMPTY_DIRS ?= " \
+ /dev/pts \
+ /media \
+ /proc \
+ /run \
+ /tmp \
+ ${localstatedir}/run \
+ ${localstatedir}/volatile \
+"
+# It is possible to specify why a directory is expected to be empty by defining
+# QA_EMPTY_DIRS_RECOMMENDATION:<path>, which will then be included in the error
+# message if the directory is not empty. If it is not specified for a directory,
+# then "but it is expected to be empty" will be used.
def package_qa_clean_path(path, d, pkg=None):
"""
@@ -55,31 +74,29 @@ def package_qa_clean_path(path, d, pkg=None):
path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
-def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE')
- if logfile:
- p = d.getVar('P')
- with open(logfile, "a+") as f:
- f.write("%s: %s [%s]\n" % (p, error, type))
-
-def package_qa_handle_error(error_class, error_msg, d):
- if error_class in (d.getVar("ERROR_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
- d.setVar("QA_SANE", False)
- return False
- elif error_class in (d.getVar("WARN_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
- else:
- bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
- return True
+QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
+def package_qa_check_shebang_size(path, name, d, elf, messages):
+ import stat
+ if os.path.islink(path) or stat.S_ISFIFO(os.stat(path).st_mode) or elf:
+ return
-def package_qa_add_message(messages, section, new_msg):
- if section not in messages:
- messages[section] = new_msg
- else:
- messages[section] = messages[section] + "\n" + new_msg
+ try:
+ with open(path, 'rb') as f:
+ stanza = f.readline(130)
+ except IOError:
+ return
+
+ if stanza.startswith(b'#!'):
+ # Shebang found; check that it fits within the size limit
+ try:
+ stanza = stanza.decode("utf-8")
+ except UnicodeDecodeError:
+ #If it is not a text file, it is not a script
+ return
+
+ if len(stanza) > 129:
+ oe.qa.add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
+ return
QAPATHTEST[libexec] = "package_qa_check_libexec"
def package_qa_check_libexec(path,name, d, elf, messages):
@@ -90,7 +107,7 @@ def package_qa_check_libexec(path,name, d, elf, messages):
return True
if 'libexec' in path.split(os.path.sep):
- package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
+ oe.qa.add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
return False
return True
@@ -118,7 +135,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
rpath = m.group(1)
for dir in bad_dirs:
if dir in rpath:
- package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
+ oe.qa.add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
@@ -148,7 +165,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
- package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
+ oe.qa.add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d, name), rpath))
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -157,8 +174,8 @@ def package_qa_check_dev(path, name, d, elf, messages):
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package %s contains symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
def package_qa_check_dev_elf(path, name, d, elf, messages):
@@ -168,8 +185,8 @@ def package_qa_check_dev_elf(path, name, d, elf, messages):
install link-time .so files that are linker scripts.
"""
if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
- package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "dev-elf", "-dev package %s contains non-symlink .so '%s'" % \
+ (name, package_qa_clean_path(path, d, name)))
QAPATHTEST[staticdev] = "package_qa_check_staticdev"
def package_qa_check_staticdev(path, name, d, elf, messages):
@@ -180,10 +197,50 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
libgcc.a, libgcov.a will be skipped in their packages
"""
- if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
- package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+ if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
+ oe.qa.add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d, name)))
+
+QAPATHTEST[mime] = "package_qa_check_mime"
+def package_qa_check_mime(path, name, d, elf, messages):
+ """
+ Check if package installs mime types to /usr/share/mime/packages
+ while not inheriting mime.bbclass
+ """
+
+ if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
+ oe.qa.add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
+QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
+def package_qa_check_mime_xdg(path, name, d, elf, messages):
+ """
+ Check if package installs a desktop file containing MimeType and thus
+ requires mime-xdg.bbclass to create /usr/share/applications/mimeinfo.cache
+ """
+
+ if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
+ mime_type_found = False
+ try:
+ with open(path, 'r') as f:
+ for line in f.read().split('\n'):
+ if 'MimeType' in line:
+ mime_type_found = True
+ break
+ except:
+ # At least libreoffice installs symlinks with absolute paths that are dangling here.
+ # We could implement some magic, but for the few (one) affected recipes it is not worth the effort, so just warn:
+ wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
+ wstr += "Please check if (linked) file contains key 'MimeType'.\n"
+ pkgname = name
+ if name == d.getVar('PN'):
+ pkgname = '${PN}'
+ wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP:%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
+ oe.qa.add_message(messages, "mime-xdg", wstr)
+ if mime_type_found:
+ oe.qa.add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
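
The remedy the warning suggests looks like this in the offending recipe (sketch; assumes ${PN} ships the .desktop file):

    inherit mime-xdg
    MIME_XDG_PACKAGES += "${PN}"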
def package_qa_check_libdir(d):
"""
Check for wrong library installation paths. For instance, catch
@@ -211,7 +268,7 @@ def package_qa_check_libdir(d):
# Skip subdirectories for any packages with libdir in INSANE_SKIP
skippackages = []
for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
+ if 'libdir' in (d.getVar('INSANE_SKIP:' + package) or "").split():
bb.note("Package %s skipping libdir QA test" % (package))
skippackages.append(package)
elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
@@ -245,7 +302,7 @@ def package_qa_check_libdir(d):
pass
if messages:
- package_qa_handle_error("libdir", "\n".join(messages), d)
+ oe.qa.handle_error("libdir", "\n".join(messages), d)
QAPATHTEST[debug-files] = "package_qa_check_dbg"
def package_qa_check_dbg(path, name, d, elf, messages):
@@ -255,16 +312,9 @@ def package_qa_check_dbg(path, name, d, elf, messages):
if not "-dbg" in name and not "-ptest" in name:
if '.debug' in path.split(os.path.sep):
- package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
+ oe.qa.add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
-QAPATHTEST[perms] = "package_qa_check_perm"
-def package_qa_check_perm(path,name,d, elf, messages):
- """
- Check the permission of files
- """
- return
-
QAPATHTEST[arch] = "package_qa_check_arch"
def package_qa_check_arch(path,name,d, elf, messages):
"""
@@ -275,14 +325,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
if not elf:
return
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
provides = d.getVar('PROVIDES')
bpn = d.getVar('BPN')
if target_arch == "allarch":
pn = d.getVar('PN')
- package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
+ oe.qa.add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
return
# FIXME: Cross package confuse this check, so just skip them
@@ -305,14 +355,14 @@ def package_qa_check_arch(path,name,d, elf, messages):
target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
if not ((machine == elf.machine()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
- (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Architecture did not match (%s, expected %s) in %s" % \
+ (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path, d, name)))
elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
- (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Bit size did not match (%d, expected %d) in %s" % \
+ (elf.abiSize(), bits, package_qa_clean_path(path, d, name)))
elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
- package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
+ oe.qa.add_message(messages, "arch", "Endiannes did not match (%d, expected %d) in %s" % \
+ (elf.isLittleEndian(), littleendian, package_qa_clean_path(path,d, name)))
QAPATHTEST[desktop] = "package_qa_check_desktop"
def package_qa_check_desktop(path, name, d, elf, messages):
@@ -324,7 +374,7 @@ def package_qa_check_desktop(path, name, d, elf, messages):
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
- package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
+ oe.qa.add_message(messages, "desktop", "Desktop file issue: " + l.strip())
QAPATHTEST[textrel] = "package_qa_textrel"
def package_qa_textrel(path, name, d, elf, messages):
@@ -346,9 +396,11 @@ def package_qa_textrel(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if textrel_re.match(line):
sane = False
+ break
if not sane:
- package_qa_add_message(messages, "textrel", "ELF binary '%s' has relocations in .text" % path)
+ path = package_qa_clean_path(path, d, name)
+ oe.qa.add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -377,19 +429,20 @@ def package_qa_hash_style(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if "SYMTAB" in line:
has_syms = True
- if "GNU_HASH" in line:
+ if "GNU_HASH" in line or "MIPS_XHASH" in line:
sane = True
- if "[mips32]" in line or "[mips64]" in line:
+ if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
sane = True
-
if has_syms and not sane:
- package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
+ path = package_qa_clean_path(path, d, name)
+ oe.qa.add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name))
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
- Check for build paths inside target files and error if not found in the whitelist
+ Check for build paths inside target files and error if paths are not
+ explicitly ignored.
"""
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
@@ -399,15 +452,12 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if os.path.islink(path):
return
- # Ignore ipk and deb's CONTROL dir
- if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
- return
-
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
with open(path, 'rb') as f:
file_content = f.read()
if tmpdir in file_content:
- package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
+ oe.qa.add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -423,10 +473,10 @@ def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
driverdir = d.expand("${libdir}/xorg/modules/drivers/")
if driverdir in path and path.endswith(".so"):
mlprefix = d.getVar('MLPREFIX') or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
+ for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + name) or ""):
if rdep.startswith("%sxorg-abi-" % mlprefix):
return
- package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
+ oe.qa.add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
QAPATHTEST[infodir] = "package_qa_check_infodir"
def package_qa_check_infodir(path, name, d, elf, messages):
@@ -436,7 +486,7 @@ def package_qa_check_infodir(path, name, d, elf, messages):
infodir = d.expand("${infodir}/dir")
if infodir in path:
- package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
+ oe.qa.add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
@@ -449,7 +499,7 @@ def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
tmpdir = d.getVar('TMPDIR')
if target.startswith(tmpdir):
trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
+ oe.qa.add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
# Check license variables
do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
@@ -457,8 +507,6 @@ python populate_lic_qa_checksum() {
"""
Check for changes in the license files.
"""
- import tempfile
- sane = True
lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
lic = d.getVar('LICENSE')
@@ -468,7 +516,7 @@ python populate_lic_qa_checksum() {
return
if not lic_files and d.getVar('SRC_URI'):
- sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
+ oe.qa.handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
srcdir = d.getVar('S')
corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
@@ -476,11 +524,11 @@ python populate_lic_qa_checksum() {
try:
(type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
except bb.fetch.MalformedUrl:
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
continue
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+ oe.qa.handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
continue
if (srclicfile == corebase_licensefile):
@@ -495,61 +543,45 @@ python populate_lic_qa_checksum() {
if (not beginline) and (not endline):
md5chksum = bb.utils.md5_file(srclicfile)
- with open(srclicfile, 'rb') as f:
- license = f.read()
+ with open(srclicfile, 'r', errors='replace') as f:
+ license = f.read().splitlines()
else:
- fi = open(srclicfile, 'rb')
- fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
- tmplicfile = fo.name;
- lineno = 0
- linesout = 0
- license = []
- for line in fi:
- lineno += 1
- if (lineno >= beginline):
- if ((lineno <= endline) or not endline):
- fo.write(line)
- license.append(line)
- linesout += 1
- else:
- break
- fo.flush()
- fo.close()
- fi.close()
- md5chksum = bb.utils.md5_file(tmplicfile)
- license = b''.join(license)
- os.unlink(tmplicfile)
-
+ with open(srclicfile, 'rb') as f:
+ import hashlib
+ lineno = 0
+ license = []
+ m = hashlib.new('MD5', usedforsecurity=False)
+ for line in f:
+ lineno += 1
+ if (lineno >= beginline):
+ if ((lineno <= endline) or not endline):
+ m.update(line)
+ license.append(line.decode('utf-8', errors='replace').rstrip())
+ else:
+ break
+ md5chksum = m.hexdigest()
if recipemd5 == md5chksum:
bb.note (pn + ": md5 checksum matched for ", url)
else:
if recipemd5:
msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
- try:
- license_lines = license.decode('utf-8').split('\n')
- except:
- # License text might not be valid UTF-8, in which
- # case we don't know how to include it in our output
- # and have to skip it.
- pass
- else:
- max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
- if not license_lines or license_lines[-1] != '':
- # Ensure that our license text ends with a line break
- # (will be added with join() below).
- license_lines.append('')
- remove = len(license_lines) - max_lines
- if remove > 0:
- start = max_lines // 2
- end = start + remove - 1
- del license_lines[start:end]
- license_lines.insert(start, '...')
- msg = msg + "\n" + pn + ": Here is the selected license text:" + \
- "\n" + \
- "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
- "\n" + "\n".join(license_lines) + \
- "{:^^70}".format(" endline=%d " % endline if endline else "")
+ max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
+ if not license or license[-1] != '':
+ # Ensure that our license text ends with a line break
+ # (will be added with join() below).
+ license.append('')
+ remove = len(license) - max_lines
+ if remove > 0:
+ start = max_lines // 2
+ end = start + remove - 1
+ del license[start:end]
+ license.insert(start, '...')
+ msg = msg + "\n" + pn + ": Here is the selected license text:" + \
+ "\n" + \
+ "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
+ "\n" + "\n".join(license) + \
+ "{:^^70}".format(" endline=%d " % endline if endline else "")
if beginline:
if endline:
srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
@@ -564,13 +596,12 @@ python populate_lic_qa_checksum() {
else:
msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- sane &= package_qa_handle_error("license-checksum", msg, d)
+ oe.qa.handle_error("license-checksum", msg, d)
- if not sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
-def package_qa_check_staged(path,d):
+def qa_check_staged(path,d):
"""
Check staged la and pc files for common problems like references to the work
directory.
@@ -579,7 +610,6 @@ def package_qa_check_staged(path,d):
responsible for the errors easily even if we look at every .pc and .la file.
"""
- sane = True
tmpdir = d.getVar('TMPDIR')
workdir = os.path.join(tmpdir, "work")
recipesysroot = d.getVar("RECIPE_SYSROOT")
@@ -589,28 +619,37 @@ def package_qa_check_staged(path,d):
else:
pkgconfigcheck = tmpdir
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_la = False
+ if 'la' in skip:
+ bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
+ skip_la = True
+
+ skip_pkgconfig = False
+ if 'pkgconfig' in skip:
+ bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
+ skip_pkgconfig = True
+
# find all .la and .pc files
# read the content
# and check for stuff that looks wrong
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
- if file.endswith(".la"):
+ if file.endswith(".la") and not skip_la:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("la", error_msg, d)
- elif file.endswith(".pc"):
+ oe.qa.handle_error("la", error_msg, d)
+ elif file.endswith(".pc") and not skip_pkgconfig:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("pkgconfig", error_msg, d)
-
- return sane
+ oe.qa.handle_error("pkgconfig", error_msg, d)
# Run all package-wide warnfuncs and errorfuncs
def package_qa_package(warnfuncs, errorfuncs, package, d):
@@ -623,9 +662,9 @@ def package_qa_package(warnfuncs, errorfuncs, package, d):
func(package, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
@@ -640,38 +679,55 @@ def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
func(pn, d, errors)
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
return len(errors) == 0
+def prepopulate_objdump_p(elf, d):
+ output = elf.run_objdump("-p", d)
+ return (elf.name, output)
+
# Walk over all files in a directory and call func
def package_qa_walk(warnfuncs, errorfuncs, package, d):
- import oe.qa
-
#if this will throw an exception, then fix the dict above
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
+ target_os = d.getVar('HOST_OS')
+ target_arch = d.getVar('HOST_ARCH')
warnings = {}
errors = {}
+ elves = {}
for path in pkgfiles[package]:
- elf = oe.qa.ELFFile(path)
- try:
- elf.open()
- except (IOError, oe.qa.NotELFFileError):
- # IOError can happen if the packaging control files disappear,
- elf = None
+ elf = None
+ if os.path.isfile(path):
+ elf = oe.qa.ELFFile(path)
+ try:
+ elf.open()
+ elf.close()
+ except oe.qa.NotELFFileError:
+ elf = None
+ if elf:
+ elves[path] = elf
+
+ results = oe.utils.multiprocess_launch(prepopulate_objdump_p, elves.values(), d, extraargs=(d,))
+ for item in results:
+ elves[item[0]].set_objdump("-p", item[1])
+
+ for path in pkgfiles[package]:
+ if path in elves:
+ elves[path].open()
for func in warnfuncs:
- func(path, package, d, elf, warnings)
+ func(path, package, d, elves.get(path), warnings)
for func in errorfuncs:
- func(path, package, d, elf, errors)
+ func(path, package, d, elves.get(path), errors)
+ if path in elves:
+ elves[path].close()
for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
+ oe.qa.handle_error(w, warnings[w], d)
for e in errors:
- package_qa_handle_error(e, errors[e], d)
+ oe.qa.handle_error(e, errors[e], d)
def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# Don't do this check for kernel/module recipes, there aren't too many debug/development
@@ -691,10 +747,10 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
for rdepend in rdepends:
if "-dbg" in rdepend and "debug-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg,rdepend)
- package_qa_handle_error("debug-deps", error_msg, d)
+ oe.qa.handle_error("debug-deps", error_msg, d)
if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkg, rdepend)
- package_qa_handle_error("dev-deps", error_msg, d)
+ oe.qa.handle_error("dev-deps", error_msg, d)
if rdepend not in packages:
rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
@@ -715,7 +771,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
else:
error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
- package_qa_handle_error("build-deps", error_msg, d)
+ oe.qa.handle_error("build-deps", error_msg, d)
if "file-rdeps" not in skip:
ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
@@ -725,7 +781,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
filerdepends = {}
rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
for key in rdep_data:
- if key.startswith("FILERDEPENDS_"):
+ if key.startswith("FILERDEPENDS:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
if subkey not in ignored_file_rdeps and \
not subkey.startswith('perl('):
@@ -733,32 +789,14 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
filerdepends[subkey] = key[13:]
if filerdepends:
- next = rdepends
done = rdepends[:]
- # Find all the rdepends on the dependency chain
- while next:
- new = []
- for rdep in next:
- rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
- sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
- if not sub_rdeps:
- continue
- for sub_rdep in bb.utils.explode_deps(sub_rdeps):
- if sub_rdep in done:
- continue
- if oe.packagedata.has_subpkgdata(sub_rdep, d):
- # It's a new rdep
- done.append(sub_rdep)
- new.append(sub_rdep)
- next = new
-
# Add the rprovides of itself
if pkg not in done:
done.insert(0, pkg)
# The python is not a package, but python-core provides it, so
# skip checking /usr/bin/python if python is in the rdeps, in
- # case there is a RDEPENDS_pkg = "python" in the recipe.
+ # case there is a RDEPENDS:pkg = "python" in the recipe.
for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
if py in done:
filerdepends.pop("/usr/bin/python",None)
@@ -771,11 +809,11 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
# For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
for key in rdep_data:
- if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
+ if key.startswith("FILERPROVIDES:") or key.startswith("RPROVIDES:"):
for subkey in bb.utils.explode_deps(rdep_data[key]):
filerdepends.pop(subkey,None)
# Add the files list to the rprovides
- if key == "FILES_INFO":
+ if key.startswith("FILES_INFO:"):
# Use eval() to make it as a dict
for subkey in eval(rdep_data[key]):
filerdepends.pop(subkey,None)
@@ -784,9 +822,9 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
break
if filerdepends:
for key in filerdepends:
- error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
- (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
+ error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS:%s?" % \
+ (filerdepends[key].replace(":%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
+ oe.qa.handle_error("file-rdeps", error_msg, d)
package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
def package_qa_check_deps(pkg, pkgdest, d):
@@ -798,12 +836,12 @@ def package_qa_check_deps(pkg, pkgdest, d):
try:
rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
except ValueError as e:
- bb.fatal("%s_%s: %s" % (var, pkg, e))
+ bb.fatal("%s:%s: %s" % (var, pkg, e))
for dep in rvar:
for v in rvar[dep]:
if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
- error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
- package_qa_handle_error("dep-cmp", error_msg, d)
+ error_msg = "%s:%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
+ oe.qa.handle_error("dep-cmp", error_msg, d)
check_valid_deps('RDEPENDS')
check_valid_deps('RRECOMMENDS')
@@ -814,16 +852,34 @@ def package_qa_check_deps(pkg, pkgdest, d):
QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
def package_qa_check_usrmerge(pkg, d, messages):
+
pkgdest = d.getVar('PKGDEST')
pkg_dir = pkgdest + os.sep + pkg + os.sep
merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
for f in merged_dirs:
if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
- package_qa_add_message(messages, "usrmerge", msg)
+ oe.qa.add_message(messages, "usrmerge", msg)
return False
return True
+QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
+def package_qa_check_perllocalpod(pkg, d, messages):
+ """
+ Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
+ installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
+ handle this for most recipes.
+ """
+ import glob
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
+
+ matches = glob.glob(podpath)
+ if matches:
+ matches = [package_qa_clean_path(path, d, pkg) for path in matches]
+ msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
+ oe.qa.add_message(messages, "perllocalpod", msg)
+
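
When a Perl module recipe trips this check, the usual remedies are building with NO_PERLLOCAL=1 (as cpan.bbclass already does) or simply removing the file; a hedged sketch of the latter:

    do_install:append() {
        find ${D}${libdir} -name perllocal.pod -delete
    }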
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
"""
@@ -834,16 +890,56 @@ def package_qa_check_expanded_d(package, d, messages):
expanded_d = d.getVar('D')
for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + package) or ""
+ bbvar = d.getVar(var + ":" + package) or ""
if expanded_d in bbvar:
if var == 'FILES':
- package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
+ oe.qa.add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
sane = False
else:
- package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
+ oe.qa.add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
sane = False
return sane
+QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
+def package_qa_check_unlisted_pkg_lics(package, d, messages):
+ """
+ Check that all licenses for a package are among the licenses for the recipe.
+ """
+ pkg_lics = d.getVar('LICENSE:' + package)
+ if not pkg_lics:
+ return True
+
+ recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
+ package_lics = oe.license.list_licenses(pkg_lics)
+ unlisted = package_lics - recipe_lics_set
+ if unlisted:
+ oe.qa.add_message(messages, "unlisted-pkg-lics",
+ "LICENSE:%s includes licenses (%s) that are not "
+ "listed in LICENSE" % (package, ' '.join(unlisted)))
+ return False
+ obsolete = set(oe.license.obsolete_license_list()) & package_lics - recipe_lics_set
+ if obsolete:
+ oe.qa.add_message(messages, "obsolete-license",
+ "LICENSE:%s includes obsolete licenses %s" % (package, ' '.join(obsolete)))
+ return False
+ return True
+
+QAPKGTEST[empty-dirs] = "package_qa_check_empty_dirs"
+def package_qa_check_empty_dirs(pkg, d, messages):
+ """
+ Check for the existence of files in directories that are expected to be
+ empty.
+ """
+
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ for dir in (d.getVar('QA_EMPTY_DIRS') or "").split():
+ empty_dir = oe.path.join(pkgd, dir)
+ if os.path.exists(empty_dir) and os.listdir(empty_dir):
+ recommendation = (d.getVar('QA_EMPTY_DIRS_RECOMMENDATION:' + dir) or
+ "but it is expected to be empty")
+ msg = "%s installs files in %s, %s" % (pkg, dir, recommendation)
+ oe.qa.add_message(messages, "empty-dirs", msg)
+
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
sane = True
@@ -854,7 +950,7 @@ def package_qa_check_encoding(keys, encode, d):
except UnicodeDecodeError as e:
error_msg = "%s has non %s characters" % (key,enc)
sane = False
- package_qa_handle_error("invalid-chars", error_msg, d)
+ oe.qa.handle_error("invalid-chars", error_msg, d)
return sane
for key in keys:
@@ -885,18 +981,35 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if exc.errno != errno.ENOENT:
raise
else:
- rootfs_path = path[len(dest):]
check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
return False
check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
+ oe.qa.add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
return False
return True
+QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
+def package_qa_check_unhandled_features_check(pn, d, messages):
+ if not bb.data.inherits_class('features_check', d):
+ var_set = False
+ for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
+ for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
+ if d.getVar(var) is not None or d.hasOverrides(var):
+ var_set = True
+ if var_set:
+ oe.qa.handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
+
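
The expected pattern pairs the feature variables with the class, e.g. (feature name illustrative):

    inherit features_check
    REQUIRED_DISTRO_FEATURES = "x11"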
+QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives"
+def package_qa_check_missing_update_alternatives(pn, d, messages):
+ # Look at all packages and find out if any of them sets the ALTERNATIVE variable
+ # without inheriting the update-alternatives class
+ for pkg in (d.getVar('PACKAGES') or '').split():
+ if d.getVar('ALTERNATIVE:%s' % pkg) and not bb.data.inherits_class('update-alternatives', d):
+ oe.qa.handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE:%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d)
# The PACKAGE FUNC to scan each package
python do_package_qa () {
@@ -905,6 +1018,14 @@ python do_package_qa () {
bb.note("DO PACKAGE QA")
+ main_lic = d.getVar('LICENSE')
+
+ # Check for obsolete license references in main LICENSE (packages are checked below for any changes)
+ main_licenses = oe.license.list_licenses(d.getVar('LICENSE'))
+ obsolete = set(oe.license.obsolete_license_list()) & main_licenses
+ if obsolete:
+ oe.qa.handle_error("obsolete-license", "Recipe LICENSE includes obsolete licenses %s" % ' '.join(obsolete), d)
+
bb.build.exec_func("read_subpackage_metadata", d)
# Check non UTF-8 characters on recipe's metadata
@@ -913,38 +1034,24 @@ python do_package_qa () {
logdir = d.getVar('T')
pn = d.getVar('PN')
- # Check the compile log for host contamination
- compilelog = os.path.join(logdir,"log.do_compile")
-
- if os.path.exists(compilelog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, compilelog)
- package_qa_handle_error("compile-host-path", msg, d)
-
- # Check the install log for host contamination
- installlog = os.path.join(logdir,"log.do_install")
-
- if os.path.exists(installlog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, installlog)
- package_qa_handle_error("install-host-path", msg, d)
-
# Scan the packages...
pkgdest = d.getVar('PKGDEST')
packages = set((d.getVar('PACKAGES') or '').split())
- cpath = oe.cachedpath.CachedPath()
global pkgfiles
pkgfiles = {}
for pkg in packages:
pkgfiles[pkg] = []
- for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
+ pkgdir = os.path.join(pkgdest, pkg)
+ for walkroot, dirs, files in os.walk(pkgdir):
+ # Don't walk into top-level CONTROL or DEBIAN directories as these
+ # are temporary directories created by do_package.
+ if walkroot == pkgdir:
+ for control in ("CONTROL", "DEBIAN"):
+ if control in dirs:
+ dirs.remove(control)
for file in files:
- pkgfiles[pkg].append(walkroot + os.sep + file)
+ pkgfiles[pkg].append(os.path.join(walkroot, file))
# no packages should be scanned
if not packages:
@@ -979,14 +1086,14 @@ python do_package_qa () {
for package in packages:
skip = set((d.getVar('INSANE_SKIP') or "").split() +
- (d.getVar('INSANE_SKIP_' + package) or "").split())
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
bb.note("Checking Package: %s" % package)
# Check package name
if not pkgname_pattern.match(package):
- package_qa_handle_error("pkgname",
+ oe.qa.handle_error("pkgname",
"%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
@@ -1004,10 +1111,7 @@ python do_package_qa () {
if 'libdir' in d.getVar("ALL_QA").split():
package_qa_check_libdir(d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("QA run found fatal errors. Please consider fixing them.")
- bb.note("DONE with PACKAGE QA")
+ oe.qa.exit_if_errors(d)
}
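
A recurring theme in this hunk is replacing immediate bb.fatal() calls with oe.qa.handle_error() plus a single oe.qa.exit_if_errors() at task end, so one run reports every problem before failing. A minimal sketch of that accumulate-then-fail pattern, with an invented QAContext class standing in for the oe.qa module:

    class QAContext:
        """Illustrative accumulate-then-fail pattern, mirroring how
        oe.qa.handle_error()/exit_if_errors() are used above."""
        def __init__(self, error_qa):
            self.error_qa = set(error_qa)  # checks treated as fatal (ERROR_QA)
            self.fatal = False

        def handle_error(self, check, msg):
            if check in self.error_qa:
                print("ERROR: %s [%s]" % (msg, check))
                self.fatal = True
            else:
                print("WARNING: %s [%s]" % (msg, check))

        def exit_if_errors(self):
            if self.fatal:
                raise SystemExit("QA run found fatal errors. Please consider fixing them.")

    qa = QAContext(error_qa={"pkgname"})
    qa.handle_error("pkgname", "Foo_Bar doesn't match the [a-z0-9.+-]+ regex")
    try:
        qa.exit_if_errors()   # only raises because "pkgname" is in ERROR_QA
    except SystemExit as e:
        print(e)
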
# binutils is used for most checks, so need to set as dependency
@@ -1021,7 +1125,7 @@ addtask do_package_qa after do_packagedata do_package before do_build
python() {
pkgs = (d.getVar('PACKAGES') or '').split()
for pkg in pkgs:
- d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
+ d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP:{}".format(pkg))
}
SSTATETASKS += "do_package_qa"
@@ -1034,9 +1138,80 @@ addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
+ qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d)
+ oe.qa.exit_with_message_if_errors("QA staging was broken by the package built above", d)
+}
+
+python do_qa_patch() {
+ import subprocess
- if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
- bb.fatal("QA staging was broken by the package built above")
+ ###########################################################################
+ # Check patch.log for fuzz warnings
+ #
+ # Further information on why we check for patch fuzz warnings:
+ # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+ ###########################################################################
+
+ logdir = d.getVar('T')
+ patchlog = os.path.join(logdir,"log.do_patch")
+
+ if os.path.exists(patchlog):
+ fuzzheader = '--- Patch fuzz start ---'
+ fuzzfooter = '--- Patch fuzz end ---'
+ statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
+ if subprocess.call(statement, shell=True) == 0:
+ msg = "Fuzz detected:\n\n"
+ fuzzmsg = ""
+ inFuzzInfo = False
+ f = open(patchlog, "r")
+ for line in f:
+ if fuzzheader in line:
+ inFuzzInfo = True
+ fuzzmsg = ""
+ elif fuzzfooter in line:
+ fuzzmsg = fuzzmsg.replace('\n\n', '\n')
+ msg += fuzzmsg
+ msg += "\n"
+ inFuzzInfo = False
+ elif inFuzzInfo and not 'Now at patch' in line:
+ fuzzmsg += line
+ f.close()
+ msg += "The context lines in the patches can be updated with devtool:\n"
+ msg += "\n"
+ msg += " devtool modify %s\n" % d.getVar('PN')
+ msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
+ msg += "Don't forget to review changes done by devtool!\n"
+ if 'patch-fuzz' in d.getVar('ERROR_QA'):
+ bb.error(msg)
+ elif 'patch-fuzz' in d.getVar('WARN_QA'):
+ bb.warn(msg)
+ msg = "Patch log indicates that patches do not apply cleanly."
+ oe.qa.handle_error("patch-fuzz", msg, d)
+
+ # Check if the patch contains a correctly formatted and spelled Upstream-Status
+ import re
+ from oe import patch
+
+ for url in patch.src_patches(d):
+ (_, _, fullpath, _, _, _) = bb.fetch.decodeurl(url)
+
+ # skip patches not in oe-core
+ if '/meta/' not in fullpath:
+ continue
+
+ content = open(fullpath, encoding='utf-8', errors='ignore').read()
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ match_kinda = kinda_status_re.search(content)
+ match_strict = strict_status_re.search(content)
+ guidelines = "https://www.openembedded.org/wiki/Commit_Patch_Message_Guidelines#Patch_Header_Recommendations:_Upstream-Status"
+
+ if not match_strict:
+ if match_kinda:
+                bb.error("Malformed Upstream-Status in patch\n%s\nPlease correct according to %s:\n%s" % (fullpath, guidelines, match_kinda.group(0)))
+            else:
+                bb.error("Missing Upstream-Status in patch\n%s\nPlease add one according to %s." % (fullpath, guidelines))
}
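
The lenient kinda_status_re only decides whether a bad header is reported as malformed rather than missing; the strict regex is what a patch header must actually satisfy. Their interplay can be exercised directly (the sample headers are invented):

    import re

    kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
    strict_status_re = re.compile(
        r"^Upstream-Status: (Pending|Submitted|Denied|Accepted|Inappropriate|Backport|Inactive-Upstream)( .+)?$",
        re.MULTILINE)

    samples = {
        "good": "Upstream-Status: Backport [https://example.com/commit]\n",
        "malformed": "upstream status: backported\n",   # lenient hit, strict miss
        "missing": "Fix build on musl.\n",
    }
    for name, header in samples.items():
        if strict_status_re.search(header):
            verdict = "ok"
        elif kinda_status_re.search(header):
            verdict = "malformed Upstream-Status"
        else:
            verdict = "missing Upstream-Status"
        print(name, "->", verdict)
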
python do_qa_configure() {
@@ -1049,15 +1224,22 @@ python do_qa_configure() {
configs = []
workdir = d.getVar('WORKDIR')
- if bb.data.inherits_class('autotools', d):
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_configure_unsafe = False
+ if 'configure-unsafe' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
+ skip_configure_unsafe = True
+
+ if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
- statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
+ statement = "grep -q -F -e 'is unsafe for cross-compilation' %s" % \
os.path.join(root,"config.log")
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
- bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
-Rerun configure task after fixing this.""")
+                    error_msg = """This autoconf log indicates errors: it looked at host include and/or library paths while determining system capabilities.
+Rerun the configure task after fixing this."""
+ oe.qa.handle_error("configure-unsafe", error_msg, d)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1068,8 +1250,14 @@ Rerun configure task after fixing this.""")
# Check gettext configuration and dependencies are correct
###########################################################################
+ skip_configure_gettext = False
+ if 'configure-gettext' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
+ skip_configure_gettext = True
+
cnf = d.getVar('EXTRA_OECONF') or ""
- if "gettext" not in d.getVar('P') and "gcc-runtime" not in d.getVar('P') and "--disable-nls" not in cnf:
+ if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
+ "--disable-nls" in cnf or skip_configure_gettext):
ml = d.getVar("MLPREFIX") or ""
if bb.data.inherits_class('cross-canadian', d):
gt = "nativesdk-gettext"
@@ -1080,8 +1268,8 @@ Rerun configure task after fixing this.""")
for config in configs:
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if subprocess.call(gnu, shell=True) == 0:
- bb.fatal("""%s required but not in DEPENDS for file %s.
-Missing inherit gettext?""" % (gt, config))
+ error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
+ oe.qa.handle_error("configure-gettext", error_msg, d)
###########################################################################
# Check unrecognised configure options (with a white list)
@@ -1089,18 +1277,19 @@ Missing inherit gettext?""" % (gt, config))
if bb.data.inherits_class("autotools", d):
bb.note("Checking configure output for unrecognised options")
try:
- flag = "WARNING: unrecognized options:"
- log = os.path.join(d.getVar('B'), 'config.log')
- output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ')
+                flag = "WARNING: unrecognized options:"
+                log = os.path.join(d.getVar('B'), 'config.log')
+                output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
- options -= whitelist
+ ignore_opts = set(d.getVar("UNKNOWN_CONFIGURE_OPT_IGNORE").split())
+ options -= ignore_opts
if options:
pn = d.getVar('PN')
error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
- package_qa_handle_error("unknown-configure-option", error_msg, d)
+ oe.qa.handle_error("unknown-configure-option", error_msg, d)
except subprocess.CalledProcessError:
pass
@@ -1112,24 +1301,42 @@ Missing inherit gettext?""" % (gt, config))
if pconfig not in pkgconfigflags:
pn = d.getVar('PN')
error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
- package_qa_handle_error("invalid-packageconfig", error_msg, d)
+ oe.qa.handle_error("invalid-packageconfig", error_msg, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
+def unpack_check_src_uri(pn, d):
+ import re
+
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ if 'src-uri-bad' in skip:
+        bb.note("Recipe %s skipping qa checking: src-uri-bad" % pn)
+ return
+
+ if "${PN}" in d.getVar("SRC_URI", False):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
+
+ for url in d.getVar("SRC_URI").split():
+ if re.search(r"git(hu|la)b\.com/.+/.+/archive/.+", url):
+ oe.qa.handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub/GitLab archives, convert recipe to use git protocol" % pn, d)
+
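The archive check rejects GitHub/GitLab autogenerated tarballs because their checksums are not stable over time. A quick demonstration of the same regex against two hypothetical SRC_URI entries:

    import re

    archive_re = re.compile(r"git(hu|la)b\.com/.+/.+/archive/.+")

    urls = [
        "https://github.com/foo/bar/archive/v1.0.tar.gz",   # unstable autogenerated tarball
        "git://github.com/foo/bar.git;protocol=https",      # preferred git fetch
    ]
    for url in urls:
        flagged = bool(archive_re.search(url))
        print(url, "->", "src-uri-bad" if flagged else "ok")
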
python do_qa_unpack() {
src_uri = d.getVar('SRC_URI')
s_dir = d.getVar('S')
if src_uri and not os.path.exists(s_dir):
bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
+
+ unpack_check_src_uri(d.getVar('PN'), d)
}
# The Staging Func, to check all staging
#addtask qa_staging after do_populate_sysroot before do_build
do_populate_sysroot[postfuncs] += "do_qa_staging "
+# Check for patch fuzz
+do_patch[postfuncs] += "do_qa_patch "
+
# Check broken config.log files, for packages requiring Gettext which
# don't have it in DEPENDS.
#addtask qa_configure after do_configure before do_compile
@@ -1152,11 +1359,11 @@ python () {
# Checking ${FILESEXTRAPATHS}
extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
if '__default' not in extrapaths.split(":"):
- msg = "FILESEXTRAPATHS-variable, must always use _prepend (or _append)\n"
+        msg = "The FILESEXTRAPATHS variable must always use the :prepend (or :append)\n"
msg += "type of assignment, and don't forget the colon.\n"
msg += "Please assign it with the format of:\n"
- msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
- msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
+ msg += " FILESEXTRAPATHS:append := \":${THISDIR}/Your_Files_Path\" or\n"
+ msg += " FILESEXTRAPATHS:prepend := \"${THISDIR}/Your_Files_Path:\"\n"
msg += "in your bbappend file\n\n"
msg += "Your incorrect assignment is:\n"
msg += "%s\n" % extrapaths
@@ -1166,10 +1373,15 @@ python () {
pn = d.getVar('PN')
if pn in overrides:
msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
- package_qa_handle_error("pn-overrides", msg, d)
+ oe.qa.handle_error("pn-overrides", msg, d)
prog = re.compile(r'[A-Z]')
if prog.search(pn):
- package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+ oe.qa.handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+
+ # Some people mistakenly use DEPENDS:${PN} instead of DEPENDS and wonder
+ # why it doesn't work.
+ if (d.getVar(d.expand('DEPENDS:${PN}'))):
+ oe.qa.handle_error("pkgvarcheck", "recipe uses DEPENDS:${PN}, should use DEPENDS", d)
issues = []
if (d.getVar('PACKAGES') or "").split():
@@ -1186,8 +1398,36 @@ python () {
else:
d.setVarFlag('do_package_qa', 'rdeptask', '')
for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
+
+ if 'native-last' not in (d.getVar('INSANE_SKIP') or "").split():
+ for native_class in ['native', 'nativesdk']:
+ if bb.data.inherits_class(native_class, d):
+
+ inherited_classes = d.getVar('__inherit_cache', False) or []
+ needle = os.path.join('classes', native_class)
+
+ bbclassextend = (d.getVar('BBCLASSEXTEND') or '').split()
+                # BBCLASSEXTEND items are always added at the end
+ skip_classes = bbclassextend
+ if bb.data.inherits_class('native', d) or 'native' in bbclassextend:
+ # native also inherits nopackages and relocatable bbclasses
+ skip_classes.extend(['nopackages', 'relocatable'])
+
+ broken_order = []
+ for class_item in reversed(inherited_classes):
+ if needle not in class_item:
+ for extend_item in skip_classes:
+ if os.path.join('classes', '%s.bbclass' % extend_item) in class_item:
+ break
+ else:
+ pn = d.getVar('PN')
+ broken_order.append(os.path.basename(class_item))
+ else:
+ break
+ if broken_order:
+ oe.qa.handle_error("native-last", "%s: native/nativesdk class is not inherited last, this can result in unexpected behaviour. "
+ "Classes inherited after native/nativesdk: %s" % (pn, " ".join(broken_order)), d)
+
+ oe.qa.exit_if_errors(d)
}
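
The native-last check walks the inherit cache in reverse, stops at the first native/nativesdk entry, and records anything inherited after it unless it is a known BBCLASSEXTEND artefact. A simplified model of that walk, with invented class lists:

    import os

    def classes_after_native(inherited, native_class, skip):
        """Collect classes inherited after native/nativesdk, ignoring
        BBCLASSEXTEND artefacts; the lists here are invented examples."""
        needle = os.path.join('classes', native_class)
        broken = []
        for path in reversed(inherited):
            if needle in path:
                break
            if os.path.basename(path).replace('.bbclass', '') not in skip:
                broken.append(os.path.basename(path))
        return broken

    inherited = ['classes/base.bbclass', 'classes/native.bbclass',
                 'classes/cmake.bbclass', 'classes/relocatable.bbclass']
    print(classes_after_native(inherited, 'native', {'nopackages', 'relocatable'}))
    # -> ['cmake.bbclass']: cmake was inherited after native, so the
    #    native-last check above would flag it
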
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
index bbeecba7bd..e77107c893 100644
--- a/meta/classes/kernel-artifact-names.bbclass
+++ b/meta/classes/kernel-artifact-names.bbclass
@@ -1,14 +1,27 @@
+##################################################################
+# Specific kernel creation info
+# for recipes/bbclasses which need to reuse some of the kernel
+# artifacts, but aren't kernel recipes themselves
+##################################################################
+
+inherit image-artifact-names
+
KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
+KERNEL_ARTIFACT_BIN_EXT ?= ".bin"
KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_IMAGE_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
+KERNEL_IMAGETYPE_SYMLINK ?= "1"
KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_DTB_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+KERNEL_FIT_BIN_EXT ?= "${KERNEL_ARTIFACT_BIN_EXT}"
MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 867b776aa7..b4338da1b1 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -1,14 +1,20 @@
# Support for device tree generation
-PACKAGES_append = " \
- ${KERNEL_PACKAGE_NAME}-devicetree \
- ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
-"
-FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
-FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
+python () {
+ if not bb.data.inherits_class('nopackages', d):
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
+ if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
+}
+
+FILES:${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
+FILES:${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
+# dtc flags passed via DTC_FLAGS env variable
+KERNEL_DTC_FLAGS ?= ""
+
normalize_dtb () {
dtb="$1"
if echo $dtb | grep -q '/dts/'; then
@@ -27,7 +33,7 @@ get_real_dtb_path_in_kernel () {
echo "$dtb_path"
}
-do_configure_append() {
+do_configure:append() {
if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
case "${ARCH}" in
@@ -49,14 +55,18 @@ do_configure_append() {
fi
}
-do_compile_append() {
+do_compile:append() {
+ if [ -n "${KERNEL_DTC_FLAGS}" ]; then
+ export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
+ fi
+
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb
+ oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
done
}
-do_install_append() {
+do_install:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
@@ -66,28 +76,36 @@ do_install_append() {
done
}
-do_deploy_append() {
+do_deploy:append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
- install -d ${DEPLOYDIR}
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ install -d $deployDir
+ install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ fi
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ] ; then
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ fi
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- > ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ if [ -n "${KERNEL_DTB_LINK_NAME}" ]; then
+ ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT} \
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext${KERNEL_DTB_BIN_EXT}
+ fi
fi
fi
done
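
The zImage bundle branch is plain byte-level concatenation of the kernel image and a DTB (and optionally an initramfs kernel) into one .bin. The same operation sketched in Python, with example file names:

    def bundle(kernel_path, dtb_path, out_path):
        """Concatenate a kernel image and DTB into a single bootable blob,
        mirroring the `cat kernel dtb > bundle.bin` step above."""
        with open(out_path, "wb") as out:
            for part in (kernel_path, dtb_path):
                with open(part, "rb") as f:
                    out.write(f.read())

    # bundle("zImage", "am335x-bone.dtb", "zImage-am335x-bone.dtb.bin")  # paths are examples
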
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 718162a861..8a9b195d6e 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -10,6 +12,8 @@ python __anonymous () {
uarch = d.getVar("UBOOT_ARCH")
if uarch == "arm64":
replacementtype = "Image"
+ elif uarch == "riscv":
+ replacementtype = "Image"
elif uarch == "mips":
replacementtype = "vmlinuz.bin"
elif uarch == "x86":
@@ -19,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -30,27 +36,55 @@ python __anonymous () {
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ ubootenv = d.getVar('UBOOT_ENV')
+ if ubootenv:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/bootloader:do_populate_sysroot')
+
+    # Check if there are any dtb providers
+ providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
+ if providerdtb:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
+
# Verified boot will sign the fitImage and append the public key to
# U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
# the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE') == "1":
+ if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
-# Options for the device tree compiler passed to mkimage '-D' feature:
-UBOOT_MKIMAGE_DTCOPTS ??= ""
+
+# Description string
+FIT_DESC ?= "Kernel fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
+
+# Sign individual images as well
+FIT_SIGN_INDIVIDUAL ?= "0"
+
+FIT_CONF_PREFIX ?= "conf-"
+FIT_CONF_PREFIX[doc] = "Prefix to use for FIT configuration node name"
+
+# Keys used to sign individual image nodes.
+# The keys to sign image nodes must be different from those used to sign
+# configuration nodes, otherwise the "required" property, from
+# UBOOT_DTB_BINARY, will be set to "conf", because "conf" prevails over "image".
+# Image signature checking would then no longer be mandatory, and no error
+# would be raised if it fails.
+# UBOOT_SIGN_IMG_KEYNAME = "dev2" # key name in keydir (e.g. "dev2.crt", "dev2.key")
#
# Emit the fitImage ITS header
#
# $1 ... .its filename
fitimage_emit_fit_header() {
- cat << EOF >> ${1}
+ cat << EOF >> $1
/dts-v1/;
/ {
- description = "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}";
+ description = "${FIT_DESC}";
#address-cells = <1>;
EOF
}
@@ -67,24 +101,24 @@ EOF
fitimage_emit_section_maint() {
case $2 in
imagestart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
images {
EOF
;;
confstart)
- cat << EOF >> ${1}
+ cat << EOF >> $1
configurations {
EOF
;;
sectend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
fitend)
- cat << EOF >> ${1}
+ cat << EOF >> $1
};
EOF
;;
@@ -100,7 +134,9 @@ EOF
# $4 ... Compression type
fitimage_emit_section_kernel() {
- kernel_csum="sha1"
+ kernel_csum="${FIT_HASH_ALG}"
+ kernel_sign_algo="${FIT_SIGN_ALG}"
+ kernel_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ENTRYPOINT="${UBOOT_ENTRYPOINT}"
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
@@ -108,21 +144,32 @@ fitimage_emit_section_kernel() {
awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
fi
- cat << EOF >> ${1}
- kernel@${2} {
+ cat << EOF >> $1
+ kernel-$2 {
description = "Linux kernel";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "kernel";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${4}";
+ compression = "$4";
load = <${UBOOT_LOADADDRESS}>;
- entry = <${ENTRYPOINT}>;
- hash@1 {
- algo = "${kernel_csum}";
+ entry = <$ENTRYPOINT>;
+ hash-1 {
+ algo = "$kernel_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$kernel_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$kernel_csum,$kernel_sign_algo";
+ key-name-hint = "$kernel_sign_keyname";
};
};
EOF
+ fi
}
#
@@ -133,7 +180,9 @@ EOF
# $3 ... Path to DTB image
fitimage_emit_section_dtb() {
- dtb_csum="sha1"
+ dtb_csum="${FIT_HASH_ALG}"
+ dtb_sign_algo="${FIT_SIGN_ALG}"
+ dtb_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
dtb_loadline=""
dtb_ext=${DTB##*.}
@@ -144,19 +193,67 @@ fitimage_emit_section_dtb() {
elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
fi
- cat << EOF >> ${1}
- fdt@${2} {
+ cat << EOF >> $1
+ fdt-$2 {
description = "Flattened Device Tree blob";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "flat_dt";
arch = "${UBOOT_ARCH}";
compression = "none";
- ${dtb_loadline}
- hash@1 {
- algo = "${dtb_csum}";
+ $dtb_loadline
+ hash-1 {
+ algo = "$dtb_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$dtb_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$dtb_csum,$dtb_sign_algo";
+ key-name-hint = "$dtb_sign_keyname";
};
};
EOF
+ fi
+}
+
+#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
+
+ cat << EOF >> $1
+ bootscr-$2 {
+ description = "U-boot script";
+ data = /incbin/("$3");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash-1 {
+ algo = "$bootscr_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
+ };
+ };
+EOF
+ fi
}
#
@@ -167,20 +264,20 @@ EOF
# $3 ... Path to setup image
fitimage_emit_section_setup() {
- setup_csum="sha1"
+ setup_csum="${FIT_HASH_ALG}"
- cat << EOF >> ${1}
- setup@${2} {
+ cat << EOF >> $1
+ setup-$2 {
description = "Linux setup.bin";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "x86_setup";
arch = "${UBOOT_ARCH}";
os = "linux";
compression = "none";
load = <0x00090000>;
entry = <0x00090000>;
- hash@1 {
- algo = "${setup_csum}";
+ hash-1 {
+ algo = "$setup_csum";
};
};
EOF
@@ -194,8 +291,9 @@ EOF
# $3 ... Path to ramdisk image
fitimage_emit_section_ramdisk() {
- ramdisk_csum="sha1"
- ramdisk_ctype="none"
+ ramdisk_csum="${FIT_HASH_ALG}"
+ ramdisk_sign_algo="${FIT_SIGN_ALG}"
+ ramdisk_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
ramdisk_loadline=""
ramdisk_entryline=""
@@ -206,39 +304,32 @@ fitimage_emit_section_ramdisk() {
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
fi
- case $3 in
- *.gz)
- ramdisk_ctype="gzip"
- ;;
- *.bz2)
- ramdisk_ctype="bzip2"
- ;;
- *.lzma)
- ramdisk_ctype="lzma"
- ;;
- *.lzo)
- ramdisk_ctype="lzo"
- ;;
- *.lz4)
- ramdisk_ctype="lz4"
- ;;
- esac
-
- cat << EOF >> ${1}
- ramdisk@${2} {
+ cat << EOF >> $1
+ ramdisk-$2 {
description = "${INITRAMFS_IMAGE}";
- data = /incbin/("${3}");
+ data = /incbin/("$3");
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${ramdisk_ctype}";
- ${ramdisk_loadline}
- ${ramdisk_entryline}
- hash@1 {
- algo = "${ramdisk_csum}";
+ compression = "none";
+ $ramdisk_loadline
+ $ramdisk_entryline
+ hash-1 {
+ algo = "$ramdisk_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$ramdisk_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$ramdisk_csum,$ramdisk_sign_algo";
+ key-name-hint = "$ramdisk_sign_keyname";
};
};
EOF
+ fi
}
#
@@ -248,100 +339,138 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
- conf_csum="sha1"
- if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
+ conf_csum="${FIT_HASH_ALG}"
+ conf_sign_algo="${FIT_SIGN_ALG}"
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
+ its_file="$1"
+ kernel_id="$2"
+ dtb_image="$3"
+ ramdisk_id="$4"
+ bootscr_id="$5"
+ config_id="$6"
+ default_flag="$7"
+
# Test if we have any DTBs at all
sep=""
conf_desc=""
+ conf_node="${FIT_CONF_PREFIX}"
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
- if [ -n "${2}" ]; then
+	# The conf node name is selected based on the dtb ID if present,
+	# otherwise it is selected based on the kernel ID
+ if [ -n "$dtb_image" ]; then
+ conf_node=$conf_node$dtb_image
+ else
+ conf_node=$conf_node$kernel_id
+ fi
+
+ if [ -n "$kernel_id" ]; then
conf_desc="Linux kernel"
sep=", "
- kernel_line="kernel = \"kernel@${2}\";"
+ kernel_line="kernel = \"kernel-$kernel_id\";"
+ fi
+
+ if [ -n "$dtb_image" ]; then
+ conf_desc="$conf_desc${sep}FDT blob"
+ sep=", "
+ fdt_line="fdt = \"fdt-$dtb_image\";"
fi
- if [ -n "${3}" ]; then
- conf_desc="${conf_desc}${sep}FDT blob"
+ if [ -n "$ramdisk_id" ]; then
+ conf_desc="$conf_desc${sep}ramdisk"
sep=", "
- fdt_line="fdt = \"fdt@${3}\";"
+ ramdisk_line="ramdisk = \"ramdisk-$ramdisk_id\";"
fi
- if [ -n "${4}" ]; then
- conf_desc="${conf_desc}${sep}ramdisk"
+ if [ -n "$bootscr_id" ]; then
+ conf_desc="$conf_desc${sep}u-boot script"
sep=", "
- ramdisk_line="ramdisk = \"ramdisk@${4}\";"
+ bootscr_line="bootscr = \"bootscr-$bootscr_id\";"
fi
- if [ -n "${5}" ]; then
- conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup@${5}\";"
+ if [ -n "$config_id" ]; then
+ conf_desc="$conf_desc${sep}setup"
+ setup_line="setup = \"setup-$config_id\";"
fi
- if [ "${6}" = "1" ]; then
- default_line="default = \"conf@${3}\";"
+ if [ "$default_flag" = "1" ]; then
+		# The default node is selected based on the dtb ID if present,
+		# otherwise it is selected based on the kernel ID
+ if [ -n "$dtb_image" ]; then
+ default_line="default = \"${FIT_CONF_PREFIX}$dtb_image\";"
+ else
+ default_line="default = \"${FIT_CONF_PREFIX}$kernel_id\";"
+ fi
fi
- cat << EOF >> ${1}
- ${default_line}
- conf@${3} {
- description = "${6} ${conf_desc}";
- ${kernel_line}
- ${fdt_line}
- ${ramdisk_line}
- ${setup_line}
- hash@1 {
- algo = "${conf_csum}";
+ cat << EOF >> $its_file
+ $default_line
+ $conf_node {
+ description = "$default_flag $conf_desc";
+ $kernel_line
+ $fdt_line
+ $ramdisk_line
+ $bootscr_line
+ $setup_line
+ hash-1 {
+ algo = "$conf_csum";
};
EOF
- if [ ! -z "${conf_sign_keyname}" ] ; then
+ if [ -n "$conf_sign_keyname" ] ; then
sign_line="sign-images = "
sep=""
- if [ -n "${2}" ]; then
- sign_line="${sign_line}${sep}\"kernel\""
+ if [ -n "$kernel_id" ]; then
+ sign_line="$sign_line${sep}\"kernel\""
+ sep=", "
+ fi
+
+ if [ -n "$dtb_image" ]; then
+ sign_line="$sign_line${sep}\"fdt\""
sep=", "
fi
- if [ -n "${3}" ]; then
- sign_line="${sign_line}${sep}\"fdt\""
+ if [ -n "$ramdisk_id" ]; then
+ sign_line="$sign_line${sep}\"ramdisk\""
sep=", "
fi
- if [ -n "${4}" ]; then
- sign_line="${sign_line}${sep}\"ramdisk\""
+ if [ -n "$bootscr_id" ]; then
+ sign_line="$sign_line${sep}\"bootscr\""
sep=", "
fi
- if [ -n "${5}" ]; then
- sign_line="${sign_line}${sep}\"setup\""
+ if [ -n "$config_id" ]; then
+ sign_line="$sign_line${sep}\"setup\""
fi
- sign_line="${sign_line};"
+ sign_line="$sign_line;"
- cat << EOF >> ${1}
- signature@1 {
- algo = "${conf_csum},rsa2048";
- key-name-hint = "${conf_sign_keyname}";
- ${sign_line}
+ cat << EOF >> $its_file
+ signature-1 {
+ algo = "$conf_csum,$conf_sign_algo";
+ key-name-hint = "$conf_sign_keyname";
+ $sign_line
};
EOF
fi
- cat << EOF >> ${1}
+ cat << EOF >> $its_file
};
EOF
}
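
fitimage_emit_section_config assembles the configuration node from whichever image IDs are present, naming the node after the DTB when one exists and after the kernel otherwise. A compact Python rendering of that string assembly (IDs and names invented):

    def emit_config_node(prefix, kernel_id=None, dtb_image=None,
                         ramdisk_id=None, bootscr_id=None):
        """Assemble an ITS configuration node the way the shell code above
        does, naming the node after the DTB if present, else the kernel."""
        node = prefix + (dtb_image or str(kernel_id))
        lines = []
        if kernel_id:
            lines.append('kernel = "kernel-%s";' % kernel_id)
        if dtb_image:
            lines.append('fdt = "fdt-%s";' % dtb_image)
        if ramdisk_id:
            lines.append('ramdisk = "ramdisk-%s";' % ramdisk_id)
        if bootscr_id:
            lines.append('bootscr = "bootscr-%s";' % bootscr_id)
        body = "\n".join("\t\t\t" + l for l in lines)
        return "\t\t%s {\n%s\n\t\t};" % (node, body)

    print(emit_config_node("conf-", kernel_id=1, dtb_image="am335x-bone.dtb"))
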
@@ -356,104 +485,154 @@ fitimage_assemble() {
kernelcount=1
dtbcount=""
DTBS=""
- ramdiskcount=${3}
+ ramdiskcount=$3
setupcount=""
- rm -f ${1} arch/${ARCH}/boot/${2}
+ bootscr_id=""
+ rm -f $1 arch/${ARCH}/boot/$2
- fitimage_emit_fit_header ${1}
+ if [ -n "${UBOOT_SIGN_IMG_KEYNAME}" -a "${UBOOT_SIGN_KEYNAME}" = "${UBOOT_SIGN_IMG_KEYNAME}" ]; then
+ bbfatal "Keys used to sign images and configuration nodes must be different."
+ fi
+
+ fitimage_emit_fit_header $1
#
# Step 1: Prepare a kernel image section.
#
- fitimage_emit_section_maint ${1} imagestart
+ fitimage_emit_section_maint $1 imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
#
+
if [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
- if echo ${DTB} | grep -q '/dts/'; then
- bbwarn "${DTB} contains the full path to the the dts file, but only the dtb name should be used."
- DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+ if echo $DTB | grep -q '/dts/'; then
+				bbwarn "$DTB contains the full path to the dts file, but only the dtb name should be used."
+ DTB=`basename $DTB | sed 's,\.dts$,.dtb,g'`
fi
- DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
- if [ ! -e "${DTB_PATH}" ]; then
- DTB_PATH="arch/${ARCH}/boot/${DTB}"
+
+ # Skip ${DTB} if it's also provided in ${EXTERNAL_KERNEL_DEVICETREE}
+ if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -s ${EXTERNAL_KERNEL_DEVICETREE}/${DTB} ]; then
+ continue
+ fi
+
+ DTB_PATH="arch/${ARCH}/boot/dts/$DTB"
+ if [ ! -e "$DTB_PATH" ]; then
+ DTB_PATH="arch/${ARCH}/boot/$DTB"
fi
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB $DTB_PATH
done
fi
+ if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
+ dtbcount=1
+ for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
+ DTB=$(echo "$DTB" | tr '/' '_')
+ DTBS="$DTBS $DTB"
+ fitimage_emit_section_dtb $1 $DTB "${EXTERNAL_KERNEL_DEVICETREE}/$DTB"
+ done
+ fi
+
+ #
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script $1 "$bootscr_id" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
- fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
+ fitimage_emit_section_setup $1 $setupcount arch/${ARCH}/boot/setup.bin
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
- for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
- initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
- echo "Using $initramfs_path"
- if [ -e "${initramfs_path}" ]; then
- fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
+ for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst cpio.gz ext2.gz cpio; do
+ initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img"
+ echo -n "Searching for $initramfs_path..."
+ if [ -e "$initramfs_path" ]; then
+ echo "found"
+ fitimage_emit_section_ramdisk $1 "$ramdiskcount" "$initramfs_path"
break
+ else
+ echo "not found"
fi
done
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
# Force the first Kernel and DTB in the default config
kernelcount=1
- if [ -n "${dtbcount}" ]; then
+ if [ -n "$dtbcount" ]; then
dtbcount=1
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
- fitimage_emit_section_maint ${1} confstart
-
- if [ -n "${DTBS}" ]; then
+ fitimage_emit_section_maint $1 confstart
+
+	# kernel-fitimage.bbclass currently supports exactly one kernel image to be
+	# added to the FIT image, along with 0 or more device trees and 0 or 1 ramdisk.
+	# It is also possible to include an initramfs bundle (kernel and rootfs in one
+	# binary); when the initramfs bundle is used, the ramdisk is disabled.
+	# If a device tree is part of the FIT image, the default configuration is
+	# selected based on the dtbcount; if no dtb is present, it is selected based
+	# on the kernelcount.
+ if [ -n "$DTBS" ]; then
i=1
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
- if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ if [ "$dtb_ext" = "dtbo" ]; then
+ fitimage_emit_section_config $1 "" "$DTB" "" "$bootscr_id" "" "`expr $i = $dtbcount`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config $1 $kernelcount "$DTB" "$ramdiskcount" "$bootscr_id" "$setupcount" "`expr $i = $dtbcount`"
fi
- i=`expr ${i} + 1`
+ i=`expr $i + 1`
done
+ else
+ defaultconfigcount=1
+ fitimage_emit_section_config $1 $kernelcount "" "$ramdiskcount" "$bootscr_id" "$setupcount" $defaultconfigcount
fi
- fitimage_emit_section_maint ${1} sectend
+ fitimage_emit_section_maint $1 sectend
- fitimage_emit_section_maint ${1} fitend
+ fitimage_emit_section_maint $1 fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
- uboot-mkimage \
+ ${UBOOT_MKIMAGE} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
- -f ${1} \
- arch/${ARCH}/boot/${2}
+ -f $1 \
+ arch/${ARCH}/boot/$2
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -463,18 +642,19 @@ fitimage_assemble() {
cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
fi
- uboot-mkimage \
+ ${UBOOT_MKIMAGE_SIGN} \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
+ -r arch/${ARCH}/boot/$2 \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
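
Steps 7 and 8 shell out to ${UBOOT_MKIMAGE} and, when signing, re-run it with -F -k to sign in place. A sketch of that control flow using subprocess; the flags mirror the calls above, while the binary name, paths and defaults are placeholders:

    import subprocess

    def assemble_fit(its, out, mkimage="mkimage", dtcopts="", sign_keydir=None):
        """Sketch of the assemble + optional re-sign flow above; the
        mkimage binary name and all paths are examples."""
        cmd = [mkimage]
        if dtcopts:
            cmd += ["-D", dtcopts]          # options for the device tree compiler
        cmd += ["-f", its, out]             # build the FIT from the .its source
        subprocess.check_call(cmd)
        if sign_keydir:
            # Re-sign the existing FIT in place; "-r" marks the keys as
            # required, as the shell code above does (the -K dtb argument
            # is added by the caller when a U-Boot DTB is available).
            subprocess.check_call([mkimage, "-F", "-k", sign_keydir, "-r", out])

    # assemble_fit("fit-image.its", "fitImage", sign_keydir="/path/to/keys")
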
do_assemble_fitimage() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
cd ${B}
- fitimage_assemble fit-image.its fitImage
+ fitimage_assemble fit-image.its fitImage ""
fi
}
@@ -484,39 +664,127 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
-addtask assemble_fitimage_initramfs before do_deploy after do_install
+addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
+
+do_kernel_generate_rsa_keys() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "0" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
+ bbwarn "FIT_GENERATE_KEYS is set to 1 even though UBOOT_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
+ fi
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" ] && [ "${FIT_GENERATE_KEYS}" = "1" ]; then
+
+ # Generate keys to sign configuration nodes, only if they don't already exist
+ if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key ] || \
+ [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${UBOOT_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing fitImage"
+ openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
+ "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
+ "${FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
+ -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".key \
+ -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_KEYNAME}".crt
+ fi
+
+ # Generate keys to sign image nodes, only if they don't already exist
+ if [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key ] || \
+ [ ! -f "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${UBOOT_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing fitImage"
+ openssl genrsa ${FIT_KEY_GENRSA_ARGS} -out \
+ "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ "${FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${FIT_KEY_SIGN_PKCS}" \
+ -key "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".key \
+ -out "${UBOOT_SIGN_KEYDIR}/${UBOOT_SIGN_IMG_KEYNAME}".crt
+ fi
+ fi
+}
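
do_kernel_generate_rsa_keys wraps two openssl invocations per key pair and skips generation when both files already exist. An equivalent shell-out sketch in Python, with placeholder directory and key names:

    import os
    import subprocess

    def generate_fit_key(keydir, keyname, numbits="2048"):
        """Generate the RSA key and self-signed certificate the task above
        creates with openssl; directory and key names are placeholders."""
        os.makedirs(keydir, exist_ok=True)
        key = os.path.join(keydir, keyname + ".key")
        crt = os.path.join(keydir, keyname + ".crt")
        if not (os.path.exists(key) and os.path.exists(crt)):
            subprocess.check_call(["openssl", "genrsa", "-out", key, numbits])
            subprocess.check_call(["openssl", "req", "-batch", "-new", "-x509",
                                   "-key", key, "-out", crt])

    # generate_fit_key("/tmp/fit-keys", "dev")   # then point UBOOT_SIGN_KEYDIR at it
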
+
+addtask kernel_generate_rsa_keys before do_assemble_fitimage after do_compile
kernel_do_deploy[vardepsexclude] = "DATETIME"
-kernel_do_deploy_append() {
+kernel_do_deploy:append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_LINK_NAME}
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
+
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
- fi
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
- # UBOOT_DTB_IMAGE is a realfile, but we can't use
- # ${UBOOT_DTB_IMAGE} since it contains ${PV} which is aimed
- # for u-boot, but we are in kernel env now.
- install -m 0644 ${B}/u-boot-${MACHINE}*.dtb ${DEPLOYDIR}/
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT}"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}${KERNEL_FIT_BIN_EXT} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
fi
fi
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ] ; then
+		# UBOOT_DTB_IMAGE is a real file, but we can't use
+		# ${UBOOT_DTB_IMAGE} since it contains ${PV}, which is meant
+		# for u-boot, and we are in the kernel env now.
+ install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
+ fi
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${UBOOT_BINARY}" -a -n "${SPL_DTB_BINARY}" ] ; then
+		# If we're also creating and/or signing the U-Boot fit, we need to
+		# deploy it, its .its file, and u-boot-spl.dtb
+ install -m 0644 ${B}/u-boot-spl-${MACHINE}*.dtb "$deployDir/"
+ echo "Copying u-boot-fitImage file..."
+ install -m 0644 ${B}/u-boot-fitImage-* "$deployDir/"
+ echo "Copying u-boot-its file..."
+ install -m 0644 ${B}/u-boot-its-* "$deployDir/"
+ fi
+}
+
+# The function below performs the following in case of initramfs bundles:
+# - Removes do_assemble_fitimage. FIT generation is done through
+# do_assemble_fitimage_initramfs. do_assemble_fitimage is not needed
+# and should not be part of the tasks to be executed.
+# - Since do_kernel_generate_rsa_keys is inserted by default
+# between do_compile and do_assemble_fitimage, this is
+# not suitable in case of initramfs bundles. do_kernel_generate_rsa_keys
+# should be between do_bundle_initramfs and do_assemble_fitimage_initramfs.
+python () {
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+ bb.build.deltask('kernel_generate_rsa_keys', d)
+ bb.build.addtask('kernel_generate_rsa_keys', 'do_assemble_fitimage_initramfs', 'do_bundle_initramfs', d)
}
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
index 5d92f3b636..44b2015468 100644
--- a/meta/classes/kernel-grub.bbclass
+++ b/meta/classes/kernel-grub.bbclass
@@ -99,7 +99,7 @@ python __anonymous () {
typelower = type.lower()
preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
- d.setVar('pkg_preinst_kernel-image-' + typelower + '_append', preinst_append)
- d.setVar('pkg_postinst_kernel-image-' + typelower + '_prepend', postinst_prepend)
+ d.setVar('pkg_preinst:kernel-image-' + typelower + ':append', preinst_append)
+ d.setVar('pkg_postinst:kernel-image-' + typelower + ':prepend', postinst_prepend)
}
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
index e8d3eb5105..a29c294810 100644
--- a/meta/classes/kernel-module-split.bbclass
+++ b/meta/classes/kernel-module-split.bbclass
@@ -1,4 +1,4 @@
-pkg_postinst_modules () {
+pkg_postinst:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -8,7 +8,7 @@ else
fi
}
-pkg_postrm_modules () {
+pkg_postrm:modules () {
if [ -z "$D" ]; then
depmod -a ${KERNEL_VERSION}
else
@@ -24,11 +24,12 @@ fi
PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
-do_install_append() {
+do_install:append() {
install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
+KERNEL_SPLIT_MODULES ?= "1"
+PACKAGESPLITFUNCS:prepend = "split_kernel_module_packages "
KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
@@ -44,9 +45,26 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
tempfile.tempdir = d.getVar("WORKDIR")
+        compressed = re.match(r'.*\.(gz|xz|zst)$', file)
tf = tempfile.mkstemp()
tmpfile = tf[1]
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
+ if compressed:
+ tmpkofile = tmpfile + ".ko"
+ if compressed.group(1) == 'gz':
+ cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'xz':
+ cmd = "xz -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'zst':
+ cmd = "zstd -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ else:
+                msg = "Cannot decompress '%s'" % file
+                raise ValueError(msg)
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
+ else:
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
subprocess.check_call(cmd, shell=True)
# errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
f = open(tmpfile, errors='replace')
@@ -54,6 +72,8 @@ python split_kernel_module_packages () {
f.close()
os.close(tf[0])
os.unlink(tmpfile)
+ if compressed:
+ os.unlink(tmpkofile)
vals = {}
for i in l:
m = modinfoexp.match(i)
@@ -84,11 +104,11 @@ python split_kernel_module_packages () {
else:
f.write('%s\n' % basename)
f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
- bb.fatal("pkg_postinst_%s not defined" % pkg)
+ bb.fatal("pkg_postinst:%s not defined" % pkg)
postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
# Write out any modconf fragment
modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
@@ -101,15 +121,19 @@ python split_kernel_module_packages () {
elif modconf:
bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
- files = d.getVar('FILES_%s' % pkg)
+ files = d.getVar('FILES:%s' % pkg)
files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
- d.setVar('FILES_%s' % pkg, files)
+ d.setVar('FILES:%s' % pkg, files)
+
+ conffiles = d.getVar('CONFFILES:%s' % pkg)
+ conffiles = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (conffiles, basename, basename)
+ d.setVar('CONFFILES:%s' % pkg, conffiles)
if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
- d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
+ old_desc = d.getVar('DESCRIPTION:' + pkg) or ""
+ d.setVar('DESCRIPTION:' + pkg, old_desc + "; " + vals["description"])
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
modinfo_deps = []
if "depends" in vals and vals["depends"] != "":
for dep in vals["depends"].split(","):
@@ -119,33 +143,41 @@ python split_kernel_module_packages () {
for dep in modinfo_deps:
if not dep in rdepends:
rdepends[dep] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
# Avoid automatic -dev recommendations for modules ending with -dev.
- d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
+ d.setVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs', 1)
# Provide virtual package without postfix
providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
if providevirt == "1":
postfix = format.split('%s')[1]
- d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
+ d.setVar('RPROVIDES:' + pkg, pkg.replace(postfix, ''))
kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
kernel_version = d.getVar("KERNEL_VERSION")
- module_regex = r'^(.*)\.k?o$'
+ metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
+ splitmods = d.getVar('KERNEL_SPLIT_MODULES')
+ postinst = d.getVar('pkg_postinst:modules')
+ postrm = d.getVar('pkg_postrm:modules')
+
+ if splitmods != '1':
+ etcdir = d.getVar('sysconfdir')
+ d.appendVar('FILES:' + metapkg, '%s/modules-load.d/ %s/modprobe.d/ %s/modules/' % (etcdir, etcdir, d.getVar("nonarch_base_libdir")))
+ d.appendVar('pkg_postinst:%s' % metapkg, postinst)
+        d.prependVar('pkg_postrm:%s' % metapkg, postrm)
+ return
+
+ module_regex = r'^(.*)\.k?o(?:\.(gz|xz|zst))?$'
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
- postinst = d.getVar('pkg_postinst_modules')
- postrm = d.getVar('pkg_postrm_modules')
-
modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
- d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
+ d.appendVar('RDEPENDS:' + metapkg, ' '+' '.join(modules))
# If modules-load.d and modprobe.d are empty at this point, remove them to
# avoid warnings. removedirs only raises an OSError if an empty
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
index 2364053f31..2daa068298 100644
--- a/meta/classes/kernel-uboot.bbclass
+++ b/meta/classes/kernel-uboot.bbclass
@@ -1,12 +1,12 @@
+# fitImage kernel compression algorithm
+FIT_KERNEL_COMP_ALG ?= "gzip"
+FIT_KERNEL_COMP_ALG_EXTENSION ?= ".gz"
+
uboot_prep_kimage() {
if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
linux_suffix=""
linux_comp="none"
- elif [ -e arch/${ARCH}/boot/Image ] ; then
- vmlinux_path="vmlinux"
- linux_suffix=""
- linux_comp="none"
elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
rm -f linux.bin
cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
@@ -15,14 +15,18 @@ uboot_prep_kimage() {
linux_comp="none"
else
vmlinux_path="vmlinux"
- linux_suffix=".gz"
- linux_comp="gzip"
+ linux_suffix="${FIT_KERNEL_COMP_ALG_EXTENSION}"
+ linux_comp="${FIT_KERNEL_COMP_ALG}"
fi
[ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
if [ "${linux_comp}" != "none" ] ; then
- gzip -9 linux.bin
+ if [ "${linux_comp}" = "gzip" ] ; then
+ gzip -9 linux.bin
+ elif [ "${linux_comp}" = "lzo" ] ; then
+ lzop -9 linux.bin
+ fi
mv -f "linux.bin${linux_suffix}" linux.bin
fi
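
uboot_prep_kimage now keys the compressor off FIT_KERNEL_COMP_ALG, but only gzip and lzo are actually handled; anything else falls through uncompressed. That dispatch, mirrored in Python (command lines as in the shell above):

    import subprocess

    COMPRESSORS = {
        "gzip": ["gzip", "-9"],   # produces linux.bin.gz
        "lzo":  ["lzop", "-9"],   # produces linux.bin.lzo
    }

    def compress_kimage(path, alg):
        """Compress the stripped kernel binary as uboot_prep_kimage does;
        unknown algorithms are left uncompressed."""
        cmd = COMPRESSORS.get(alg)
        if cmd:
            subprocess.check_call(cmd + [path])

    # compress_kimage("linux.bin", "gzip")  # then rename linux.bin.gz back to linux.bin
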
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 496c8a7f68..1d5a8cdf29 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,16 +1,45 @@
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
+SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
+# The distro or local.conf should set this, but if nobody cares...
+LINUX_KERNEL_TYPE ??= "standard"
+
+# KMETA ?= ""
+KBRANCH ?= "master"
+KMACHINE ?= "${MACHINE}"
+SRCREV_FORMAT ?= "meta_machine"
+
+# LEVELS:
+# 0: no reporting
+# 1: report options that are specified, but not in the final config
+# 2: report options that are not hardware related, but set by a BSP
+KCONF_AUDIT_LEVEL ?= "1"
+KCONF_BSP_AUDIT_LEVEL ?= "0"
+KMETA_AUDIT ?= "yes"
+KMETA_AUDIT_WERROR ?= ""
+
# returns local (absolute) path names for all valid patches in the
# src_uri
-def find_patches(d):
+def find_patches(d,subdir):
patches = src_patches(d)
patch_list=[]
for p in patches:
- _, _, local, _, _, _ = bb.fetch.decodeurl(p)
- patch_list.append(local)
+ _, _, local, _, _, parm = bb.fetch.decodeurl(p)
+ # if patchdir has been passed, we won't be able to apply it so skip
+ # the patch for now, and special processing happens later
+ patchdir = ''
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if subdir:
+ if subdir == patchdir:
+ patch_list.append(local)
+ else:
+ # skip the patch if a patchdir was supplied, it won't be handled
+ # properly
+ if not patchdir:
+ patch_list.append(local)
return patch_list
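
The new subdir argument lets do_kernel_metadata apply kernel-meta patches separately from kernel source patches. The filtering rule, modeled without the fetcher (paths and patchdir values invented):

    def filter_patches(patches, subdir):
        """Mirror the patchdir filtering in find_patches(): with a subdir,
        keep only patches targeting it; without one, skip any patch that
        names a patchdir. Entries are (local_path, patchdir-or-None)."""
        out = []
        for local, patchdir in patches:
            if subdir:
                if patchdir == subdir:
                    out.append(local)
            elif not patchdir:
                out.append(local)
        return out

    patches = [("fix-build.patch", None), ("bsp.scc.patch", "kernel-meta")]
    print(filter_patches(patches, "kernel-meta"))  # -> ['bsp.scc.patch']
    print(filter_patches(patches, ""))             # -> ['fix-build.patch']
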
@@ -22,7 +51,7 @@ def find_sccs(d):
base, ext = os.path.splitext(os.path.basename(s))
if ext and ext in [".scc", ".cfg"]:
sources_list.append(s)
- elif base and base in 'defconfig':
+ elif base and 'defconfig' in base:
sources_list.append(s)
return sources_list
@@ -60,11 +89,35 @@ def get_machine_branch(d, default):
return default
+# returns a space-separated list of all directories on FILESEXTRAPATHS (and
+# hence available to the build), each tagged with whether it currently exists
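+# (the "path:exists" pairs are attached to do_kernel_metadata as
+# file-checksums below, so adding or removing a fragment directory on
+# FILESEXTRAPATHS re-triggers the task)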
+def get_dirs_with_fragments(d):
+ extrapaths = []
+ extrafiles = []
+ extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "")
+ # Remove default flag which was used for checking
+ extrapathsvalue = extrapathsvalue.replace("__default:", "")
+ extrapaths = extrapathsvalue.split(":")
+ for path in extrapaths:
+ if path + ":True" not in extrafiles:
+ extrafiles.append(path + ":" + str(os.path.exists(path)))
+
+ return " ".join(extrafiles)
+
do_kernel_metadata() {
set +e
+
+ if [ -n "$1" ]; then
+ mode="$1"
+ else
+ mode="patch"
+ fi
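+	# "mode" selects which phase this run performs: do_kernel_configme calls
+	# this task directly with "config"; the normal task invocation defaults
+	# to "patch".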
+
cd ${S}
export KMETA=${KMETA}
+ bbnote "do_kernel_metadata: for summary/debug, set KCONF_AUDIT_LEVEL > 0"
+
# if kernel tools are available in-tree, they are preferred
# and are placed on the path before any external tools. Unless
# the external tools flag is set, in that case we do nothing.
@@ -74,13 +127,6 @@ do_kernel_metadata() {
fi
fi
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
- machine_srcrev="${SRCREV_machine}"
- if [ -z "${machine_srcrev}" ]; then
- # fallback to SRCREV if a non machine_meta tree is being built
- machine_srcrev="${SRCREV}"
- fi
-
# In a similar manner to the kernel itself:
#
# defconfig: $(obj)/conf
@@ -102,35 +148,59 @@ do_kernel_metadata() {
if [ -n "${KBUILD_DEFCONFIG}" ]; then
if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
if [ -f "${WORKDIR}/defconfig" ]; then
- # If the two defconfig's are different, warn that we didn't overwrite the
- # one already placed in WORKDIR by the fetcher.
+			# If the two defconfigs are different, warn that we overwrote the
+			# one already placed in WORKDIR.
cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
if [ $? -ne 0 ]; then
- bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
- else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
+ bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it"
fi
+ cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
- sccs="${WORKDIR}/defconfig"
+ in_tree_defconfig="${WORKDIR}/defconfig"
else
- bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
+ bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree (${S}/arch/${ARCH}/configs/)"
fi
fi
+ if [ "$mode" = "patch" ]; then
+	# was anyone trying to patch the kernel meta data? We need to do
+ # this here, since the scc commands migrate the .cfg fragments to the
+ # kernel source tree, where they'll be used later.
+ check_git_config
+ patches="${@" ".join(find_patches(d,'kernel-meta'))}"
+ for p in $patches; do
+ (
+ cd ${WORKDIR}/kernel-meta
+ git am -s $p
+ )
+ done
+ fi
+
sccs_from_src_uri="${@" ".join(find_sccs(d))}"
- patches="${@" ".join(find_patches(d))}"
+ patches="${@" ".join(find_patches(d,''))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
- # a quick check to make sure we don't have duplicate defconfigs
- # If there's a defconfig in the SRC_URI, did we also have one from
- # the KBUILD_DEFCONFIG processing above ?
- if [ -n "$sccs" ]; then
- # we did have a defconfig from above. remove any that might be in the src_uri
- sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
+	# a quick check to make sure we don't have duplicate defconfigs. If
+	# there's a defconfig in the SRC_URI, did we also have one from the
+	# KBUILD_DEFCONFIG processing above?
+ src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
+	# drop any defconfigs from the src_uri variable; we captured one just above if it existed
+ sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
+
+ if [ -n "$in_tree_defconfig" ]; then
+ sccs_defconfig=$in_tree_defconfig
+ if [ -n "$src_uri_defconfig" ]; then
+ bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
+ fi
+ else
+ # if we didn't have an in-tree one, make our defconfig the one
+ # from the src_uri. Note: there may not have been one from the
+ # src_uri, so this can be an empty variable.
+ sccs_defconfig=$src_uri_defconfig
fi
- sccs="$sccs $sccs_from_src_uri"
+ sccs="$sccs_from_src_uri"
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
@@ -138,10 +208,10 @@ do_kernel_metadata() {
for f in ${feat_dirs}; do
if [ -d "${WORKDIR}/$f/meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
- elif [ -d "${WORKDIR}/$f" ]; then
- includes="$includes -I${WORKDIR}/$f"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
+ elif [ -d "${WORKDIR}/$f" ]; then
+ includes="$includes -I${WORKDIR}/$f"
fi
done
for s in ${sccs} ${patches}; do
@@ -157,31 +227,89 @@ do_kernel_metadata() {
# expand kernel features into their full path equivalents
bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
if [ -z "$bsp_definition" ]; then
- echo "$sccs" | grep -q defconfig
- if [ $? -ne 0 ]; then
+ if [ -z "$sccs_defconfig" ]; then
bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
fi
+ else
+ # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
+ # then we need to set a flag that will instruct the next
+ # steps to use the BSP as both configuration and patches.
+ grep -q KMETA_EXTERNAL_BSP $bsp_definition
+ if [ $? -eq 0 ]; then
+ KMETA_EXTERNAL_BSPS="t"
+ fi
fi
meta_dir=$(kgit --meta)
- # run1: pull all the configuration fragments, no matter where they come from
- elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
- scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ KERNEL_FEATURES_FINAL=""
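+	# validate each KERNEL_FEATURES entry against the available include
+	# directories; dangling features are fatal unless
+	# KERNEL_DANGLING_FEATURES_WARN_ONLY is set, in which case they are dropped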
+ if [ -n "${KERNEL_FEATURES}" ]; then
+ for feature in ${KERNEL_FEATURES}; do
+ feature_found=f
+ for d in $includes; do
+ path_to_check=$(echo $d | sed 's/^-I//')
+ if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then
+ feature_found=t
+ fi
+ done
+ if [ "$feature_found" = "f" ]; then
+ if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then
+ bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set"
+ bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue"
+ else
+ bberror "Feature '$feature' not found, this will cause configuration failures."
+ bberror "Check the SRC_URI for meta-data repositories or directories that may be missing"
+ bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue"
+ fi
+ else
+ KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature"
+ fi
+ done
+ fi
+
+ if [ "$mode" = "config" ]; then
+ # run1: pull all the configuration fragments, no matter where they come from
+ elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
fi
- # run2: only generate patches for elements that have been passed on the SRC_URI
- elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
+ # the bsp definition, then we inject the bsp_definition into the
+	# patch phase below. We'll piggyback on the sccs variable.
+ if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
+ sccs="${bsp_definition} ${sccs}"
+ fi
+
+ if [ "$mode" = "patch" ]; then
+ # run2: only generate patches for elements that have been passed on the SRC_URI
+ elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`"
+ if [ -n "${elements}" ]; then
+ scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL
+ if [ $? -ne 0 ]; then
+ bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
+ fi
fi
fi
+
+ if [ ${KCONF_AUDIT_LEVEL} -gt 0 ]; then
+ bbnote "kernel meta data summary for ${KMACHINE} (${LINUX_KERNEL_TYPE}):"
+ bbnote "======================================================================"
+ if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
+ bbnote "Non kernel-cache (external) bsp"
+ fi
+ bbnote "BSP entry point / definition: $bsp_definition"
+ if [ -n "$in_tree_defconfig" ]; then
+ bbnote "KBUILD_DEFCONFIG: ${KBUILD_DEFCONFIG}"
+ fi
+ bbnote "Fragments from SRC_URI: $sccs_from_src_uri"
+ bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
+ bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
+ fi
}
do_patch() {
@@ -233,6 +361,21 @@ do_kernel_checkout() {
fi
fi
cd ${S}
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
else
# case: we have no git repository at all.
# To support low bandwidth options for building the kernel, we'll just
@@ -254,35 +397,22 @@ do_kernel_checkout() {
git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
git clean -d -f
fi
-
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
}
-do_kernel_checkout[dirs] = "${S}"
+do_kernel_checkout[dirs] = "${S} ${WORKDIR}"
-addtask kernel_checkout before do_kernel_metadata after do_unpack
+addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
+do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
+do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
- set +e
+ do_kernel_metadata config
# translate the kconfig_mode into something that merge_config.sh
# understands
@@ -293,11 +423,11 @@ do_kernel_configme() {
*alldefconfig)
config_flags=""
;;
- *)
- if [ -f ${WORKDIR}/defconfig ]; then
- config_flags="-n"
- fi
- ;;
+ *)
+ if [ -f ${WORKDIR}/defconfig ]; then
+ config_flags="-n"
+ fi
+ ;;
esac
cd ${S}
@@ -309,16 +439,84 @@ do_kernel_configme() {
bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
fi
- CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
+ if [ $? -ne 0 -o ! -f ${B}/.config ]; then
+ bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
+ bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
+ else
+ bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
+ fi
fi
- echo "# Global settings from linux recipe" >> ${B}/.config
- echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+ if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
+ echo "# Global settings from linux recipe" >> ${B}/.config
+ echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+ fi
}
addtask kernel_configme before do_configure after do_patch
+addtask config_analysis
+
+do_config_analysis[depends] = "virtual/kernel:do_configure"
+do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot"
+
+CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt"
+CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt"
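+# The analysis can also be run on demand (illustrative invocation, assuming a
+# kernel recipe name of linux-yocto):
+#   bitbake linux-yocto -c config_analysis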
+
+python do_config_analysis() {
+ import re, string, sys, subprocess
+
+ s = d.getVar('S')
+
+ env = os.environ.copy()
+ env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
+
+ # read specific symbols from the kernel recipe or from local.conf
+    # e.g.: CONFIG_ANALYSIS:pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION'
+ config = d.getVar( 'CONFIG_ANALYSIS' )
+ if not config:
+ config = [ "" ]
+ else:
+ config = config.split()
+
+ for c in config:
+ for action in ["analysis","audit"]:
+ if action == "analysis":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' )
+
+ if action == "audit":
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ outfile = d.getVar( 'CONFIG_AUDIT_FILE' )
+
+ if c:
+ outdir = os.path.dirname( outfile )
+ outname = os.path.basename( outfile )
+ outfile = outdir + '/'+ c + '-' + outname
+
+ if config and os.path.isfile(outfile):
+ os.remove(outfile)
+
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile ))
+ if c:
+ bb.warn( analysis )
+}
python do_kernel_configcheck() {
import re, string, sys, subprocess
@@ -328,58 +526,99 @@ python do_kernel_configcheck() {
# meta-series for processing
kmeta = d.getVar("KMETA") or "meta"
if not os.path.exists(kmeta):
- kmeta = "." + kmeta
+ kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip()
s = d.getVar('S')
env = os.environ.copy()
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
+ env['LD'] = d.getVar('KERNEL_LD')
+ env['CC'] = d.getVar('KERNEL_CC')
+ env['ARCH'] = d.getVar('ARCH')
+ env['srctree'] = s
try:
configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
except subprocess.CalledProcessError as e:
bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
- try:
- subprocess.check_call(['kconf_check', '--report', '-o',
- '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
- except subprocess.CalledProcessError:
- # The configuration gathering can return different exit codes, but
- # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
- # everything here, and let the run continue.
- pass
-
config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
+ kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or ""
+ warnings_detected = False
+
+ # if config check visibility is "1", that's the lowest level of audit. So
+ # we add the --classify option to the run, since classification will
+ # streamline the output to only report options that could be boot issues,
+ # or are otherwise required for proper operation.
+ extra_params = ""
+ if config_check_visibility == 1:
+ extra_params = "--classify"
+
+ # category #1: mismatches
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ if analysis:
+ outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta )
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
- # if config check visibility is non-zero, report dropped configuration values
- mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
- if os.path.exists(mismatch_file):
- if config_check_visibility:
- with open (mismatch_file, "r") as myfile:
+ if config_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #2: invalid fragment elements
+ extra_params = ""
+ if bsp_check_visibility > 1:
+ extra_params = "--strict"
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
- if bsp_check_visibility:
- invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
- if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
- with open (invalid_file, "r") as myfile:
+ if analysis:
+ outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+ if bsp_check_visibility and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
- bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
- errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
- if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
- with open (errors_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
-
- # if the audit level is greater than two, we report if a fragment has overriden
- # a value from a base fragment. This is really only used for new kernel introduction
- if bsp_check_visibility > 2:
- redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
- if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
- with open (redefinition_file, "r") as myfile:
+ bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results)
+ warnings_detected = True
+
+ # category #3: redefined options (this is pretty verbose and is debug only)
+ try:
+ analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8')
+ except subprocess.CalledProcessError as e:
+ bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8'))
+
+ if analysis:
+ outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta)
+ if os.path.isfile(outfile):
+ os.remove(outfile)
+ with open(outfile, 'w+') as f:
+ f.write( analysis )
+
+    # if the audit level is greater than two, we report if a fragment has overridden
+    # a value from a base fragment. This is really only used for new kernel introduction.
+ if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0:
+ with open (outfile, "r") as myfile:
results = myfile.read()
bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
+ warnings_detected = True
+
+ if warnings_detected and kmeta_audit_werror:
+ bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
@@ -395,7 +634,31 @@ do_validate_branches() {
# if SRCREV is AUTOREV it shows up as AUTOINC there's nothing to
# check and we can exit early
if [ "${machine_srcrev}" = "AUTOINC" ]; then
+ linux_yocto_dev='${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "1", "", d)}'
+ if [ -n "$linux_yocto_dev" ]; then
+ git checkout -q -f ${machine_branch}
+ ver=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
+ patchlevel=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ sublevel=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
+ kver="$ver.$patchlevel"
+ bbnote "dev kernel: performing version -> branch -> SRCREV validation"
+ bbnote "dev kernel: recipe version ${LINUX_VERSION}, src version: $kver"
+ echo "${LINUX_VERSION}" | grep -q $kver
+ if [ $? -ne 0 ]; then
+ version="$(echo ${LINUX_VERSION} | sed 's/\+.*$//g')"
+ versioned_branch="v$version/$machine_branch"
+
+ machine_branch=$versioned_branch
+ force_srcrev="$(git rev-parse $machine_branch 2> /dev/null)"
+ if [ $? -ne 0 ]; then
+ bbfatal "kernel version mismatch detected, and no valid branch $machine_branch detected"
+ fi
+
+ bbnote "dev kernel: adjusting branch to $machine_branch, srcrev to: $force_srcrev"
+ fi
+ else
bbnote "SRCREV validation is not required for AUTOREV"
+ fi
elif [ "${machine_srcrev}" = "" ]; then
if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
# SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
@@ -437,4 +700,15 @@ python () {
# If diffconfig is available, ensure it runs after kernel_configme
if 'do_diffconfig' in d:
bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
+
+ externalsrc = d.getVar('EXTERNALSRC')
+ if externalsrc:
+ # If we deltask do_patch, do_kernel_configme is left without
+ # dependencies and runs too early
+ d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
}
+
+# extra tasks
+addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
+addtask validate_branches before do_patch after do_kernel_checkout
+addtask kernel_configcheck after do_configure before do_compile
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index c0889bd3ee..4f304eb9c7 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,13 +1,19 @@
inherit linux-kernel-base kernel-module-split
+COMPATIBLE_HOST = ".*-linux"
+
KERNEL_PACKAGE_NAME ??= "kernel"
KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
-PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
+PROVIDES += "virtual/kernel"
+DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.zst", "zstd-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
-do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
+do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot"
+do_clean[depends] += "make-mod-scripts:do_clean"
CVE_PRODUCT ?= "linux_kernel"
@@ -24,6 +30,8 @@ INITRAMFS_IMAGE ?= ""
INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
INITRAMFS_TASK ?= ""
INITRAMFS_IMAGE_BUNDLE ?= ""
+INITRAMFS_DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR_IMAGE}"
+INITRAMFS_MULTICONFIG ?= ""
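+# Illustrative multiconfig setup (an assumption, not part of this change):
+# with BBMULTICONFIG = "initramfscfg" in local.conf, setting
+# INITRAMFS_MULTICONFIG = "initramfscfg" builds the initramfs image in that
+# multiconfig, pulled in via the mc:: dependency added below.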
# KERNEL_VERSION is extracted from source code. It is evaluated as
# None for the first parsing, since the code has not been fetched.
@@ -41,7 +49,7 @@ python __anonymous () {
kpn = d.getVar("KERNEL_PACKAGE_NAME")
# XXX Remove this after bug 11905 is resolved
- # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
+ # FILES:${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
if kpn == pn:
bb.warn("Some packages (E.g. *-dev) might be missing due to "
"bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
@@ -71,7 +79,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -87,16 +95,52 @@ python __anonymous () {
imagedest = d.getVar('KERNEL_IMAGEDEST')
for type in types.split():
+ if bb.data.inherits_class('nopackages', d):
+ continue
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
- d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
- d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
+ d.setVar('FILES:' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-image-%s (= ${EXTENDPKGV})' % (kname, typelower))
+ splitmods = d.getVar("KERNEL_SPLIT_MODULES")
+ if splitmods != '1':
+ d.appendVar('RDEPENDS:%s-image' % kname, ' %s-modules (= ${EXTENDPKGV})' % kname)
+ d.appendVar('RDEPENDS:%s-image-%s' % (kname, typelower), ' %s-modules-${KERNEL_VERSION_PKG_NAME} (= ${EXTENDPKGV})' % kname)
+ d.setVar('PKG:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+ d.appendVar('RPROVIDES:%s-modules' % kname, '%s-modules-${KERNEL_VERSION_PKG_NAME}' % kname)
+
+ d.setVar('PKG:%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
+ d.setVar('pkg_postinst:%s-image-%s' % (kname,typelower), """set +e
+if [ -n "$D" ]; then
+ ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+else
+ ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)."
+ install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s
+ fi
+fi
+set -e
+""" % (type, type, type, type, type, type, type))
+ d.setVar('pkg_postrm:%s-image-%s' % (kname,typelower), """set +e
+if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then
+ rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1
+fi
+set -e
+""" % (type, type, type))
+
image = d.getVar('INITRAMFS_IMAGE')
+    # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
+    # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
+    # standalone for use by wic and other tools.
if image:
- d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if d.getVar('INITRAMFS_MULTICONFIG'):
+ d.appendVarFlag('do_bundle_initramfs', 'mcdepends', ' mc::${INITRAMFS_MULTICONFIG}:${INITRAMFS_IMAGE}:do_image_complete')
+ else:
+ d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
@@ -130,7 +174,7 @@ inherit ${KERNEL_CLASSES}
# the symlink.
do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
-base_do_unpack_append () {
+python do_symlink_kernsrc () {
s = d.getVar("S")
if s[-1] == '/':
# drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
@@ -147,6 +191,10 @@ base_do_unpack_append () {
shutil.move(s, kernsrc)
os.symlink(kernsrc, s)
}
+# do_patch is normally ordered before do_configure, but
+# externalsrc.bbclass deletes do_patch, breaking the dependency of
+# do_configure on do_symlink_kernsrc.
+addtask symlink_kernsrc before do_patch do_configure after do_unpack
inherit kernel-arch deploy
@@ -185,7 +233,9 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# Some Linux kernel configurations need additional parameters on the command line
KERNEL_EXTRA_ARGS ?= ""
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE = " HOSTCC="${BUILD_CC}" HOSTCFLAGS="${BUILD_CFLAGS}" HOSTLDFLAGS="${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
+EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX}" HOSTCXXFLAGS="${BUILD_CXXFLAGS}""
+
KERNEL_ALT_IMAGETYPE ??= ""
copy_initramfs() {
@@ -194,9 +244,9 @@ copy_initramfs() {
mkdir -p ${B}/usr
# Find and use the first initramfs image archive type we find
rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
+ for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.zst; do
+ if [ -e "${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
+ cp ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
case $img in
*gz)
echo "gzip decompressing image"
@@ -205,7 +255,7 @@ copy_initramfs() {
;;
*lz4)
echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
break
;;
*lzo)
@@ -223,10 +273,17 @@ copy_initramfs() {
xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
break
;;
+ *zst)
+ echo "zst decompressing image"
+ zstd -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ break
+ ;;
esac
+ break
fi
done
- echo "Finished copy of initramfs into ./usr"
+	# Verify that the above loop found an initramfs, fail otherwise
+	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${INITRAMFS_DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz|.zst} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
do_bundle_initramfs () {
@@ -266,35 +323,39 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
-python do_devshell_prepend () {
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
+python do_devshell:prepend () {
os.environ["LDFLAGS"] = ''
}
addtask bundle_initramfs after do_install before do_deploy
-get_cc_option () {
- # Check if KERNEL_CC supports the option "file-prefix-map".
- # This option allows us to build images with __FILE__ values that do not
- # contain the host build path.
- if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
- echo "-ffile-prefix-map=${S}=/kernel-source/"
- fi
-}
+KERNEL_DEBUG_TIMESTAMPS ??= "0"
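+# With the default of "0", KBUILD_BUILD_TIMESTAMP is pinned to
+# SOURCE_DATE_EPOCH below so kernel builds stay reproducible; set it to "1"
+# to keep the kernel's own build timestamps.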
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+
+	# setup native pkg-config variables (kconfig scripts call pkg-config directly, cannot generically be overridden to pkg-config-native)
+ export PKG_CONFIG_DIR="${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig"
+ export PKG_CONFIG_PATH="$PKG_CONFIG_DIR:${STAGING_DATADIR_NATIVE}/pkgconfig"
+ export PKG_CONFIG_LIBDIR="$PKG_CONFIG_DIR"
+ export PKG_CONFIG_SYSROOT_DIR=""
+
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
- olddir=`pwd`
- cd ${S}
- SOURCE_DATE_EPOCH=`git log -1 --pretty=%ct`
- # git repo not guaranteed, so fall back to REPRODUCIBLE_TIMESTAMP_ROOTFS
- if [ $? -ne 0 ]; then
- SOURCE_DATE_EPOCH=${REPRODUCIBLE_TIMESTAMP_ROOTFS}
- fi
- cd $olddir
+ # The source directory is not necessarily a git repository, so we
+ # specify the git-dir to ensure that git does not query a
+ # repository in any parent directory.
+ SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
fi
ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
@@ -315,22 +376,40 @@ kernel_do_compile() {
copy_initramfs
use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
fi
- cc_extra=$(get_cc_option)
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
+ oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if [ "${KERNEL_DEBUG_TIMESTAMPS}" != "1" ]; then
+ # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
+ # be set....
+ if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
+ # The source directory is not necessarily a git repository, so we
+ # specify the git-dir to ensure that git does not query a
+ # repository in any parent directory.
+ SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
+ fi
+
+ ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ export KCONFIG_NOTIMESTAMP=1
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- cc_extra=$(get_cc_option)
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+ oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
# Module.symvers gets updated during the
# building of the kernel modules. We need to
@@ -339,6 +418,10 @@ do_compile_kernelmodules() {
# other kernel modules and will look at this
# file to do symbol lookups
cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
+ # 5.10+ kernels have module.lds that we need to copy for external module builds
+ if [ -e "${B}/scripts/module.lds" ]; then
+ install -Dm 0644 ${B}/scripts/module.lds ${STAGING_KERNEL_BUILDDIR}/scripts/module.lds
+ fi
else
bbnote "no modules to compile"
fi
@@ -365,12 +448,23 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+ # When including an initramfs bundle inside a FIT image, the fitImage is created after the install task
+ # by do_assemble_fitimage_initramfs.
+ # This happens after the generation of the initramfs bundle (done by do_bundle_initramfs).
+	# So, at the level of the install task, we should not try to install the
+	# fitImage: it has not been generated yet.
+ # After the generation of the fitImage, the deploy task copies the fitImage from the build directory to
+ # the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION}
fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -378,7 +472,6 @@ kernel_do_install() {
install -d ${D}${sysconfdir}/modules-load.d
install -d ${D}${sysconfdir}/modprobe.d
}
-do_install[prefuncs] += "package_get_auto_pr"
# Must be ran no earlier than after do_kernel_checkout or else Makefile won't be in ${S}/Makefile
do_kernel_version_sanity_check() {
@@ -445,11 +538,11 @@ do_shared_workdir () {
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- cp Module.symvers $kerneldir/
+ [ -e Module.symvers ] && cp Module.symvers $kerneldir/
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
- if [ -e certs/signing_key.pem ]; then
+ if [ -e certs/signing_key.x509 ]; then
# The signing_key.* files are stored in the certs/ dir in
# newer Linux kernels
mkdir -p $kerneldir/certs
@@ -485,6 +578,15 @@ do_shared_workdir () {
mkdir -p $kerneldir/arch/${ARCH}/include/generated/
cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
fi
+
+ if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
+ # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
+ # out-of-tree modules to be able to generate object files.
+ if [ -x tools/objtool/objtool ]; then
+ mkdir -p ${kerneldir}/tools/objtool
+ cp tools/objtool/objtool ${kerneldir}/tools/objtool/
+ fi
+ fi
}
# We don't need to stage anything, not the modules/firmware since those would clash with linux-firmware
@@ -492,7 +594,7 @@ sysroot_stage_all () {
:
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" oldnoconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -520,7 +622,7 @@ kernel_do_configure() {
fi
# Copy defconfig to .config if .config does not exist. This allows
- # recipes to manage the .config themselves in do_configure_prepend().
+ # recipes to manage the .config themselves in do_configure:prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
cp "${WORKDIR}/defconfig" "${B}/.config"
fi
@@ -537,34 +639,34 @@ addtask savedefconfig after do_configure
inherit cml1
-KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
+KCONFIG_CONFIG_COMMAND:append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
-FILES_${PN} = ""
-FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin"
-FILES_${KERNEL_PACKAGE_NAME}-image = ""
-FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
-FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
-FILES_${KERNEL_PACKAGE_NAME}-modules = ""
-RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
+PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules ${KERNEL_PACKAGE_NAME}-dbg"
+FILES:${PN} = ""
+FILES:${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
+FILES:${KERNEL_PACKAGE_NAME}-image = ""
+FILES:${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+FILES:${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
+FILES:${KERNEL_PACKAGE_NAME}-modules = ""
+RDEPENDS:${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base (= ${EXTENDPKGV})"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
-RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
-PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
-PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name('${KERNEL_VERSION}')}"
-RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
-DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
-
-pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
+RRECOMMENDS:${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image (= ${EXTENDPKGV})"
+PKG:${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RDEPENDS:${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux (= ${EXTENDPKGV})', '', d)}"
+PKG:${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
+RPROVIDES:${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME} = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-base = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-image = "1"
+ALLOW_EMPTY:${KERNEL_PACKAGE_NAME}-modules = "1"
+DESCRIPTION:${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
+
+pkg_postinst:${KERNEL_PACKAGE_NAME}-base () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
@@ -575,7 +677,7 @@ pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
fi
}
-PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
+PACKAGESPLITFUNCS:prepend = "split_kernel_packages "
python split_kernel_packages () {
do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
@@ -597,33 +699,25 @@ do_kernel_link_images() {
if [ -f ../../../vmlinuz.bin ]; then
ln -sf ../../../vmlinuz.bin
fi
+ if [ -f ../../../vmlinux.64 ]; then
+ ln -sf ../../../vmlinux.64
+ fi
}
addtask kernel_link_images after do_compile before do_strip
-do_strip() {
- if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
- if ! (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux"); then
- bbwarn "image type(s) will not be stripped (not supported): ${KERNEL_IMAGETYPES}"
- return
- fi
-
- cd ${B}
- headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT_DIR}/vmlinux | \
- grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
- sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
- gawk '{print $1}'`
+python do_strip() {
+ import shutil
- for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if ! (echo "$headers" | grep -q "^$str$"); then
- bbwarn "Section not found: $str";
- fi
+ strip = d.getVar('STRIP')
+ extra_sections = d.getVar('KERNEL_IMAGE_STRIP_EXTRA_SECTIONS')
+ kernel_image = d.getVar('B') + "/" + d.getVar('KERNEL_OUTPUT_DIR') + "/vmlinux"
- "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT_DIR}/vmlinux
- }; done
-
- bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
- "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
- fi;
+ if (extra_sections and kernel_image.find('boot/vmlinux') != -1):
+ kernel_image_stripped = kernel_image + ".stripped"
+ shutil.copy2(kernel_image, kernel_image_stripped)
+ oe.package.runstrip((kernel_image_stripped, 8, strip, extra_sections))
+ bb.debug(1, "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections: " + \
+ extra_sections)
}
do_strip[dirs] = "${B}"
@@ -642,7 +736,7 @@ do_sizecheck() {
at_least_one_fits=
for imageType in ${KERNEL_IMAGETYPES} ; do
size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
else
at_least_one_fits=y
@@ -667,17 +761,34 @@ kernel_do_deploy() {
fi
for imageType in ${KERNEL_IMAGETYPES} ; do
- base_name=${imageType}-${KERNEL_IMAGE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
- symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
- ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
- ln -sf ${base_name}.bin $deployDir/${imageType}
+ baseName=$imageType-${KERNEL_IMAGE_NAME}
+
+ if [ -s ${KERNEL_OUTPUT_DIR}/$imageType.stripped ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.stripped $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ else
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ -n "${KERNEL_IMAGE_LINK_NAME}" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
+ if [ "${KERNEL_IMAGETYPE_SYMLINK}" = "1" ] ; then
+ ln -sf $baseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType
+ fi
done
if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
mkdir -p ${D}${root_prefix}/lib
- tar -cvzf $deployDir/modules-${MODULE_TARBALL_NAME}.tgz -C ${D}${root_prefix} lib
- ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ if [ -n "${SOURCE_DATE_EPOCH}" ]; then
+ TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
+ else
+ TAR_ARGS=""
+ fi
+ TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
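+		# fixed ordering, clamped mtimes and owner/group keep the module
+		# tarball reproducible across builds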
+ tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
+
+ if [ -n "${MODULE_TARBALL_LINK_NAME}" ] ; then
+ ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
+ fi
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
@@ -685,16 +796,18 @@ kernel_do_deploy() {
if [ "$imageType" = "fitImage" ] ; then
continue
fi
- initramfs_base_name=${imageType}-${INITRAMFS_NAME}
- initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
+ initramfsBaseName=$imageType-${INITRAMFS_NAME}
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName${KERNEL_IMAGE_BIN_EXT}
+ if [ -n "${INITRAMFS_LINK_NAME}" ] ; then
+ ln -sf $initramfsBaseName${KERNEL_IMAGE_BIN_EXT} $deployDir/$imageType-${INITRAMFS_LINK_NAME}${KERNEL_IMAGE_BIN_EXT}
+ fi
done
fi
}
-do_deploy[cleandirs] = "${DEPLOYDIR}"
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
-do_deploy[prefuncs] += "package_get_auto_pr"
+
+# We deploy to filenames that include PKGV and PKGR, read the saved data to
+# ensure we get the right values for both
+do_deploy[prefuncs] += "read_subpackage_metadata"
addtask deploy after do_populate_sysroot do_packagedata
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
index 675d40ec9a..a951ba3325 100644
--- a/meta/classes/kernelsrc.bbclass
+++ b/meta/classes/kernelsrc.bbclass
@@ -1,7 +1,7 @@
S = "${STAGING_KERNEL_DIR}"
deltask do_fetch
deltask do_unpack
-do_patch[depends] += "virtual/kernel:do_patch"
+do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
deleted file mode 100644
index 0e351b6746..0000000000
--- a/meta/classes/libc-common.bbclass
+++ /dev/null
@@ -1,37 +0,0 @@
-do_install() {
- oe_runmake install_root=${D} install
- install -Dm 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ld.so.conf
- install -d ${D}${localedir}
- make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
- # get rid of some broken files...
- for i in ${GLIBC_BROKEN_LOCALES}; do
- sed -i "/$i/d" ${WORKDIR}/SUPPORTED
- done
- rm -f ${D}${sysconfdir}/rpc
- rm -rf ${D}${datadir}/zoneinfo
- rm -rf ${D}${libexecdir}/getconf
-}
-
-def get_libc_fpu_setting(bb, d):
- if d.getVar('TARGET_FPU') in [ 'soft', 'ppc-efd' ]:
- return "--without-fp"
- return ""
-
-python populate_packages_prepend () {
- if d.getVar('DEBIAN_NAMES'):
- pkgs = d.getVar('PACKAGES').split()
- bpn = d.getVar('BPN')
- prefix = d.getVar('MLPREFIX') or ""
- # Set the base package...
- d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
- libcprefix = prefix + bpn + '-'
- for p in pkgs:
- # And all the subpackages.
- if p.startswith(libcprefix):
- renamed = p.replace(bpn, 'libc6', 1)
- d.setVar('PKG_' + p, renamed)
- # For backward compatibility with old -dbg package
- d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
- d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
- d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
-}
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 0b4c666a74..13ef8cdc0d 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -37,15 +37,12 @@ python __anonymous () {
d.setVar("DEPENDS", depends)
d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
break
-
- # try to fix disable charsets/locales/locale-code compile fail
- if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets libc-locales libc-locale-code', True, False, d):
- d.setVar('PACKAGE_NO_GCONV', '0')
- else:
- d.setVar('PACKAGE_NO_GCONV', '1')
}
-OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+# try to fix the compile failure seen when charsets/locales/locale-code are disabled
+PACKAGE_NO_GCONV ?= "0"
+
+OVERRIDES:append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
@@ -67,9 +64,14 @@ do_prep_locale_tree() {
for i in $treedir/${datadir}/i18n/charmaps/*gz; do
gunzip $i
done
- tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
- if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
- tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
+ # The extract pattern "./l*.so*" is carefully selected so that it will
+ # match ld*.so and lib*.so*, but not any files in the gconv directory
+ # (if it exists). This makes sure we only unpack the files we need.
+ # This is important in case usrmerge is set in DISTRO_FEATURES, which
+ # means ${base_libdir} == ${libdir}.
+ tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
+ if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
+ tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
fi
install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
}
@@ -80,6 +82,9 @@ do_collect_bins_from_locale_tree() {
parent=$(dirname ${localedir})
mkdir -p ${PKGD}/$parent
tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
+
+	# Finalize tree by changing all duplicate files into hard links
+ cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
}
inherit qemu
@@ -124,9 +129,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
@@ -146,9 +151,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
@@ -167,9 +172,9 @@ python package_do_split_gconvs () {
deps.append(dp)
f.close()
if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ d.setVar('RDEPENDS:%s' % pkg, " ".join(deps))
if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+ d.setVar('RPROVIDES:%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
@@ -205,11 +210,11 @@ python package_do_split_gconvs () {
supported[locale] = charset
def output_locale_source(name, pkgname, locale, encoding):
- d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
+ d.setVar('RDEPENDS:%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
(mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
+ d.setVar('pkg_postinst_ontarget:%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
% (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
+ d.setVar('pkg_postrm:%s' % pkgname, d.getVar('locale_base_postrm') % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
@@ -217,8 +222,8 @@ python package_do_split_gconvs () {
lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
if lcsplit and int(lcsplit):
d.appendVar('PACKAGES', ' ' + dep)
- d.setVar('ALLOW_EMPTY_%s' % dep, '1')
- d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
+ d.setVar('ALLOW_EMPTY:%s' % dep, '1')
+ d.setVar('RDEPENDS:%s' % pkgname, mlprefix + dep)
commands = {}
@@ -243,6 +248,7 @@ python package_do_split_gconvs () {
"sh4": " --uint32-align=4 --big-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
+ "powerpc64le": " --uint32-align=4 --little-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
"mipsisa32r6": " --uint32-align=4 --big-endian ", \
"mips64": " --uint32-align=4 --big-endian ", \
@@ -263,7 +269,7 @@ python package_do_split_gconvs () {
bb.error("locale_arch_options not found for target_arch=" + target_arch)
bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
- localedef_opts += " --force --no-archive --prefix=%s \
+ localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
@@ -271,14 +277,14 @@ python package_do_split_gconvs () {
(path, i18npath, gconvpath, localedef_opts)
else: # earlier slower qemu way
qemu = qemu_target_binary(d)
- localedef_opts = "--force --no-archive --prefix=%s \
+ localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
qemu_options = d.getVar('QEMU_OPTIONS')
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
- -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
+ -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
(path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
commands["%s/%s" % (outputpath, name)] = cmd
@@ -287,13 +293,13 @@ python package_do_split_gconvs () {
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
- d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkgname, '1')
d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
m = re.match(r"(.*)_(.*)", name)
if m:
rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
- d.setVar('RPROVIDES_%s' % pkgname, rprovides)
+ d.setVar('RPROVIDES:%s' % pkgname, rprovides)
if use_bin == "compile":
output_locale_binary_rdepends(name, pkgname, locale, encoding)
@@ -337,17 +343,19 @@ python package_do_split_gconvs () {
def metapkg_hook(file, pkg, pattern, format, basename):
name = basename.split('/', 1)[0]
metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
- d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
+ d.appendVar('RDEPENDS:%s' % metapkg, ' ' + pkg)
if use_bin == "compile":
makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
- m = open(makefile, "w")
- m.write("all: %s\n\n" % " ".join(commands.keys()))
- for cmd in commands:
- m.write(cmd + ":\n")
- m.write("\t" + commands[cmd] + "\n\n")
- m.close()
+ with open(makefile, "w") as m:
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ total = len(commands)
+ for i, (maketarget, makerecipe) in enumerate(commands.items()):
+ m.write(maketarget + ":\n")
+ m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
+ m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
+ d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
@@ -371,6 +379,6 @@ python package_do_split_gconvs () {
# We want to do this indirection so that we can safely 'return'
# from the called function even though we're prepending
-python populate_packages_prepend () {
+python populate_packages:prepend () {
bb.build.exec_func('package_do_split_gconvs', d)
}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index ed91a4b4db..813e1ea4f5 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -6,7 +6,7 @@
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
-# Create extra package with license texts and add it to RRECOMMENDS_${PN}
+# Create extra package with license texts and add it to RRECOMMENDS:${PN}
LICENSE_CREATE_PACKAGE[type] = "boolean"
LICENSE_CREATE_PACKAGE ??= "0"
LICENSE_PACKAGE_SUFFIX ??= "-lic"
@@ -29,10 +29,12 @@ python do_populate_lic() {
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
for key in sorted(info.keys()):
f.write("%s: %s\n" % (key, info[key]))
+ oe.qa.exit_if_errors(d)
}
-# it would be better to copy them in do_install_append, but find_license_filesa is python
-python perform_packagecopy_prepend () {
+PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
+# it would be better to copy them in do_install:append, but find_license_files is python
+python perform_packagecopy:prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
lic_files_paths = find_license_files(d)
@@ -61,15 +63,7 @@ def add_package_and_files(d):
else:
# first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
- d.setVar('FILES_' + pn_lic, files)
- for pn in packages.split():
- if pn == pn_lic:
- continue
- rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
- if rrecommends_pn:
- d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
- else:
- d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
+ d.setVar('FILES:' + pn_lic, files)
def copy_license_files(lic_files_paths, destdir):
import shutil
@@ -152,6 +146,10 @@ def find_license_files(d):
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
+ def visit_Constant(self, node):
+ find_license(node.value.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
def find_license(license_type):
try:
bb.utils.mkdirhier(gen_lic_dest)
@@ -185,7 +183,8 @@ def find_license_files(d):
# The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
# and should not be allowed, warn the user in this case.
if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
- bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
+ oe.qa.handle_error("license-no-generic",
+ "%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type), d)
elif non_generic_lic and non_generic_lic in lic_chksums:
# if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
@@ -194,10 +193,11 @@ def find_license_files(d):
os.path.join(srcdir, non_generic_lic), None, None))
non_generic_lics[non_generic_lic] = license_type
else:
- # Add explicity avoid of CLOSED license because this isn't generic
+ # Explicitly avoid the CLOSED license because this isn't generic
if license_type != 'CLOSED':
# And here is where we warn people that their licenses are lousy
- bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
+ oe.qa.handle_error("license-exists",
+ "%s: No generic license file exists for: %s in any provider" % (pn, license_type), d)
pass
if not generic_directory:
@@ -222,7 +222,8 @@ def find_license_files(d):
except oe.license.InvalidLicense as exc:
bb.fatal('%s: %s' % (d.getVar('PF'), exc))
except SyntaxError:
- bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF')))
+ oe.qa.handle_error("license-syntax",
+                "%s: Failed to parse its LICENSE field." % (d.getVar('PF')), d)
# Add files from LIC_FILES_CHKSUM to list of license files
lic_chksum_paths = defaultdict(OrderedDict)
for path, data in sorted(lic_chksums.items()):
@@ -251,42 +252,76 @@ def return_spdx(d, license):
def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
- becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if availabel and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
- or the passed license if there is no canonical form.
+ becomes GPL-3.0-only) or the passed license if there is no canonical form.
"""
- lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
- if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
- if lic:
- lic += '+'
- return lic or license
+ return d.getVarFlag('SPDXLICENSEMAP', license) or license
-def expand_wildcard_licenses(d, wildcard_licenses):
+def available_licenses(d):
"""
- Return actual spdx format license names if wildcard used. We expand
- wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ Return the available licenses by searching the directories specified by
+ COMMON_LICENSE_DIR and LICENSE_PATH.
"""
- import fnmatch
- licenses = []
- spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
- for wld_lic in wildcard_licenses:
- spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
+ lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
+ (d.getVar('LICENSE_PATH') or '')).split()
- spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
- for wld_lic in wildcard_licenses:
- licenses += fnmatch.filter(spdx_lics, wld_lic)
+ licenses = []
+ for lic_dir in lic_dirs:
+ licenses += os.listdir(lic_dir)
- licenses = list(set(licenses))
+ licenses = sorted(licenses)
return licenses
+def expand_wildcard_licenses(d, wildcard_licenses):
+ """
+ There are some common wildcard values users may want to use. Support them
+ here.
+ """
+ licenses = set(wildcard_licenses)
+ mapping = {
+ "AGPL-3.0*" : ["AGPL-3.0-only", "AGPL-3.0-or-later"],
+ "GPL-3.0*" : ["GPL-3.0-only", "GPL-3.0-or-later"],
+ "LGPL-3.0*" : ["LGPL-3.0-only", "LGPL-3.0-or-later"],
+ }
+ for k in mapping:
+ if k in wildcard_licenses:
+ licenses.remove(k)
+ for item in mapping[k]:
+ licenses.add(item)
+
+ for l in licenses:
+ if l in oe.license.obsolete_license_list():
+ bb.fatal("Error, %s is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE" % l)
+ if "*" in l:
+ bb.fatal("Error, %s is an invalid license wildcard entry" % l)
+
+ return list(licenses)
+
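+A standalone sketch of the new wildcard handling, using the mapping from the hunk above (input values hypothetical):

    mapping = {
        "AGPL-3.0*": ["AGPL-3.0-only", "AGPL-3.0-or-later"],
        "GPL-3.0*":  ["GPL-3.0-only", "GPL-3.0-or-later"],
        "LGPL-3.0*": ["LGPL-3.0-only", "LGPL-3.0-or-later"],
    }
    licenses = {"GPL-3.0*", "MIT"}
    for k, expansion in mapping.items():
        if k in licenses:
            licenses.remove(k)
            licenses.update(expansion)
    print(sorted(licenses))  # ['GPL-3.0-only', 'GPL-3.0-or-later', 'MIT']

Anything left over that still contains '*' or names an obsolete license is now a hard failure instead of being silently fnmatch-expanded as before.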
def incompatible_license_contains(license, truevalue, falsevalue, d):
license = canonical_license(d, license)
bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
return truevalue if license in bad_licenses else falsevalue
+def incompatible_pkg_license(d, dont_want_licenses, license):
+	# Handle an "or" of two license sets provided by
+	# flattened_licenses(); pick one that works if possible.
+ def choose_lic_set(a, b):
+ return a if all(oe.license.license_ok(canonical_license(d, lic),
+ dont_want_licenses) for lic in a) else b
+
+ try:
+ licenses = oe.license.flattened_licenses(license, choose_lic_set)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
+
+ incompatible_lic = []
+ for l in licenses:
+ license = canonical_license(d, l)
+ if not oe.license.license_ok(license, dont_want_licenses):
+ incompatible_lic.append(license)
+
+ return sorted(incompatible_lic)
+
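+The choose_lic_set() callback is how '|' alternatives in a LICENSE expression get resolved: of the two flattened sets it keeps the first only when every license in it is acceptable. A reduced sketch (license names hypothetical):

    dont_want = {"GPL-3.0-only"}

    def choose_lic_set(a, b):
        # keep set a only if all of its licenses are acceptable
        return a if all(l not in dont_want for l in a) else b

    # "GPL-3.0-only | MIT" flattens to the acceptable alternative:
    print(choose_lic_set(["GPL-3.0-only"], ["MIT"]))  # ['MIT']

If neither alternative is fully acceptable, the offending names end up in the returned incompatible_lic list and the caller can fail the build with them.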
def incompatible_license(d, dont_want_licenses, package=None):
"""
This function checks if a recipe has only incompatible licenses. It also
@@ -294,50 +329,40 @@ def incompatible_license(d, dont_want_licenses, package=None):
as canonical (SPDX) names.
"""
import oe.license
- license = d.getVar("LICENSE_%s" % package) if package else None
+ license = d.getVar("LICENSE:%s" % package) if package else None
if not license:
license = d.getVar('LICENSE')
- # Handles an "or" or two license sets provided by
- # flattened_licenses(), pick one that works if possible.
- def choose_lic_set(a, b):
- return a if all(oe.license.license_ok(canonical_license(d, lic),
- dont_want_licenses) for lic in a) else b
-
- try:
- licenses = oe.license.flattened_licenses(license, choose_lic_set)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
- return any(not oe.license.license_ok(canonical_license(d, l), \
- dont_want_licenses) for l in licenses)
+ return incompatible_pkg_license(d, dont_want_licenses, license)
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
- aren't whitelisted.
+ aren't acceptable.
- If it does, it returns the all LICENSE_FLAGS missing from the whitelist, or
- all of the LICENSE_FLAGS if there is no whitelist.
+    If it does, it returns all the LICENSE_FLAGS missing from the list
+ of acceptable license flags, or all of the LICENSE_FLAGS if there
+ is no list of acceptable flags.
- If everything is is properly whitelisted, it returns None.
+    If everything is acceptable, it returns None.
"""
- def license_flag_matches(flag, whitelist, pn):
+ def license_flag_matches(flag, acceptlist, pn):
"""
- Return True if flag matches something in whitelist, None if not.
+        Return True if flag matches something in acceptlist, False if not.
- Before we test a flag against the whitelist, we append _${PN}
+ Before we test a flag against the acceptlist, we append _${PN}
to it. We then try to match that string against the
- whitelist. This covers the normal case, where we expect
+ acceptlist. This covers the normal case, where we expect
LICENSE_FLAGS to be a simple string like 'commercial', which
- the user typically matches exactly in the whitelist by
+ the user typically matches exactly in the acceptlist by
explicitly appending the package name e.g 'commercial_foo'.
If we fail the match however, we then split the flag across
'_' and append each fragment and test until we either match or
run out of fragments.
"""
flag_pn = ("%s_%s" % (flag, pn))
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_pn == candidate:
return True
@@ -348,27 +373,27 @@ def check_license_flags(d):
if flag_cur:
flag_cur += "_"
flag_cur += flagment
- for candidate in whitelist:
+ for candidate in acceptlist:
if flag_cur == candidate:
return True
return False
- def all_license_flags_match(license_flags, whitelist):
+ def all_license_flags_match(license_flags, acceptlist):
""" Return all unmatched flags, None if all flags match """
pn = d.getVar('PN')
- split_whitelist = whitelist.split()
+ split_acceptlist = acceptlist.split()
flags = []
for flag in license_flags.split():
- if not license_flag_matches(flag, split_whitelist, pn):
+ if not license_flag_matches(flag, split_acceptlist, pn):
flags.append(flag)
return flags if flags else None
license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
- if not whitelist:
+ acceptlist = d.getVar('LICENSE_FLAGS_ACCEPTED')
+ if not acceptlist:
return license_flags.split()
- unmatched_flags = all_license_flags_match(license_flags, whitelist)
+ unmatched_flags = all_license_flags_match(license_flags, acceptlist)
if unmatched_flags:
return unmatched_flags
return None
@@ -387,20 +412,22 @@ def check_license_format(d):
for pos, element in enumerate(elements):
if license_pattern.match(element):
if pos > 0 and license_pattern.match(elements[pos - 1]):
- bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid format - license names ' \
'must be separated by the following characters to indicate ' \
'the license selection: %s' %
- (pn, licenses, license_operator_chars))
+ (pn, licenses, license_operator_chars), d)
elif not license_operator.match(element):
- bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
+ oe.qa.handle_error('license-format',
+ '%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
'in the valid list of separators (%s)' %
- (pn, licenses, element, license_operator_chars))
+ (pn, licenses, element, license_operator_chars), d)
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
-IMAGE_CLASSES_append = " license_image"
+IMAGE_CLASSES:append = " license_image"
python do_populate_lic_setscene () {
sstate_setscene(d)
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 6ac63e0192..0a5ea0a2fb 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -1,3 +1,15 @@
+ROOTFS_LICENSE_DIR = "${IMAGE_ROOTFS}/usr/share/common-licenses"
+
+# This requires LICENSE_CREATE_PACKAGE=1 to work too
+COMPLEMENTARY_GLOB[lic-pkgs] = "*-lic"
+
+python() {
+ if not oe.data.typed_value('LICENSE_CREATE_PACKAGE', d):
+ features = set(oe.data.typed_value('IMAGE_FEATURES', d))
+ if 'lic-pkgs' in features:
+            bb.error("'lic-pkgs' is in IMAGE_FEATURES but LICENSE_CREATE_PACKAGE is not enabled, so -lic packages cannot be generated")
+}
+
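+Enabling the new image feature therefore needs both knobs set, e.g. in local.conf (illustrative fragment):

    LICENSE_CREATE_PACKAGE = "1"
    IMAGE_FEATURES += "lic-pkgs"

With those in place the complementary glob pulls every *-lic package into the image; without LICENSE_CREATE_PACKAGE the anonymous python above raises the error.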
python write_package_manifest() {
# Get list of installed packages
license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
@@ -27,7 +39,7 @@ python license_create_manifest() {
pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
if not "LICENSE" in pkg_dic[pkg_name].keys():
- pkg_lic_name = "LICENSE_" + pkg_name
+ pkg_lic_name = "LICENSE:" + pkg_name
pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
@@ -37,24 +49,28 @@ python license_create_manifest() {
def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
import re
+ import stat
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
- bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()
with open(license_manifest, "w") as license_file:
for pkg in sorted(pkg_dic):
- if bad_licenses:
- try:
- (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
- oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
- bad_licenses, canonical_license, d)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
+ remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)
+ incompatible_licenses = incompatible_pkg_license(d, remaining_bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+ bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(incompatible_licenses)))
else:
- pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
- pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
- pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+ incompatible_licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if incompatible_licenses:
+ oe.qa.handle_error('license-incompatible', "Including %s with incompatible license(s) %s into the image, because it has been allowed by exception list." %(pkg, ' '.join(incompatible_licenses)), d)
+ try:
+ (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+ oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+ remaining_bad_licenses, canonical_license, d)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
# Rootfs manifest
@@ -66,7 +82,7 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
# If the package doesn't contain any file, that is, its size is 0, the license
# isn't relevant as far as the final image is concerned. So doing license check
# doesn't make much sense, skip it.
- if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+ if pkg_dic[pkg]["PKGSIZE:%s" % pkg] == "0":
continue
else:
# Image manifest
@@ -84,10 +100,10 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
continue
if not os.path.exists(lic_file):
- bb.warn("The license listed %s was not in the "\
- "licenses collected for recipe %s"
- % (lic, pkg_dic[pkg]["PN"]))
-
+ oe.qa.handle_error('license-file-missing',
+ "The license listed %s was not in the "\
+ "licenses collected for recipe %s"
+ % (lic, pkg_dic[pkg]["PN"]), d)
# Two options here:
# - Just copy the manifest
# - Copy the manifest and the license directories
@@ -95,13 +111,12 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
if rootfs and copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
- 'usr', 'share', 'common-licenses')
+ rootfs_license_dir = d.getVar('ROOTFS_LICENSE_DIR')
bb.utils.mkdirhier(rootfs_license_dir)
rootfs_license_manifest = os.path.join(rootfs_license_dir,
os.path.split(license_manifest)[1])
if not os.path.exists(rootfs_license_manifest):
- os.link(license_manifest, rootfs_license_manifest)
+ oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
if copy_lic_dirs == "1":
for pkg in sorted(pkg_dic):
@@ -115,7 +130,6 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
licenses = os.listdir(pkg_license_dir)
for lic in licenses:
- rootfs_license = os.path.join(rootfs_license_dir, lic)
pkg_license = os.path.join(pkg_license_dir, lic)
pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
@@ -134,24 +148,32 @@ def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
bad_licenses) == False:
continue
+ # Make sure we use only canonical name for the license file
+ generic_lic_file = "generic_%s" % generic_lic
+ rootfs_license = os.path.join(rootfs_license_dir, generic_lic_file)
if not os.path.exists(rootfs_license):
- os.link(pkg_license, rootfs_license)
+ oe.path.copyhardlink(pkg_license, rootfs_license)
if not os.path.exists(pkg_rootfs_license):
- os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+ os.symlink(os.path.join('..', generic_lic_file), pkg_rootfs_license)
else:
if (oe.license.license_ok(canonical_license(d,
lic), bad_licenses) == False or
os.path.exists(pkg_rootfs_license)):
continue
- os.link(pkg_license, pkg_rootfs_license)
- # Fixup file ownership
+ oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
+ # Fixup file ownership and permissions
for walkroot, dirs, files in os.walk(rootfs_license_dir):
for f in files:
- os.lchown(os.path.join(walkroot, f), 0, 0)
+ p = os.path.join(walkroot, f)
+ os.lchown(p, 0, 0)
+ if not os.path.islink(p):
+ os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
for dir in dirs:
- os.lchown(os.path.join(walkroot, dir), 0, 0)
+ p = os.path.join(walkroot, dir)
+ os.lchown(p, 0, 0)
+ os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
@@ -185,6 +207,18 @@ def license_deployed_manifest(d):
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
write_license_files(d, image_license_manifest, man_dic, rootfs=False)
+ link_name = d.getVar('IMAGE_LINK_NAME')
+ if link_name:
+ lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+ link_name)
+ # remove old symlink
+ if os.path.islink(lic_manifest_symlink_dir):
+ os.unlink(lic_manifest_symlink_dir)
+
+ # create the image dir symlink
+ if lic_manifest_dir != lic_manifest_symlink_dir:
+ os.symlink(lic_manifest_dir, lic_manifest_symlink_dir)
+
def get_deployed_dependencies(d):
"""
Get all the deployed dependencies of an image
@@ -192,14 +226,11 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
- # Also get EXTRA_IMAGEDEPENDS because the bootloader is
- # usually in this var and not listed in rootfs.
- # At last, get the dependencies from boot classes because
- # it might contain the bootloader.
taskdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN", True)
depends = list(set([dep[0] for dep
in list(taskdata.values())
- if not dep[0].endswith("-native")]))
+ if not dep[0].endswith("-native") and not dep[0] == pn]))
# To verify what was deployed it checks the rootfs dependencies against
# the SSTATE_MANIFESTS for "deploy" task.
@@ -233,13 +264,24 @@ def get_deployed_files(man_file):
dep_files.append(os.path.basename(f))
return dep_files
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+ROOTFS_POSTPROCESS_COMMAND:prepend = "write_package_manifest; license_create_manifest; "
do_rootfs[recrdeptask] += "do_populate_lic"
python do_populate_lic_deploy() {
license_deployed_manifest(d)
+ oe.qa.exit_if_errors(d)
}
addtask populate_lic_deploy before do_build after do_image_complete
do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
+python license_qa_dead_symlink() {
+ import os
+
+ for root, dirs, files in os.walk(d.getVar('ROOTFS_LICENSE_DIR')):
+ for file in files:
+ full_path = root + "/" + file
+ if os.path.islink(full_path) and not os.path.exists(full_path):
+ bb.error("broken symlink: " + full_path)
+}
+IMAGE_QA_COMMANDS += "license_qa_dead_symlink"
diff --git a/meta/classes/linux-dummy.bbclass b/meta/classes/linux-dummy.bbclass
new file mode 100644
index 0000000000..9a06a509dd
--- /dev/null
+++ b/meta/classes/linux-dummy.bbclass
@@ -0,0 +1,26 @@
+
+python __anonymous () {
+ if d.getVar('PREFERRED_PROVIDER_virtual/kernel') == 'linux-dummy':
+        # parts copied from kernel.bbclass
+ kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
+
+        # add an empty kernel-devicetree package
+ d.appendVar('PACKAGES', ' %s-devicetree' % kname)
+ d.setVar('ALLOW_EMPTY:%s-devicetree' % kname, '1')
+
+ # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
+ type = d.getVar('KERNEL_IMAGETYPE') or ""
+ alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
+ types = d.getVar('KERNEL_IMAGETYPES') or ""
+ if type not in types.split():
+ types = (type + ' ' + types).strip()
+ if alttype not in types.split():
+ types = (alttype + ' ' + types).strip()
+
+        # add empty kernel-image-* packages
+ for type in types.split():
+ typelower = type.lower()
+ d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
+ d.setVar('ALLOW_EMPTY:%s-image-%s' % (kname, typelower), '1')
+}
+
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
index b4c413494a..4447c8847c 100644
--- a/meta/classes/linuxloader.bbclass
+++ b/meta/classes/linuxloader.bbclass
@@ -1,30 +1,38 @@
-def get_musl_loader(d):
+def get_musl_loader_arch(d):
import re
- dynamic_loader = None
+ ldso_arch = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch.startswith("microblaze"):
- dynamic_loader = "${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
+ ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el', d)}"
elif targetarch.startswith("mips"):
- dynamic_loader = "${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc":
- dynamic_loader = "${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
- elif targetarch == "powerpc64":
- dynamic_loader = "${base_libdir}/ld-musl-powerpc64.so.1"
+ ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ elif targetarch.startswith("powerpc64"):
+ ldso_arch = "powerpc64${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'le', d)}"
elif targetarch == "x86_64":
- dynamic_loader = "${base_libdir}/ld-musl-x86_64.so.1"
+ ldso_arch = "x86_64"
elif re.search("i.86", targetarch):
- dynamic_loader = "${base_libdir}/ld-musl-i386.so.1"
+ ldso_arch = "i386"
elif targetarch.startswith("arm"):
- dynamic_loader = "${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
+ ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
elif targetarch.startswith("aarch64"):
- dynamic_loader = "${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
- return dynamic_loader
+ ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
+ elif targetarch.startswith("riscv64"):
+ ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ elif targetarch.startswith("riscv32"):
+ ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ return ldso_arch
+
+def get_musl_loader(d):
+ import re
+ return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
def get_glibc_loader(d):
import re
- dynamic_loader = None
+ dynamic_loader = "NotSupported"
targetarch = d.getVar("TARGET_ARCH")
if targetarch in ["powerpc", "microblaze"]:
dynamic_loader = "${base_libdir}/ld.so.1"
@@ -32,6 +40,8 @@ def get_glibc_loader(d):
dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
elif targetarch.startswith("mips"):
dynamic_loader = "${base_libdir}/ld.so.1"
+ elif targetarch == "powerpc64le":
+ dynamic_loader = "${base_libdir}/ld64.so.2"
elif targetarch == "powerpc64":
dynamic_loader = "${base_libdir}/ld64.so.1"
elif targetarch == "x86_64":
@@ -39,16 +49,20 @@ def get_glibc_loader(d):
elif re.search("i.86", targetarch):
dynamic_loader = "${base_libdir}/ld-linux.so.2"
elif targetarch == "arm":
- dynamic_loader = "${base_libdir}/ld-linux.so.3"
+ dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
elif targetarch.startswith("aarch64"):
dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+ elif targetarch.startswith("riscv64"):
+ dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ elif targetarch.startswith("riscv32"):
+ dynamic_loader = "${base_libdir}/ld-linux-riscv32-ilp32${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
return dynamic_loader
def get_linuxloader(d):
overrides = d.getVar("OVERRIDES").split(":")
if "libc-baremetal" in overrides:
- return None
+ return "NotSupported"
if "libc-musl" in overrides:
dynamic_loader = get_musl_loader(d)
@@ -58,4 +72,5 @@ def get_linuxloader(d):
get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
+get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
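Taken together, these helpers now return either a concrete loader path or the sentinel string "NotSupported" instead of None, so variable expansion can never embed "None" into a path. Illustrative results implied by the code above:

    # musl on x86_64:        get_musl_loader(d)  -> "/lib/ld-musl-x86_64.so.1"
    # glibc on powerpc64le:  get_glibc_loader(d) -> "${base_libdir}/ld64.so.2"
    # libc-baremetal:        get_linuxloader(d)  -> "NotSupported"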
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
index 68105d9b84..74e7074a53 100644
--- a/meta/classes/live-vm-common.bbclass
+++ b/meta/classes/live-vm-common.bbclass
@@ -29,6 +29,39 @@ def pcbios(d):
PCBIOS = "${@pcbios(d)}"
PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
+# efi_populate_common DEST BOOTLOADER
+efi_populate_common() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+	# Build an EFI directory to create efi.img
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
+
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
+
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
index 50c254763e..5e09c77fe6 100644
--- a/meta/classes/manpages.bbclass
+++ b/meta/classes/manpages.bbclass
@@ -2,7 +2,7 @@
# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
# tends to pull in the entire XML stack and other tools, so it's not enabled
# by default.
-PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
+PACKAGECONFIG:append:class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
inherit qemu
@@ -10,16 +10,24 @@ inherit qemu
MAN_PKG ?= "${PN}-doc"
# only add man-db to RDEPENDS when manual files are built and installed
-RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
+RDEPENDS:${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
-pkg_postinst_append_${MAN_PKG} () {
+pkg_postinst:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
if test -n "$D"; then
- if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
+ if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true', 'false', d)}; then
sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
+ chown -R root:root $D${mandir}
+
mkdir -p $D${localstatedir}/cache/man
- mv $D${mandir}/index.db $D${localstatedir}/cache/man
+ cd $D${mandir}
+ find . -name index.db | while read index; do
+ mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
+ mv ${index} $D${localstatedir}/cache/man/${index}
+ chown man:man $D${localstatedir}/cache/man/${index}
+ done
+ cd -
else
$INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
fi
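The find loop preserves per-locale subdirectories when relocating the man-db indexes, rather than assuming a single top-level index.db as before; illustratively (locale directories hypothetical):

    # ./index.db     -> $D/var/cache/man/index.db
    # ./de/index.db  -> $D/var/cache/man/de/index.db
    # ./ja/index.db  -> $D/var/cache/man/ja/index.db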
@@ -29,7 +37,7 @@ pkg_postinst_append_${MAN_PKG} () {
fi
}
-pkg_postrm_append_${MAN_PKG} () {
+pkg_postrm:${MAN_PKG}:append () {
# only update manual page index caches when manual files are built and installed
if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
mandb -q
diff --git a/meta/classes/mcextend.bbclass b/meta/classes/mcextend.bbclass
new file mode 100644
index 0000000000..0f8f962298
--- /dev/null
+++ b/meta/classes/mcextend.bbclass
@@ -0,0 +1,16 @@
+python mcextend_virtclass_handler () {
+ cls = e.data.getVar("BBEXTENDCURR")
+ variant = e.data.getVar("BBEXTENDVARIANT")
+ if cls != "mcextend" or not variant:
+ return
+
+ override = ":virtclass-mcextend-" + variant
+
+ e.data.setVar("PN", e.data.getVar("PN", False) + "-" + variant)
+ e.data.setVar("MCNAME", variant)
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+}
+
+addhandler mcextend_virtclass_handler
+mcextend_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
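+This follows the same class-extension pattern as native/multilib: a recipe opts in through BBCLASSEXTEND, BitBake sets BBEXTENDCURR/BBEXTENDVARIANT, and the handler renames the variant and appends an override. A hypothetical user recipe:

    BBCLASSEXTEND = "mcextend:initramfs"

    # yields an extra ${PN}-initramfs target with MCNAME = "initramfs" and an
    # OVERRIDES entry of virtclass-mcextend-initramfs, so the variant can be
    # customised separately:
    SRC_URI:append:virtclass-mcextend-initramfs = " file://extra.cfg"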
diff --git a/meta/classes/meson-routines.bbclass b/meta/classes/meson-routines.bbclass
new file mode 100644
index 0000000000..be3aeedeba
--- /dev/null
+++ b/meta/classes/meson-routines.bbclass
@@ -0,0 +1,51 @@
+inherit siteinfo
+
+def meson_array(var, d):
+ items = d.getVar(var).split()
+ return repr(items[0] if len(items) == 1 else items)
+
+# Map our ARCH values to what Meson expects:
+# http://mesonbuild.com/Reference-tables.html#cpu-families
+def meson_cpu_family(var, d):
+ import re
+ arch = d.getVar(var)
+ if arch == 'powerpc':
+ return 'ppc'
+ elif arch == 'powerpc64' or arch == 'powerpc64le':
+ return 'ppc64'
+ elif arch == 'armeb':
+ return 'arm'
+ elif arch == 'aarch64_be':
+ return 'aarch64'
+ elif arch == 'mipsel':
+ return 'mips'
+ elif arch == 'mips64el':
+ return 'mips64'
+ elif re.match(r"i[3-6]86", arch):
+ return "x86"
+ elif arch == "microblazeel":
+ return "microblaze"
+ else:
+ return arch
+
+# Map our OS values to what Meson expects:
+# https://mesonbuild.com/Reference-tables.html#operating-system-names
+def meson_operating_system(var, d):
+ os = d.getVar(var)
+ if "mingw" in os:
+ return "windows"
+    # avoid e.g. 'linux-gnueabi'
+ elif "linux" in os:
+ return "linux"
+ else:
+ return os
+
+def meson_endian(prefix, d):
+ arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
+ sitedata = siteinfo_data_for_machine(arch, os, d)
+ if "endian-little" in sitedata:
+ return "little"
+ elif "endian-big" in sitedata:
+ return "big"
+ else:
+ bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
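+These helpers normalise OE's architecture and OS values into what Meson's reference tables expect; some illustrative mappings implied by the code above:

    # meson_cpu_family: 'powerpc64le' -> 'ppc64', 'aarch64_be' -> 'aarch64',
    #                   'i586' -> 'x86', 'microblazeel' -> 'microblaze'
    # meson_operating_system: 'mingw32' -> 'windows', 'linux-gnueabi' -> 'linux'
    # meson_endian: 'little' or 'big' from siteinfo, bb.fatal() otherwise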
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
index dff0485b47..0bfe945811 100644
--- a/meta/classes/meson.bbclass
+++ b/meta/classes/meson.bbclass
@@ -1,6 +1,11 @@
-inherit siteinfo python3native
+inherit python3native meson-routines qemu
-DEPENDS_append = " meson-native ninja-native"
+DEPENDS:append = " meson-native ninja-native"
+
+EXEWRAPPER_ENABLED:class-native = "False"
+EXEWRAPPER_ENABLED:class-nativesdk = "False"
+EXEWRAPPER_ENABLED ?= "${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d)}"
+DEPENDS:append = "${@' qemu-native' if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ''}"
# As Meson enforces out-of-tree builds we can just use cleandirs
B = "${WORKDIR}/build"
@@ -12,8 +17,10 @@ MESON_SOURCEPATH = "${S}"
def noprefix(var, d):
return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
+MESON_BUILDTYPE ?= "${@oe.utils.vartrue('DEBUG_BUILD', 'debug', 'plain', d)}"
+MESON_BUILDTYPE[vardeps] += "DEBUG_BUILD"
MESONOPTS = " --prefix ${prefix} \
- --buildtype plain \
+ --buildtype ${MESON_BUILDTYPE} \
--bindir ${@noprefix('bindir', d)} \
--sbindir ${@noprefix('sbindir', d)} \
--datadir ${@noprefix('datadir', d)} \
@@ -25,58 +32,27 @@ MESONOPTS = " --prefix ${prefix} \
--sysconfdir ${sysconfdir} \
--localstatedir ${localstatedir} \
--sharedstatedir ${sharedstatedir} \
- -Dc_args='${BUILD_CPPFLAGS} ${BUILD_CFLAGS}' \
- -Dc_link_args='${BUILD_LDFLAGS}' \
- -Dcpp_args='${BUILD_CPPFLAGS} ${BUILD_CXXFLAGS}' \
- -Dcpp_link_args='${BUILD_LDFLAGS}'"
-
-MESON_TOOLCHAIN_ARGS = "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
-MESON_C_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CFLAGS}"
-MESON_CPP_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CXXFLAGS}"
-MESON_LINK_ARGS = "${MESON_TOOLCHAIN_ARGS} ${LDFLAGS}"
+ --wrap-mode nodownload \
+ --native-file ${WORKDIR}/meson.native"
-EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEMESON:append = " ${PACKAGECONFIG_CONFARGS}"
MESON_CROSS_FILE = ""
-MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
-MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-target = "--cross-file ${WORKDIR}/meson.cross"
+MESON_CROSS_FILE:class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
-def meson_array(var, d):
- items = d.getVar(var).split()
- return repr(items[0] if len(items) == 1 else items)
+# Needed to set up qemu wrapper below
+export STAGING_DIR_HOST
-# Map our ARCH values to what Meson expects:
-# http://mesonbuild.com/Reference-tables.html#cpu-families
-def meson_cpu_family(var, d):
- import re
- arch = d.getVar(var)
- if arch == 'powerpc':
- return 'ppc'
- elif arch == 'powerpc64':
- return 'ppc64'
- elif arch == 'armeb':
- return 'arm'
- elif arch == 'mipsel':
- return 'mips'
- elif arch == 'mips64el':
- return 'mips64'
- elif re.match(r"i[3-6]86", arch):
- return "x86"
- else:
- return arch
-
-def meson_endian(prefix, d):
- arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
- sitedata = siteinfo_data_for_machine(arch, os, d)
- if "endian-little" in sitedata:
- return "little"
- elif "endian-big" in sitedata:
- return "big"
- else:
- bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
+def rust_tool(d, target_var):
+ rustc = d.getVar('RUSTC')
+ if not rustc:
+ return ""
+ cmd = [rustc, "--target", d.getVar(target_var)] + d.getVar("RUSTFLAGS").split()
+ return "rust = %s" % repr(cmd)
addtask write_config before do_configure
-do_write_config[vardeps] += "MESON_C_ARGS MESON_CPP_ARGS MESON_LINK_ARGS CC CXX LD AR NM STRIP READELF"
+do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS RUSTC RUSTFLAGS"
do_write_config() {
# This needs to be Py to split the args into single-element lists
cat >${WORKDIR}/meson.cross <<EOF
@@ -85,36 +61,95 @@ c = ${@meson_array('CC', d)}
cpp = ${@meson_array('CXX', d)}
ar = ${@meson_array('AR', d)}
nm = ${@meson_array('NM', d)}
-ld = ${@meson_array('LD', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
+objcopy = ${@meson_array('OBJCOPY', d)}
pkgconfig = 'pkg-config'
+llvm-config = 'llvm-config${LLVMVERSION}'
+cups-config = 'cups-config'
+g-ir-scanner = '${STAGING_BINDIR}/g-ir-scanner-wrapper'
+g-ir-compiler = '${STAGING_BINDIR}/g-ir-compiler-wrapper'
+${@rust_tool(d, "HOST_SYS")}
+${@"exe_wrapper = '${WORKDIR}/meson-qemuwrapper'" if d.getVar('EXEWRAPPER_ENABLED') == 'True' else ""}
+
+[built-in options]
+c_args = ${@meson_array('CFLAGS', d)}
+c_link_args = ${@meson_array('LDFLAGS', d)}
+cpp_args = ${@meson_array('CXXFLAGS', d)}
+cpp_link_args = ${@meson_array('LDFLAGS', d)}
[properties]
needs_exe_wrapper = true
-c_args = ${@meson_array('MESON_C_ARGS', d)}
-c_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
-cpp_args = ${@meson_array('MESON_CPP_ARGS', d)}
-cpp_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
-gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
[host_machine]
-system = '${HOST_OS}'
+system = '${@meson_operating_system('HOST_OS', d)}'
cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
cpu = '${HOST_ARCH}'
endian = '${@meson_endian('HOST', d)}'
[target_machine]
-system = '${TARGET_OS}'
+system = '${@meson_operating_system('TARGET_OS', d)}'
cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
cpu = '${TARGET_ARCH}'
endian = '${@meson_endian('TARGET', d)}'
EOF
+
+ cat >${WORKDIR}/meson.native <<EOF
+[binaries]
+c = ${@meson_array('BUILD_CC', d)}
+cpp = ${@meson_array('BUILD_CXX', d)}
+ar = ${@meson_array('BUILD_AR', d)}
+nm = ${@meson_array('BUILD_NM', d)}
+strip = ${@meson_array('BUILD_STRIP', d)}
+readelf = ${@meson_array('BUILD_READELF', d)}
+objcopy = ${@meson_array('BUILD_OBJCOPY', d)}
+pkgconfig = 'pkg-config-native'
+${@rust_tool(d, "BUILD_SYS")}
+
+[built-in options]
+c_args = ${@meson_array('BUILD_CFLAGS', d)}
+c_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
+cpp_args = ${@meson_array('BUILD_CXXFLAGS', d)}
+cpp_link_args = ${@meson_array('BUILD_LDFLAGS', d)}
+EOF
}
+do_write_config:append:class-target() {
+ # Write out a qemu wrapper that will be used as exe_wrapper so that meson
+ # can run target helper binaries through that.
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ cat > ${WORKDIR}/meson-qemuwrapper << EOF
+#!/bin/sh
+# Use a modules directory which doesn't exist so we don't load random things
+# which may then get deleted (or their dependencies) and potentially segfault
+export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
+
+# meson sets this wrongly (only to libs in build-dir), qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
+$qemu_binary "\$@"
+EOF
+ chmod +x ${WORKDIR}/meson-qemuwrapper
+}
+
+# Tell externalsrc that changes to this file require a reconfigure
CONFIGURE_FILES = "meson.build"
meson_do_configure() {
+	# Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards
+ # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
+ unset LD
+
+ # sstate.bbclass no longer removes empty directories to avoid a race (see
+ # commit 4f94d929 "sstate/staging: Handle directory creation race issue").
+ # Unfortunately Python apparently treats an empty egg-info directory as if
+ # the version it previously contained still exists and fails if a newer
+ # version is required, which Meson does. To avoid this, make sure there are
+ # no empty egg-info directories from previous versions left behind. Ignore
+ # all errors from rmdir since the egg-info may be a file rather than a
+ # directory.
+ rmdir ${STAGING_LIBDIR_NATIVE}/${PYTHON_DIR}/site-packages/*.egg-info 2>/dev/null || :
+
# Work around "Meson fails if /tmp is mounted with noexec #2972"
mkdir -p "${B}/meson-private/tmp"
export TMPDIR="${B}/meson-private/tmp"
@@ -124,31 +159,15 @@ meson_do_configure() {
fi
}
-override_native_tools() {
- # Set these so that meson uses the native tools for its build sanity tests,
- # which require executables to be runnable. The cross file will still
- # override these for the target build.
- export CC="${BUILD_CC}"
- export CXX="${BUILD_CXX}"
- export LD="${BUILD_LD}"
- export AR="${BUILD_AR}"
- # These contain *target* flags but will be used as *native* flags. The
- # correct native flags will be passed via -Dc_args and so on, unset them so
- # they don't interfere with tools invoked by Meson (such as g-ir-scanner)
- unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
-}
-
-meson_do_configure_prepend_class-target() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-nativesdk() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-native() {
- export PKG_CONFIG="pkg-config-native"
+python meson_do_qa_configure() {
+ import re
+ warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
+ with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
+ log = logfile.read()
+ for (prop, value) in warn_re.findall(log):
+ bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
}
+do_configure[postfuncs] += "meson_do_qa_configure"
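The QA hook re-emits, as BitBake warnings, any Meson log line of the shape the regex matches (property name below is illustrative):

    WARNING: Cross property needs_exe_wrapper is using default value true

so cross properties that were never assigned in meson.cross become visible in the build output instead of being buried in meson-log.txt.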
do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
meson_do_compile() {
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
deleted file mode 100644
index 5e6890238b..0000000000
--- a/meta/classes/meta.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-
-PACKAGES = ""
-
-do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 58bb4c555a..47cb969b8d 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,6 +1,3 @@
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-
def base_detect_revision(d):
path = base_get_scmbasepath(d)
return base_get_metadata_git_revision(path, d)
@@ -40,3 +37,8 @@ def base_get_metadata_git_revision(path, d):
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
+
+METADATA_BRANCH := "${@base_detect_branch(d)}"
+METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
+METADATA_REVISION := "${@base_detect_revision(d)}"
+METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
new file mode 100644
index 0000000000..271f48dd72
--- /dev/null
+++ b/meta/classes/mime-xdg.bbclass
@@ -0,0 +1,74 @@
+#
+# This class creates mime <-> application associations based on entry
+# 'MimeType' in *.desktop files
+#
+
+DEPENDS += "desktop-file-utils"
+PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
+DESKTOPDIR = "${datadir}/applications"
+
+# There are recipes out there installing their .desktop files as absolute
+# symlinks. For us these are dangling and cannot be introspected for "MimeType"
+# easily. By adding package names to MIME_XDG_PACKAGES, the packager can force
+# proper update-desktop-database handling. Note that all introspection is
+# skipped when MIME_XDG_PACKAGES is not empty.
+MIME_XDG_PACKAGES ?= ""
+
+mime_xdg_postinst() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ desktop_dir=${DESKTOPDIR}
+else
+ update-desktop-database $D${DESKTOPDIR}
+fi
+}
+
+mime_xdg_postrm() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ desktop_dir=${DESKTOPDIR}
+else
+ update-desktop-database $D${DESKTOPDIR}
+fi
+}
+
+python populate_packages:append () {
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
+ desktop_base = d.getVar('DESKTOPDIR')
+ forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
+
+ for pkg in packages:
+ desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
+ if d.getVar('MIME_XDG_PACKAGES') == '':
+ desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
+ if os.path.exists(desktop_dir):
+ for df in os.listdir(desktop_dir):
+ if df.endswith('.desktop'):
+ try:
+ with open(desktop_dir + '/'+ df, 'r') as f:
+ for line in f.read().split('\n'):
+ if 'MimeType' in line:
+ desktops_with_mime_found = True
+                            break
+ except:
+                    bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % (desktop_dir + '/' + df))
+ if desktops_with_mime_found:
+ break
+ if desktops_with_mime_found:
+ bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('mime_xdg_postinst')
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('mime_xdg_postrm')
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
+ bb.note("adding desktop-file-utils dependency to %s" % pkg)
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
+}
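+The introspection keys off the MimeType entry of installed .desktop files; a hypothetical desktop file that would trigger the postinst/postrm additions:

    [Desktop Entry]
    Type=Application
    Name=Example Viewer
    Exec=exampleviewer %f
    MimeType=image/png;image/jpeg;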
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
index 6c7b868f79..8d176a884e 100644
--- a/meta/classes/mime.bbclass
+++ b/meta/classes/mime.bbclass
@@ -1,57 +1,70 @@
-DEPENDS += "shared-mime-info"
+#
+# This class is used by recipes installing mime types
+#
+
+DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
PACKAGE_WRITE_DEPS += "shared-mime-info-native"
+MIMEDIR = "${datadir}/mime"
mime_postinst() {
-if [ "$1" = configure ]; then
- UPDATEMIMEDB=`which update-mime-database`
- if [ -x "$UPDATEMIMEDB" ] ; then
- echo "Updating MIME database... this may take a while."
- $UPDATEMIMEDB $D${datadir}/mime
- else
- echo "Missing update-mime-database, update of mime database failed!"
- exit 1
- fi
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ mimedir=${MIMEDIR}
+else
+ echo "Updating MIME database... this may take a while."
+ update-mime-database $D${MIMEDIR}
fi
}
mime_postrm() {
-if [ "$1" = remove ] || [ "$1" = upgrade ]; then
- UPDATEMIMEDB=`which update-mime-database`
- if [ -x "$UPDATEMIMEDB" ] ; then
- echo "Updating MIME database... this may take a while."
- $UPDATEMIMEDB $D${datadir}/mime
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ mimedir=${MIMEDIR}
+else
+ echo "Updating MIME database... this may take a while."
+	# $D${MIMEDIR}/packages belongs to the shared-mime-info-data package,
+	# and packages like libfm-mime depend on shared-mime-info-data.
+	# After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
+	# is removed, but update-mime-database needs this dir to update the
+	# database; as a workaround, create one and remove it later
+ if [ ! -d $D${MIMEDIR}/packages ]; then
+ mkdir -p $D${MIMEDIR}/packages
+ update-mime-database $D${MIMEDIR}
+ rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
else
- echo "Missing update-mime-database, update of mime database failed!"
- exit 1
- fi
+ update-mime-database $D${MIMEDIR}
+fi
fi
}
-python populate_packages_append () {
- import re
+python populate_packages:append () {
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
+ mimedir = d.getVar('MIMEDIR')
for pkg in packages:
- mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
- mimes = []
- mime_re = re.compile(".*\.xml$")
- if os.path.exists(mime_dir):
- for f in os.listdir(mime_dir):
- if mime_re.match(f):
- mimes.append(f)
- if mimes:
+ mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
+ mimes_types_found = False
+ if os.path.exists(mime_packages_dir):
+ for f in os.listdir(mime_packages_dir):
+ if f.endswith('.xml'):
+ mimes_types_found = True
+ break
+ if mimes_types_found:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
- bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
+ if pkg != 'shared-mime-info-data':
+ bb.note("adding shared-mime-info-data dependency to %s" % pkg)
+ d.appendVar('RDEPENDS:' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 87bba41472..8e7b35d900 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -1,76 +1,76 @@
MIRRORS += "\
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
-${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
-${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
-${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
-ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
-ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
-ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
-http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
-${APACHE_MIRROR} http://www.us.apache.org/dist \n \
-${APACHE_MIRROR} http://archive.apache.org/dist \n \
-http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
-${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
-${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
-ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
-ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
-ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
-cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-cvs://.*/.* http://sources.openembedded.org/ \n \
-svn://.*/.* http://sources.openembedded.org/ \n \
-git://.*/.* http://sources.openembedded.org/ \n \
-hg://.*/.* http://sources.openembedded.org/ \n \
-bzr://.*/.* http://sources.openembedded.org/ \n \
-p4://.*/.* http://sources.openembedded.org/ \n \
-osc://.*/.* http://sources.openembedded.org/ \n \
-https?$://.*/.* http://sources.openembedded.org/ \n \
-ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/?.* http://sources.openembedded.org/ \n \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \
+${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \
+${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \
+${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \
+${GNU_MIRROR} https://mirrors.kernel.org/gnu \
+${KERNELORG_MIRROR} http://www.kernel.org/pub \
+${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \
+${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \
+ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \
+ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \
+ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \
+ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \
+http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \
+http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \
+${APACHE_MIRROR} http://www.us.apache.org/dist \
+${APACHE_MIRROR} http://archive.apache.org/dist \
+http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \
+${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \
+${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \
+ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \
+ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \
+ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \
+cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+https?://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \
+npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \
+cvs://.*/.* http://sources.openembedded.org/ \
+svn://.*/.* http://sources.openembedded.org/ \
+git://.*/.* http://sources.openembedded.org/ \
+hg://.*/.* http://sources.openembedded.org/ \
+bzr://.*/.* http://sources.openembedded.org/ \
+p4://.*/.* http://sources.openembedded.org/ \
+osc://.*/.* http://sources.openembedded.org/ \
+https?://.*/.* http://sources.openembedded.org/ \
+ftp://.*/.* http://sources.openembedded.org/ \
+npm://.*/?.* http://sources.openembedded.org/ \
+${CPAN_MIRROR} http://cpan.metacpan.org/ \
+${CPAN_MIRROR} http://search.cpan.org/CPAN/ \
+https?://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \
+https?://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \
"
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
# where git native protocol fetches may fail due to local firewall rules, etc.
MIRRORS += "\
-git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
-git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
-git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
-git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
-git://.*/.* git://HOST/PATH;protocol=https \n \
+git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \
+git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \
+git://.*/.* git://HOST/PATH;protocol=https \
+git://.*/.* git://HOST/git/PATH;protocol=https \
"
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index c0dfa35061..a09ec3ed1e 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -14,7 +14,7 @@ python __anonymous () {
d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
}
-python do_devshell_prepend () {
+python do_devshell:prepend () {
os.environ['CFLAGS'] = ''
os.environ['CPPFLAGS'] = ''
os.environ['CXXFLAGS'] = ''
@@ -70,5 +70,5 @@ EXPORT_FUNCTIONS do_compile do_install
# add all split modules to PN RDEPENDS; PN can be empty now
KERNEL_MODULES_META_PACKAGE = "${PN}"
-FILES_${PN} = ""
-ALLOW_EMPTY_${PN} = "1"
+FILES:${PN} = ""
+ALLOW_EMPTY:${PN} = "1"
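These hunks are part of the wider overrides syntax migration: override-style suffixes on variables and task appends now use ':' rather than '_'. A generic sketch of the same conversion in a recipe (variable and task names are illustrative):

    # old syntax
    FILES_${PN}-dev = "${includedir}"
    do_install_append() { :; }

    # new syntax
    FILES:${PN}-dev = "${includedir}"
    do_install:append() { :; }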
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 2b761f3551..5859ca8d21 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -33,7 +33,9 @@ python multilib_virtclass_handler () {
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
- target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ override = ":virtclass-multilib-" + variant
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+ target_vendor = e.data.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
return
@@ -63,24 +65,25 @@ python multilib_virtclass_handler () {
override = ":virtclass-multilib-" + variant
- blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
- if blacklist:
+ skip_msg = e.data.getVarFlag('SKIP_RECIPE', e.data.getVar('PN'))
+ if skip_msg:
pn_new = variant + "-" + e.data.getVar('PN')
- if not e.data.getVarFlag('PNBLACKLIST', pn_new):
- e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+ if not e.data.getVarFlag('SKIP_RECIPE', pn_new):
+ e.data.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- # Expand WHITELIST_GPL-3.0 with multilib prefix
- pkgs = e.data.getVar("WHITELIST_GPL-3.0")
- for pkg in pkgs.split():
- pkgs += " " + variant + "-" + pkg
- e.data.setVar("WHITELIST_GPL-3.0", pkgs)
+ # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
+ pkgs = e.data.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
+ if pkgs:
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
- newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
+ newtune = e.data.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
}
@@ -89,13 +92,16 @@ addhandler multilib_virtclass_handler
multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
python __anonymous () {
- variant = d.getVar("BBEXTENDVARIANT")
+ if bb.data.inherits_class('image', d):
+ # set rpm preferred file color for 32-bit multilib image
+ if d.getVar("SITEINFO_BITS") == "32":
+ d.setVar("RPM_PREFER_ELF_ARCH", "1")
- import oe.classextend
+ variant = d.getVar("BBEXTENDVARIANT")
+ import oe.classextend
- clsextend = oe.classextend.ClassExtender(variant, d)
+ clsextend = oe.classextend.ClassExtender(variant, d)
- if bb.data.inherits_class('image', d):
clsextend.map_depends_variable("PACKAGE_INSTALL")
clsextend.map_depends_variable("LINGUAS_INSTALL")
clsextend.map_depends_variable("RDEPENDS")
@@ -104,9 +110,24 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- bb.build.deltask('do_populate_sdk', d)
bb.build.deltask('do_populate_sdk_ext', d)
return
+}
+
+python multilib_virtclass_handler_postkeyexp () {
+ cls = d.getVar("BBEXTENDCURR")
+ variant = d.getVar("BBEXTENDVARIANT")
+ if cls != "multilib" or not variant:
+ return
+
+ variant = d.getVar("BBEXTENDVARIANT")
+
+ import oe.classextend
+
+ clsextend = oe.classextend.ClassExtender(variant, d)
+
+ if bb.data.inherits_class('image', d):
+ return
clsextend.map_depends_variable("DEPENDS")
clsextend.map_variable("PROVIDES")
@@ -123,27 +144,78 @@ python __anonymous () {
clsextend.map_variable("USERADD_PACKAGES")
clsextend.map_variable("SYSTEMD_PACKAGES")
clsextend.map_variable("UPDATERCPN")
+
+ reset_alternative_priority(d)
}
-PACKAGEFUNCS_append = " do_package_qa_multilib"
+addhandler multilib_virtclass_handler_postkeyexp
+multilib_virtclass_handler_postkeyexp[eventmask] = "bb.event.RecipePostKeyExpansion"
+
+def reset_alternative_priority(d):
+ if not bb.data.inherits_class('update-alternatives', d):
+ return
+
+    # There might be multiple multilibs at the same time, e.g., lib32 and
+    # lib64, and each of them should have a different priority.
+ multilib_variants = d.getVar('MULTILIB_VARIANTS')
+ bbextendvariant = d.getVar('BBEXTENDVARIANT')
+ reset_gap = multilib_variants.split().index(bbextendvariant) + 1
+
+ # ALTERNATIVE_PRIORITY = priority
+ alt_priority_recipe = d.getVar('ALTERNATIVE_PRIORITY')
+ # Reset ALTERNATIVE_PRIORITY when found
+ if alt_priority_recipe:
+ reset_priority = int(alt_priority_recipe) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY to %s' % (d.getVar('PN'), reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY', reset_priority)
+
+ handled_pkgs = []
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ # ALTERNATIVE_PRIORITY_pkg = priority
+ alt_priority_pkg = d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg)
+ # Reset ALTERNATIVE_PRIORITY_pkg when found
+ if alt_priority_pkg:
+ reset_priority = int(alt_priority_pkg) - reset_gap
+ if not pkg in handled_pkgs:
+ handled_pkgs.append(pkg)
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
+
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
+ # ALTERNATIVE_PRIORITY_pkg[tool] = priority
+ alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
+ # ALTERNATIVE_PRIORITY[tool] = priority
+ alt_priority_name = d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
+
+ if alt_priority_pkg_name:
+ reset_priority = int(alt_priority_pkg_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s[%s] to %s' % (pkg, pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, reset_priority)
+ elif alt_priority_name:
+ reset_priority = int(alt_priority_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
+
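As a worked example of the offset computed above: with MULTILIB_VARIANTS = "lib32 lib64", a lib32 variant gets reset_gap = 1 and a lib64 variant gets reset_gap = 2, so a base-recipe ALTERNATIVE_PRIORITY of 100 becomes 99 and 98 respectively, which keeps the base recipe's alternative preferred over all multilib variants.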
+PACKAGEFUNCS:append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
+ values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
i = i[len('virtual/'):]
- if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
- (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
- (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')) \
- and (not i.startswith("kernel-image")):
+
+ if (not (i.startswith(mlprefix) or i.startswith("kernel-") \
+ or ('cross-canadian' in i) or i.startswith("nativesdk-") \
+ or i.startswith("rtld") or i.startswith("/"))):
candidates.append(i)
+
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
- package_qa_handle_error("multilib", msg, d)
+ oe.qa.handle_error("multilib", msg, d)
ml = d.getVar('MLPREFIX')
if not ml:
@@ -161,4 +233,5 @@ python do_package_qa_multilib() {
check_mlprefix(pkg, 'RSUGGESTS', ml)
check_mlprefix(pkg, 'RREPLACES', ml)
check_mlprefix(pkg, 'RCONFLICTS', ml)
+ oe.qa.exit_if_errors(d)
}
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 649cc096b7..e06307d057 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,6 +1,7 @@
def preferred_ml_updates(d):
- # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
- # we need to mirror these variables in the multilib case;
+ # If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
+ # or PREFERRED_VERSION are set, we need to mirror these variables in
+ # the multilib case;
multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
@@ -11,43 +12,54 @@ def preferred_ml_updates(d):
if len(eext) > 1 and eext[0] == 'multilib':
prefixes.append(eext[1])
- versions = []
+ required_versions = []
+ preferred_versions = []
providers = []
rproviders = []
for v in d.keys():
+ if v.startswith("REQUIRED_VERSION_"):
+ required_versions.append(v)
if v.startswith("PREFERRED_VERSION_"):
- versions.append(v)
+ preferred_versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
if v.startswith("PREFERRED_RPROVIDER_"):
rproviders.append(v)
- for v in versions:
- val = d.getVar(v, False)
- pkg = v.replace("PREFERRED_VERSION_", "")
- if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
- continue
- if '-cross-' in pkg and '${' in pkg:
+ def sort_versions(versions, keyword):
+ version_str = "_".join([keyword, "VERSION", ""])
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace(version_str, "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if '-cross-' in pkg and '${' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ if "-canadian-" in pkg:
+ newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
+ if newtune:
+ localdata.setVar("DEFAULTTUNE", newtune)
+ newname = localdata.expand(v)
+ else:
+ newname = localdata.expand(v).replace(version_str, version_str + p + '-')
+ if newname != v:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ vexp = d.expand(v)
+ if v != vexp and d.getVar(v, False):
+ d.renameVar(v, vexp)
+ continue
for p in prefixes:
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- if "-canadian-" in pkg:
- newname = localdata.expand(v)
- else:
- newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
- if newname != v:
- newval = localdata.expand(val)
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- vexp = d.expand(v)
- if v != vexp and d.getVar(v, False):
- d.renameVar(v, vexp)
- continue
- for p in prefixes:
- newname = "PREFERRED_VERSION_" + p + "-" + pkg
- if not d.getVar(newname, False):
- d.setVar(newname, val)
+ newname = version_str + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
+
+ sort_versions(required_versions, "REQUIRED")
+ sort_versions(preferred_versions, "PREFERRED")
for prov in providers:
val = d.getVar(prov, False)
@@ -118,6 +130,9 @@ def preferred_ml_updates(d):
d.renameVar(prov, provexp)
def translate_provide(prefix, prov):
+    # We really need to know if the kernel module class is inherited somehow
+ if prov == "lttng-modules":
+ return prov
if not prov.startswith("virtual/"):
return prefix + "-" + prov
if prov == "virtual/kernel":
@@ -125,14 +140,14 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
for pref in prefixes:
extramp.append(translate_provide(pref, p))
- d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+ d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
@@ -152,8 +167,8 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS").split():
- if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
- e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ if e.data.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
+ e.data.setVar("TARGET_VENDOR:virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
}
addhandler multilib_virtclass_handler_vendor
@@ -169,35 +184,40 @@ python multilib_virtclass_handler_global () {
if bb.data.inherits_class('kernel', e.data) or \
bb.data.inherits_class('module-base', e.data) or \
d.getVar('BPN') in non_ml_recipes:
+
+ # We need to avoid expanding KERNEL_VERSION which we can do by deleting it
+ # from a copy of the datastore
+ localdata = bb.data.createCopy(d)
+ localdata.delVar("KERNEL_VERSION")
+
variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
import oe.classextend
clsextends = []
for variant in variants:
- clsextends.append(oe.classextend.ClassExtender(variant, e.data))
+ clsextends.append(oe.classextend.ClassExtender(variant, localdata))
# Process PROVIDES
- origprovs = provs = e.data.getVar("PROVIDES") or ""
+ origprovs = provs = localdata.getVar("PROVIDES") or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
e.data.setVar("PROVIDES", provs)
# Process RPROVIDES
- origrprovs = rprovs = e.data.getVar("RPROVIDES") or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES") or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
+ # Process RPROVIDES:${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
- origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
for clsextend in clsextends:
- rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
- e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+ e.data.setVar("RPROVIDES:%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
-multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeParsed"
-
+multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
index e03f5b13b2..efbc24f59b 100644
--- a/meta/classes/multilib_header.bbclass
+++ b/meta/classes/multilib_header.bbclass
@@ -42,11 +42,11 @@ oe_multilib_header() {
# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
# We don't need multilib headers for native builds so brute force things.
-oe_multilib_header_class-native () {
+oe_multilib_header:class-native () {
return
}
# Nor do we need multilib headers for nativesdk builds.
-oe_multilib_header_class-nativesdk () {
+oe_multilib_header:class-nativesdk () {
return
}
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
index dc166d06c1..41597341cd 100644
--- a/meta/classes/multilib_script.bbclass
+++ b/meta/classes/multilib_script.bbclass
@@ -17,18 +17,18 @@ multilibscript_rename() {
python () {
# Do nothing if multilib isn't being used
if not d.getVar("MULTILIB_VARIANTS"):
- return
+ return
# Do nothing for native/cross
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
+ return
for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
- pkg, script = entry.split(":")
- epkg = d.expand(pkg)
- scriptname = os.path.basename(script)
- d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
- d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
- d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
- d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+ pkg, script = entry.split(":")
+ epkg = d.expand(pkg)
+ scriptname = os.path.basename(script)
+ d.appendVar("ALTERNATIVE:" + epkg, " " + scriptname + " ")
+ d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
+ d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES:" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
}
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 30a30f924d..fc7422c5d7 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -5,19 +5,11 @@ inherit relocatable
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
-PACKAGES = ""
-PACKAGES_class-native = ""
-PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
PACKAGE_ARCH = "${BUILD_ARCH}"
# used by cmake class
OECMAKE_RPATH = "${libdir}"
-OECMAKE_RPATH_class-native = "${libdir}"
-
-# When this class has packaging enabled, setting
-# RPROVIDES becomes unnecessary.
-RPROVIDES = "${PN}"
+OECMAKE_RPATH:class-native = "${libdir}"
TARGET_ARCH = "${BUILD_ARCH}"
TARGET_OS = "${BUILD_OS}"
@@ -89,6 +81,7 @@ export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
@@ -113,7 +106,7 @@ CLASSOVERRIDE = "class-native"
MACHINEOVERRIDES = ""
MACHINE_FEATURES = ""
-PATH_prepend = "${COREBASE}/scripts/native-intercept:"
+PATH:prepend = "${COREBASE}/scripts/native-intercept:"
# This class encodes staging paths into its scripts data so can only be
# reused if we manipulate the paths.
@@ -126,6 +119,7 @@ python native_virtclass_handler () {
pn = e.data.getVar("PN")
if not pn.endswith("-native"):
return
+ bpn = e.data.getVar("BPN")
# Set features here to prevent appends and distro features backfill
# from modifying native distro features
@@ -137,9 +131,9 @@ python native_virtclass_handler () {
if "native" not in classextend:
return
- def map_dependencies(varname, d, suffix = ""):
+ def map_dependencies(varname, d, suffix = "", selfref=True):
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
deps = d.getVar(varname)
if not deps:
return
@@ -147,22 +141,28 @@ python native_virtclass_handler () {
newdeps = []
for dep in deps:
if dep == pn:
- continue
+ if not selfref:
+ continue
+ newdeps.append(dep)
elif "-cross-" in dep:
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
- newdeps.append(dep + "-native")
+ # Replace ${PN} with ${BPN} in the dependency to make sure
+ # dependencies on, e.g., ${PN}-foo become ${BPN}-foo-native
+ # rather than ${BPN}-native-foo-native.
+ newdeps.append(dep.replace(pn, bpn) + "-native")
else:
newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps))
+ d.setVar(varname, " ".join(newdeps), parsing=True)
- map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN"), "", "${PN}"]:
+ map_dependencies("DEPENDS", e.data, selfref=False)
+ for pkg in e.data.getVar("PACKAGES", False).split():
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
+ map_dependencies("PACKAGES", e.data)
provides = e.data.getVar("PROVIDES")
nprovides = []
@@ -170,7 +170,7 @@ python native_virtclass_handler () {
if prov.find(pn) != -1:
nprovides.append(prov)
elif not prov.endswith("-native"):
- nprovides.append(prov.replace(prov, prov + "-native"))
+ nprovides.append(prov + "-native")
else:
nprovides.append(prov)
e.data.setVar("PROVIDES", ' '.join(nprovides))
@@ -185,10 +185,44 @@ python do_addto_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
inherit nopackages
do_packagedata[stamp-extra-info] = ""
-do_populate_sysroot[stamp-extra-info] = ""
USE_NLS = "no"
+
+RECIPERDEPTASK = "do_populate_sysroot"
+do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
+
+#
+# Native task outputs are directly run on the target (host) system after being
+# built. Even if the output of this recipe doesn't change, a change in one of
+# its dependencies may cause a change in the output it generates (e.g. rpm
+# output depends on the output of its dependent zstd library).
+#
+# This can cause poor interactions with hash equivalence, since this recipe's
+# output-changing dependency is "hidden" and downstream tasks only see that this
+# recipe has the same outhash and therefore is equivalent. This can result in
+# different output in different cases.
+#
+# To resolve this, unhide the output-changing dependency by adding its unihash
+# to this task's outhash calculation. Unfortunately, we don't specifically know
+# which dependencies are output-changing, so we have to add all of them.
+#
+python native_add_do_populate_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task != "do_populate_sysroot":
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ pn = d.getVar("PN")
+ deps = {
+ dep[0]:dep[6] for dep in taskdepdata.values() if
+ dep[1] == current_task and dep[0] != pn
+ }
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "native_add_do_populate_sysroot_deps"
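The comprehension above relies on each BB_TASKDEPDATA entry being a tuple whose field 0 is the providing recipe's PN, field 1 the task name, and field 6 the task's unihash. The resulting HASHEQUIV_EXTRA_SIGDATA is then a sorted "name: unihash" listing, roughly of the form (hashes invented for illustration):

    xz-native: 1f0c5f...
    zstd-native: 9ab37e...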
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index f25b0c31b1..f8e9607513 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -9,6 +9,7 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
MULTILIBS = ""
@@ -27,10 +28,10 @@ PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
# binaries
#
-DEPENDS_append = " chrpath-replacement-native"
+DEPENDS:append = " chrpath-replacement-native"
EXTRANATIVEPATH += "chrpath-native"
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
+PKGDATA_DIR = "${PKGDATA_DIR_SDK}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -57,7 +58,7 @@ EXTRA_OECONF_GCC_FLOAT = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
# Change to place files in SDKPATH
@@ -100,14 +101,17 @@ python () {
clsextend.map_packagevars()
clsextend.map_variable("PROVIDES")
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
}
addhandler nativesdk_virtclass_handler
nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH:prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/nopackages.bbclass b/meta/classes/nopackages.bbclass
index 559f5078bd..7a4f632d71 100644
--- a/meta/classes/nopackages.bbclass
+++ b/meta/classes/nopackages.bbclass
@@ -2,6 +2,7 @@ deltask do_package
deltask do_package_write_rpm
deltask do_package_write_ipk
deltask do_package_write_deb
+deltask do_package_write_tar
deltask do_package_qa
deltask do_packagedata
deltask do_package_setscene
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
index 6dbae6bc79..ba50fcac20 100644
--- a/meta/classes/npm.bbclass
+++ b/meta/classes/npm.bbclass
@@ -1,91 +1,319 @@
-DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_prepend = "nodejs "
-S = "${WORKDIR}/npmpkg"
+# Copyright (C) 2020 Savoir-Faire Linux
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This bbclass builds and installs an npm package to the target. The package
+# source files should be fetched in the calling recipe by using the SRC_URI
+# variable. The ${S} variable should be updated depending on your fetcher.
+#
+# Usage:
+# SRC_URI = "..."
+# inherit npm
+#
+# Optional variables:
+# NPM_ARCH:
+# Override the auto generated npm architecture.
+#
+# NPM_INSTALL_DEV:
+# Set to 1 to also install devDependencies.
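A minimal sketch of a recipe using this class with the npm and npmsw fetchers (package name, version and checksum are placeholders):

    SUMMARY = "Example npm package"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://LICENSE;md5=..."

    SRC_URI = " \
        npm://registry.npmjs.org/;package=example;version=${PV} \
        npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
    "
    S = "${WORKDIR}/npm"

    inherit npm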
-def node_pkgname(d):
- bpn = d.getVar('BPN')
- if bpn.startswith("node-"):
- return bpn[5:]
- return bpn
+inherit python3native
-NPMPN ?= "${@node_pkgname(d)}"
+DEPENDS:prepend = "nodejs-native "
+RDEPENDS:${PN}:append:class-target = " nodejs"
-NPM_INSTALLDIR = "${libdir}/node/${NPMPN}"
+EXTRA_OENPM = ""
-# function maps arch names to npm arch names
-def npm_oe_arch_map(target_arch, d):
+NPM_INSTALL_DEV ?= "0"
+
+NPM_NODEDIR ?= "${RECIPE_SYSROOT_NATIVE}${prefix_native}"
+
+def npm_target_arch_map(target_arch):
+ """Maps arch names to npm arch names"""
import re
- if re.match('p(pc|owerpc)(|64)', target_arch): return 'ppc'
- elif re.match('i.86$', target_arch): return 'ia32'
- elif re.match('x86_64$', target_arch): return 'x64'
- elif re.match('arm64$', target_arch): return 'arm'
+ if re.match("p(pc|owerpc)(|64)", target_arch):
+ return "ppc"
+ elif re.match("i.86$", target_arch):
+ return "ia32"
+ elif re.match("x86_64$", target_arch):
+ return "x64"
+ elif re.match("arm64$", target_arch):
+ return "arm"
return target_arch
-NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH'), d)}"
-NPM_INSTALL_DEV ?= "0"
+NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
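As an illustration of the mapping above, a TARGET_ARCH of "i586" or "i686" yields the npm arch "ia32", "x86_64" yields "x64", and anything unmatched (e.g. "mips") passes through unchanged.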
+
+NPM_PACKAGE = "${WORKDIR}/npm-package"
+NPM_CACHE = "${WORKDIR}/npm-cache"
+NPM_BUILD = "${WORKDIR}/npm-build"
+
+def npm_global_configs(d):
+ """Get the npm global configuration"""
+ configs = []
+ # Ensure no network access is done
+ configs.append(("offline", "true"))
+ configs.append(("proxy", "http://invalid"))
+ # Configure the cache directory
+ configs.append(("cache", d.getVar("NPM_CACHE")))
+ return configs
+
+def npm_pack(env, srcdir, workdir):
+ """Run 'npm pack' on a specified directory"""
+ import shlex
+ cmd = "npm pack %s" % shlex.quote(srcdir)
+ args = [("ignore-scripts", "true")]
+ tarball = env.run(cmd, args=args, workdir=workdir).strip("\n")
+ return os.path.join(workdir, tarball)
+
+python npm_do_configure() {
+ """
+ Step one: configure the npm cache and the main npm package
+
+    All dependencies have been fetched and patched in the source directory.
+    They have to be packed (this removes unneeded files) and added to the npm
+ cache to be available for the next step.
+
+ The main package and its associated manifest file and shrinkwrap file have
+ to be configured to take into account these cached dependencies.
+ """
+ import base64
+ import copy
+ import json
+ import re
+ import shlex
+ import tempfile
+ from bb.fetch2.npm import NpmEnvironment
+ from bb.fetch2.npm import npm_unpack
+ from bb.fetch2.npmsw import foreach_dependencies
+ from bb.progress import OutOfProgressHandler
+
+ bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
+ bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
+
+ env = NpmEnvironment(d, configs=npm_global_configs(d))
+
+ def _npm_cache_add(tarball):
+ """Run 'npm cache add' for a specified tarball"""
+ cmd = "npm cache add %s" % shlex.quote(tarball)
+ env.run(cmd)
+
+ def _npm_integrity(tarball):
+ """Return the npm integrity of a specified tarball"""
+ sha512 = bb.utils.sha512_file(tarball)
+ return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
+
+ def _npm_version(tarball):
+ """Return the version of a specified tarball"""
+ regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
+ return re.search(regex, tarball).group(1)
+
+ def _npmsw_dependency_dict(orig, deptree):
+ """
+ Return the sub dictionary in the 'orig' dictionary corresponding to the
+ 'deptree' dependency tree. This function follows the shrinkwrap file
+ format.
+ """
+ ptr = orig
+ for dep in deptree:
+ if "dependencies" not in ptr:
+ ptr["dependencies"] = {}
+ ptr = ptr["dependencies"]
+ if dep not in ptr:
+ ptr[dep] = {}
+ ptr = ptr[dep]
+ return ptr
+
+ # Manage the manifest file and shrinkwrap files
+ orig_manifest_file = d.expand("${S}/package.json")
+ orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
+ cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
+ cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
+
+ with open(orig_manifest_file, "r") as f:
+ orig_manifest = json.load(f)
+
+ cached_manifest = copy.deepcopy(orig_manifest)
+ cached_manifest.pop("dependencies", None)
+ cached_manifest.pop("devDependencies", None)
+
+ has_shrinkwrap_file = True
+
+ try:
+ with open(orig_shrinkwrap_file, "r") as f:
+ orig_shrinkwrap = json.load(f)
+ except IOError:
+ has_shrinkwrap_file = False
+
+ if has_shrinkwrap_file:
+ cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
+ cached_shrinkwrap.pop("dependencies", None)
+
+ # Manage the dependencies
+ progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
+ progress_total = 1 # also count the main package
+ progress_done = 0
+
+ def _count_dependency(name, params, deptree):
+ nonlocal progress_total
+ progress_total += 1
+
+ def _cache_dependency(name, params, deptree):
+ destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
+ destsuffix = os.path.join(*destsubdirs)
+ with tempfile.TemporaryDirectory() as tmpdir:
+ # Add the dependency to the npm cache
+ destdir = os.path.join(d.getVar("S"), destsuffix)
+ tarball = npm_pack(env, destdir, tmpdir)
+ _npm_cache_add(tarball)
+ # Add its signature to the cached shrinkwrap
+ dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
+ dep["version"] = _npm_version(tarball)
+ dep["integrity"] = _npm_integrity(tarball)
+ if params.get("dev", False):
+ dep["dev"] = True
+ # Display progress
+ nonlocal progress_done
+ progress_done += 1
+ progress.write("%d/%d" % (progress_done, progress_total))
+
+ dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
+
+ if has_shrinkwrap_file:
+ foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
+ foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
-npm_do_compile() {
- # Copy in any additionally fetched modules
- if [ -d ${WORKDIR}/node_modules ] ; then
- cp -a ${WORKDIR}/node_modules ${S}/
- fi
- # changing the home directory to the working directory, the .npmrc will
- # be created in this directory
- export HOME=${WORKDIR}
- if [ "${NPM_INSTALL_DEV}" = "1" ]; then
- npm config set dev true
- else
- npm config set dev false
- fi
- npm set cache ${WORKDIR}/npm_cache
- # clear cache before every build
- npm cache clear --force
- # Install pkg into ${S} without going to the registry
- if [ "${NPM_INSTALL_DEV}" = "1" ]; then
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --no-registry install
- else
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
- fi
+ # Configure the main package
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tarball = npm_pack(env, d.getVar("S"), tmpdir)
+ npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
+
+ # Configure the cached manifest file and cached shrinkwrap file
+ def _update_manifest(depkey):
+ for name in orig_manifest.get(depkey, {}):
+ version = cached_shrinkwrap["dependencies"][name]["version"]
+ if depkey not in cached_manifest:
+ cached_manifest[depkey] = {}
+ cached_manifest[depkey][name] = version
+
+ if has_shrinkwrap_file:
+ _update_manifest("dependencies")
+
+ if dev:
+ if has_shrinkwrap_file:
+ _update_manifest("devDependencies")
+
+ with open(cached_manifest_file, "w") as f:
+ json.dump(cached_manifest, f, indent=2)
+
+ if has_shrinkwrap_file:
+ with open(cached_shrinkwrap_file, "w") as f:
+ json.dump(cached_shrinkwrap, f, indent=2)
}
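Assuming a single cached dependency foo at version 1.0.0, the shrinkwrap rewriting above would produce an entry along these lines (the integrity value is illustrative):

    {
      "dependencies": {
        "foo": {
          "version": "1.0.0",
          "integrity": "sha512-lR1..."
        }
      }
    }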
-npm_do_install() {
- # changing the home directory to the working directory, the .npmrc will
- # be created in this directory
- export HOME=${WORKDIR}
- mkdir -p ${D}${libdir}/node_modules
- npm pack .
- npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry ${NPMPN}-${PV}.tgz
- mv ${D}${libdir}/node_modules ${D}${libdir}/node
- if [ -d ${D}${prefix}/etc ] ; then
- # This will be empty
- rmdir ${D}${prefix}/etc
- fi
+python npm_do_compile() {
+ """
+ Step two: install the npm package
+
+ Use the configured main package and the cached dependencies to run the
+    installation process. The installation is done in an intermediate build
+    directory, not yet in the final destination directory.
+
+ A combination of 'npm pack' and 'npm install' is used to ensure that the
+ installed files are actual copies instead of symbolic links (which is the
+ default npm behavior).
+ """
+ import shlex
+ import tempfile
+ from bb.fetch2.npm import NpmEnvironment
+
+ bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ args = []
+ configs = npm_global_configs(d)
+
+ if bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False):
+ configs.append(("also", "development"))
+ else:
+ configs.append(("only", "production"))
+
+ # Report as many logs as possible for debugging purpose
+ configs.append(("loglevel", "silly"))
+
+ # Configure the installation to be done globally in the build directory
+ configs.append(("global", "true"))
+ configs.append(("prefix", d.getVar("NPM_BUILD")))
+
+ # Add node-gyp configuration
+ configs.append(("arch", d.getVar("NPM_ARCH")))
+ configs.append(("release", "true"))
+ configs.append(("nodedir", d.getVar("NPM_NODEDIR")))
+ configs.append(("python", d.getVar("PYTHON")))
+
+ env = NpmEnvironment(d, configs)
+
+ # Add node-pre-gyp configuration
+ args.append(("target_arch", d.getVar("NPM_ARCH")))
+ args.append(("build-from-source", "true"))
+
+ # Pack and install the main package
+ tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
+ cmd = "npm install %s %s" % (shlex.quote(tarball), d.getVar("EXTRA_OENPM"))
+ env.run(cmd, args=args)
}
-python populate_packages_prepend () {
- instdir = d.expand('${D}${NPM_INSTALLDIR}')
- extrapackages = oe.package.npm_split_package_dirs(instdir)
- pkgnames = extrapackages.keys()
- d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
- for pkgname in pkgnames:
- pkgrelpath, pdata = extrapackages[pkgname]
- pkgpath = '${NPM_INSTALLDIR}/' + pkgrelpath
- # package names can't have underscores but npm packages sometimes use them
- oe_pkg_name = pkgname.replace('_', '-')
- expanded_pkgname = d.expand(oe_pkg_name)
- d.setVar('FILES_%s' % expanded_pkgname, pkgpath)
- if pdata:
- version = pdata.get('version', None)
- if version:
- d.setVar('PKGV_%s' % expanded_pkgname, version)
- description = pdata.get('description', None)
- if description:
- d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'"))
- d.appendVar('RDEPENDS_%s' % d.getVar('PN'), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+npm_do_install() {
+ # Step three: final install
+ #
+    # The previous installation has to be filtered to remove some extra files.
+
+ rm -rf ${D}
+
+ # Copy the entire lib and bin directories
+ install -d ${D}/${nonarch_libdir}
+ cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
+
+ if [ -d "${NPM_BUILD}/bin" ]
+ then
+ install -d ${D}/${bindir}
+ cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
+ fi
+
+ # If the package (or its dependencies) uses node-gyp to build native addons,
+ # object files, static libraries or other temporary files can be hidden in
+ # the lib directory. To reduce the package size and to avoid QA issues
+ # (staticdev with static library files) these files must be removed.
+ local GYP_REGEX=".*/build/Release/[^/]*.node"
+
+ # Remove any node-gyp directory in ${D} to remove temporary build files
+ for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
+ do
+ local GYP_D_DIR=${GYP_D_FILE%/Release/*}
+
+ rm --recursive --force ${GYP_D_DIR}
+ done
+
+ # Copy only the node-gyp release files
+ for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
+ do
+ local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
+
+ install -d ${GYP_D_FILE%/*}
+ install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
+ done
+
+ # Remove the shrinkwrap file which does not need to be packed
+ rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
+ rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
+
+ # node(1) is using /usr/lib/node as default include directory and npm(1) is
+ # using /usr/lib/node_modules as install directory. Let's make both happy.
+ ln -fs node_modules ${D}/${nonarch_libdir}/node
}
-FILES_${PN} += " \
- ${NPM_INSTALLDIR} \
+FILES:${PN} += " \
+ ${bindir} \
+ ${nonarch_libdir} \
"
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/overlayfs-etc.bbclass b/meta/classes/overlayfs-etc.bbclass
new file mode 100644
index 0000000000..91afee695c
--- /dev/null
+++ b/meta/classes/overlayfs-etc.bbclass
@@ -0,0 +1,76 @@
+# Class for setting up /etc in overlayfs
+#
+# In order to have the /etc directory in overlayfs, special handling at an early
+# boot stage is required. The idea is to supply a custom init script that mounts
+# /etc before launching the actual init program, because the latter already
+# requires /etc to be mounted.
+#
+# The configuration must be machine specific. You should at least set these three variables:
+# OVERLAYFS_ETC_MOUNT_POINT ?= "/data"
+# OVERLAYFS_ETC_FSTYPE ?= "ext4"
+# OVERLAYFS_ETC_DEVICE ?= "/dev/mmcblk0p2"
+#
+# To control more mount options you should consider setting mount options:
+# OVERLAYFS_ETC_MOUNT_OPTIONS ?= "defaults"
+#
+# The class provides two options for /sbin/init generation
+# 1. The default option is to rename the original /sbin/init to /sbin/init.orig and place the
+#    generated init under the original name, i.e. /sbin/init. Its advantage is that you won't
+#    need to change any kernel parameters to make it work, but it poses the restriction that
+#    package-management can't be used, because updating the init manager would remove the
+#    generated script.
+# 2. If you would like to keep the original init as is, you can set
+#    OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "0"
+#    Then the generated init will be named /sbin/preinit and you would need to extend your
+#    kernel parameters manually in your bootloader configuration.
+#
+# Regardless of which mode you choose, the update and migration strategy for configuration
+# files under the /etc overlay is out of the scope of this class.
+
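Putting this together, a machine configuration enabling the class could look like (device, fstype and mount point are board-specific examples):

    OVERLAYFS_ETC_MOUNT_POINT = "/data"
    OVERLAYFS_ETC_FSTYPE = "ext4"
    OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p2"

with the image recipe opting in via:

    IMAGE_FEATURES += "overlayfs-etc"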
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "overlayfs-etc", "create_overlayfs_etc_preinit;", "", d)}'
+IMAGE_FEATURES_CONFLICTS_overlayfs-etc = "${@ 'package-management' if bb.utils.to_boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'), True) else ''}"
+
+OVERLAYFS_ETC_MOUNT_POINT ??= ""
+OVERLAYFS_ETC_FSTYPE ??= ""
+OVERLAYFS_ETC_DEVICE ??= ""
+OVERLAYFS_ETC_USE_ORIG_INIT_NAME ??= "1"
+OVERLAYFS_ETC_MOUNT_OPTIONS ??= "defaults"
+OVERLAYFS_ETC_INIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-etc-preinit.sh.in"
+
+python create_overlayfs_etc_preinit() {
+ overlayEtcMountPoint = d.getVar("OVERLAYFS_ETC_MOUNT_POINT")
+ overlayEtcFsType = d.getVar("OVERLAYFS_ETC_FSTYPE")
+ overlayEtcDevice = d.getVar("OVERLAYFS_ETC_DEVICE")
+
+ if not overlayEtcMountPoint:
+ bb.fatal("OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration")
+ if not overlayEtcDevice:
+ bb.fatal("OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration")
+ if not overlayEtcFsType:
+ bb.fatal("OVERLAYFS_ETC_FSTYPE should contain a valid file system type on {0}".format(overlayEtcDevice))
+
+ with open(d.getVar("OVERLAYFS_ETC_INIT_TEMPLATE"), "r") as f:
+ PreinitTemplate = f.read()
+
+ useOrigInit = oe.types.boolean(d.getVar('OVERLAYFS_ETC_USE_ORIG_INIT_NAME'))
+ preinitPath = oe.path.join(d.getVar("IMAGE_ROOTFS"), d.getVar("base_sbindir"), "preinit")
+ initBaseName = oe.path.join(d.getVar("base_sbindir"), "init")
+ origInitNameSuffix = ".orig"
+
+ args = {
+ 'OVERLAYFS_ETC_MOUNT_POINT': overlayEtcMountPoint,
+ 'OVERLAYFS_ETC_MOUNT_OPTIONS': d.getVar('OVERLAYFS_ETC_MOUNT_OPTIONS'),
+ 'OVERLAYFS_ETC_FSTYPE': overlayEtcFsType,
+ 'OVERLAYFS_ETC_DEVICE': overlayEtcDevice,
+ 'SBIN_INIT_NAME': initBaseName + origInitNameSuffix if useOrigInit else initBaseName
+ }
+
+ if useOrigInit:
+ # rename original /sbin/init
+ origInit = oe.path.join(d.getVar("IMAGE_ROOTFS"), initBaseName)
+ bb.debug(1, "rootfs path %s, init path %s, test %s" % (d.getVar('IMAGE_ROOTFS'), origInit, d.getVar("IMAGE_ROOTFS")))
+ bb.utils.rename(origInit, origInit + origInitNameSuffix)
+ preinitPath = origInit
+
+ with open(preinitPath, 'w') as f:
+ f.write(PreinitTemplate.format(**args))
+ os.chmod(preinitPath, 0o755)
+}
diff --git a/meta/classes/overlayfs.bbclass b/meta/classes/overlayfs.bbclass
new file mode 100644
index 0000000000..29fced2ca7
--- /dev/null
+++ b/meta/classes/overlayfs.bbclass
@@ -0,0 +1,119 @@
+# Class for generation of overlayfs mount units
+#
+# It's often desired in embedded system design to have a read-only rootfs.
+# But a lot of different applications might want to have read-write access to
+# some parts of a filesystem. This can be especially useful when your update mechanism
+# overwrites the whole rootfs, but you want your application data to be preserved
+# between updates. This class provides a way to achieve that by means
+# of overlayfs, while at the same time keeping the base rootfs read-only.
+#
+# Usage example.
+#
+# Set a mount point for the partition overlayfs is going to use as the upper layer.
+# The underlying file system can be anything that is supported by overlayfs.
+# This has to be done in your machine configuration; the QA check fails to verify
+# file existence if you redefine this variable in your recipe!
+#
+# OVERLAYFS_MOUNT_POINT[data] ?= "/data"
+#
+# The class assumes you have a data.mount systemd unit defined in your
+# systemd-machine-units recipe and installed to the image.
+#
+# Then you can specify writable directories on a recipe base
+#
+# OVERLAYFS_WRITABLE_PATHS[data] = "/usr/share/my-custom-application"
+#
+# To support several mount points you can use a different variable flag. Assume we
+# want to have a writable location on the file system, but are not interested in whether
+# the data survives a reboot. Then we could have a mnt-overlay.mount unit for a tmpfs file system:
+#
+# OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
+# OVERLAYFS_WRITABLE_PATHS[mnt-overlay] = "/usr/share/another-application"
+#
+# Note: the class does not support the /etc directory itself, because systemd depends on it.
+# For the /etc directory, use the overlayfs-etc class.
+
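For reference, a minimal sketch of the data.mount unit this class expects the systemd-machine-units recipe to provide (device and fs type are examples):

    [Unit]
    Description=Mount /data partition

    [Mount]
    What=/dev/mmcblk0p2
    Where=/data
    Type=ext4

    [Install]
    WantedBy=multi-user.target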
+REQUIRED_DISTRO_FEATURES += "systemd overlayfs"
+
+inherit systemd features_check
+
+OVERLAYFS_CREATE_DIRS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-create-dirs.service.in"
+OVERLAYFS_MOUNT_UNIT_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-unit.mount.in"
+OVERLAYFS_ALL_OVERLAYS_TEMPLATE ??= "${COREBASE}/meta/files/overlayfs-all-overlays.service.in"
+
+python do_create_overlayfs_units() {
+ from oe.overlayfs import mountUnitName
+
+ with open(d.getVar("OVERLAYFS_CREATE_DIRS_TEMPLATE"), "r") as f:
+ CreateDirsUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_MOUNT_UNIT_TEMPLATE"), "r") as f:
+ MountUnitTemplate = f.read()
+ with open(d.getVar("OVERLAYFS_ALL_OVERLAYS_TEMPLATE"), "r") as f:
+ AllOverlaysTemplate = f.read()
+
+ def prepareUnits(data, lower):
+ from oe.overlayfs import helperUnitName
+
+ args = {
+ 'DATA_MOUNT_POINT': data,
+ 'DATA_MOUNT_UNIT': mountUnitName(data),
+ 'CREATE_DIRS_SERVICE': helperUnitName(lower),
+ 'LOWERDIR': lower,
+ }
+
+ bb.debug(1, "Generate systemd unit %s" % mountUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), mountUnitName(lower)), 'w') as f:
+ f.write(MountUnitTemplate.format(**args))
+
+ bb.debug(1, "Generate helper systemd unit %s" % helperUnitName(lower))
+ with open(os.path.join(d.getVar('WORKDIR'), helperUnitName(lower)), 'w') as f:
+ f.write(CreateDirsUnitTemplate.format(**args))
+
+ def prepareGlobalUnit(dependentUnits):
+ from oe.overlayfs import allOverlaysUnitName
+ args = {
+ 'ALL_OVERLAYFS_UNITS': " ".join(dependentUnits),
+ 'PN': d.getVar('PN')
+ }
+
+ bb.debug(1, "Generate systemd unit with all overlays %s" % allOverlaysUnitName(d))
+ with open(os.path.join(d.getVar('WORKDIR'), allOverlaysUnitName(d)), 'w') as f:
+ f.write(AllOverlaysTemplate.format(**args))
+
+ mountUnitList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+ for mountPoint in overlayMountPoints:
+ bb.debug(1, "Process variable flag %s" % mountPoint)
+ for lower in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ bb.debug(1, "Prepare mount unit for %s with data mount point %s" %
+ (lower, d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)))
+ prepareUnits(d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint), lower)
+ mountUnitList.append(mountUnitName(lower))
+
+ # set up one unit, which depends on all mount units, so users can set
+ # only one dependency in their units to make sure software starts
+ # when all overlays are mounted
+ prepareGlobalUnit(mountUnitList)
+}
+
+# we need to generate file names early, during the parsing stage
+python () {
+ from oe.overlayfs import strForBash, unitFileList
+
+ unitList = unitFileList(d)
+ for unit in unitList:
+ d.appendVar('SYSTEMD_SERVICE:' + d.getVar('PN'), ' ' + unit)
+ d.appendVar('FILES:' + d.getVar('PN'), ' ' +
+ d.getVar('systemd_system_unitdir') + '/' + strForBash(unit))
+
+ d.setVar('OVERLAYFS_UNIT_LIST', ' '.join([strForBash(s) for s in unitList]))
+}
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ for unit in ${OVERLAYFS_UNIT_LIST}; do
+ install -m 0444 ${WORKDIR}/${unit} ${D}${systemd_system_unitdir}
+ done
+}
+
+addtask create_overlayfs_units before do_install
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index a777835138..ef972740ce 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,14 @@
-PREMIRRORS_prepend = " \
-cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
-svn://.*/.* ${SOURCE_MIRROR_URL} \n \
-git://.*/.* ${SOURCE_MIRROR_URL} \n \
-gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
-hg://.*/.* ${SOURCE_MIRROR_URL} \n \
-bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
-p4://.*/.* ${SOURCE_MIRROR_URL} \n \
-osc://.*/.* ${SOURCE_MIRROR_URL} \n \
-https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
-ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
-npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
+PREMIRRORS:prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \
+svn://.*/.* ${SOURCE_MIRROR_URL} \
+git://.*/.* ${SOURCE_MIRROR_URL} \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \
+hg://.*/.* ${SOURCE_MIRROR_URL} \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \
+p4://.*/.* ${SOURCE_MIRROR_URL} \
+osc://.*/.* ${SOURCE_MIRROR_URL} \
+https?://.*/.* ${SOURCE_MIRROR_URL} \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \
+s3://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 7dd1b09a87..e71daafe94 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -7,7 +7,7 @@
#
# There are the following default steps but PACKAGEFUNCS can be extended:
#
-# a) package_get_auto_pr - get PRAUTO from remote PR service
+# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC}
#
# b) perform_packagecopy - Copy D into PKGD
#
@@ -40,8 +40,7 @@
inherit packagedata
inherit chrpath
-
-# Need the package_qa_handle_error() in insane.bbclass
+inherit package_pkgdata
inherit insane
PKGD = "${WORKDIR}/package"
@@ -198,7 +197,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg)
+ oldfiles = d.getVar('FILES:' + pkg)
newfile = os.path.join(root, o)
# These names will be passed through glob() so if the filename actually
# contains * or ? (rare, but possible) we need to handle that specially
@@ -218,19 +217,19 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
the_files.append(fp % m.group(1))
else:
the_files.append(aux_files_pattern_verbatim % m.group(1))
- d.setVar('FILES_' + pkg, " ".join(the_files))
+ d.setVar('FILES:' + pkg, " ".join(the_files))
else:
- d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
+ d.setVar('FILES:' + pkg, oldfiles + " " + newfile)
if extra_depends != '':
- d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg):
- d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg):
- d.setVar('SUMMARY_' + pkg, summary % on)
+ d.appendVar('RDEPENDS:' + pkg, ' ' + extra_depends)
+ if not d.getVar('DESCRIPTION:' + pkg):
+ d.setVar('DESCRIPTION:' + pkg, description % on)
+ if not d.getVar('SUMMARY:' + pkg):
+ d.setVar('SUMMARY:' + pkg, summary % on)
if postinst:
- d.setVar('pkg_postinst_' + pkg, postinst)
+ d.setVar('pkg_postinst:' + pkg, postinst)
if postrm:
- d.setVar('pkg_postrm_' + pkg, postrm)
+ d.setVar('pkg_postrm:' + pkg, postrm)
if callable(hook):
hook(f, pkg, file_regex, output_pattern, m.group(1))
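For reference, a typical invocation of this helper from a recipe's packaging code now looks like (paths, patterns and package names are generic examples):

    python populate_packages:prepend () {
        plugindir = d.expand('${libdir}/myapp/plugins')
        do_split_packages(d, plugindir, r'^lib(.*)\.so$',
                          'myapp-plugin-%s', 'MyApp plugin for %s',
                          extra_depends='')
    }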
@@ -244,6 +243,8 @@ python () {
deps = ""
for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
deps += " %s:do_populate_sysroot" % dep
+ if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
+ deps += ' xz-native:do_populate_sysroot'
d.appendVarFlag('do_package', 'depends', deps)
# shlibs requires any DEPENDS to have already packaged for the *.list files
@@ -300,7 +301,7 @@ def get_conffiles(pkg, d):
cwd = os.getcwd()
os.chdir(root)
- conffiles = d.getVar('CONFFILES_%s' % pkg);
+ conffiles = d.getVar('CONFFILES:%s' % pkg);
if conffiles == None:
conffiles = d.getVar('CONFFILES')
if conffiles == None:
@@ -344,7 +345,7 @@ def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
return debugfiles.keys()
-def append_source_info(file, sourcefile, d, fatal=True):
+def source_info(file, d, fatal=True):
import subprocess
cmd = ["dwarfsrcfiles", file]
@@ -363,29 +364,23 @@ def append_source_info(file, sourcefile, d, fatal=True):
bb.note(msg)
debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
- # filenames are null-separated - this is an artefact of the previous use
- # of rpm's debugedit, which was writing them out that way, and the code elsewhere
- # is still assuming that.
- debuglistoutput = '\0'.join(debugsources) + '\0'
- lf = bb.utils.lockfile(sourcefile + ".lock")
- with open(sourcefile, 'a') as sf:
- sf.write(debuglistoutput)
- bb.utils.unlockfile(lf)
+ return list(debugsources)
-def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d):
+def splitdebuginfo(file, dvar, dv, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
# two files are linked to reference each other.
#
- # sourcefile is also generated containing a list of debugsources
+ # return a mapping of files:debugsources
import stat
import subprocess
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
debugfile = dvar + dest
+ sources = []
# Split the file...
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -395,10 +390,6 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
dvar = d.getVar('PKGD')
objcopy = d.getVar("OBJCOPY")
- # We ignore kernel modules, we don't generate debug info files.
- if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return 1
-
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
origmode = os.stat(file)[stat.ST_MODE]
@@ -406,8 +397,8 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
os.chmod(file, newmode)
# We need to extract the debug src information here...
- if debugsrcdir:
- append_source_info(file, sourcefile, d)
+ if dv["srcdir"]:
+ sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -419,17 +410,146 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
if newmode:
os.chmod(file, origmode)
- return 0
+ return (file, sources)
+
+def splitstaticdebuginfo(file, dvar, dv, d):
+ # Unlike the function above, there is no way to split a static library
+ # into two components. So to get similar results we will copy the unmodified
+ # static library (containing the debug symbols) into a new directory.
+ # We will then strip (preserving symbols) the static library in the
+ # typical location.
+ #
+ # return a mapping of files:debugsources
+
+ import stat
+ import shutil
+
+ src = file[len(dvar):]
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
+ debugfile = dvar + dest
+ sources = []
+
+ # Copy the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Copy %s -> %s" % (file, debugfile))
-def copydebugsources(debugsrcdir, d):
+ dvar = d.getVar('PKGD')
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ # Copy the unmodified item to the debug directory
+ shutil.copy2(file, debugfile)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def inject_minidebuginfo(file, dvar, dv, d):
+ # Extract just the symbols from debuginfo into minidebuginfo,
+ # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
+ # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
+
+ import subprocess
+
+ readelf = d.getVar('READELF')
+ nm = d.getVar('NM')
+ objcopy = d.getVar('OBJCOPY')
+
+ minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ minidebugfile = minidebuginfodir + src + '.minidebug'
+ bb.utils.mkdirhier(os.path.dirname(minidebugfile))
+
+ # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
+ # so skip it.
+ if not os.path.exists(debugfile):
+ bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
+ return
+
+ # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
+ # We will exclude all of these from minidebuginfo to save space.
+ remove_section_names = []
+ for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
+ fields = line.split()
+ if len(fields) < 8:
+ continue
+ name = fields[0]
+ type = fields[1]
+ flags = fields[7]
+ # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
+ if name.startswith('.debug_'):
+ continue
+ if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
+ remove_section_names.append(name)
+
+ # List dynamic symbols in the binary. We can exclude these from minidebuginfo
+ # because they are always present in the binary.
+ dynsyms = set()
+ for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
+ dynsyms.add(line.split()[0])
+
+ # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
+ # These are the ones we want to keep in minidebuginfo.
+ keep_symbols_file = minidebugfile + '.symlist'
+ found_any_symbols = False
+ with open(keep_symbols_file, 'w') as f:
+ for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
+ fields = line.split('|')
+ if len(fields) < 7:
+ continue
+ name = fields[0].strip()
+ type = fields[3].strip()
+ if type == 'FUNC' and name not in dynsyms:
+ f.write('{}\n'.format(name))
+ found_any_symbols = True
+
+ if not found_any_symbols:
+ bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
+ return
+
+ bb.utils.remove(minidebugfile)
+ bb.utils.remove(minidebugfile + '.xz')
+
+ subprocess.check_call([objcopy, '-S'] +
+ ['--remove-section={}'.format(s) for s in remove_section_names] +
+ ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
+
+ subprocess.check_call(['xz', '--keep', minidebugfile])
+
+ subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
+
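gdb picks up the xz-compressed .gnu_debugdata section transparently when producing backtraces from stripped binaries. A hedged way to check that the injection above actually landed (binary path hypothetical):

    # Sketch: confirm a stripped binary now carries a .gnu_debugdata section.
    import subprocess

    def has_minidebuginfo(binary, readelf='readelf'):
        sections = subprocess.check_output([readelf, '-S', '-W', binary],
                                           universal_newlines=True)
        return '.gnu_debugdata' in sections

    # has_minidebuginfo('image/usr/bin/foo')  # hypothetical path -> True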
+def copydebugsources(debugsrcdir, sources, d):
# The debug src information written out to sourcefile is further processed
# and copied to the destination here.
import stat
import subprocess
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- if debugsrcdir and os.path.isfile(sourcefile):
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
dvar = d.getVar('PKGD')
strip = d.getVar("STRIP")
objcopy = d.getVar("OBJCOPY")
@@ -488,17 +608,27 @@ def copydebugsources(debugsrcdir, d):
# Package data handling routines
#
-def get_package_mapping (pkg, basepkg, d):
+def get_package_mapping (pkg, basepkg, d, depversions=None):
import oe.packagedata
data = oe.packagedata.read_subpkgdata(pkg, d)
- key = "PKG_%s" % pkg
+ key = "PKG:%s" % pkg
if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
# Have to avoid undoing the write_extra_pkgs(global_variants...)
if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
and data[key] == basepkg:
return pkg
+ if depversions == []:
+ # Avoid returning a mapping if the renamed package rprovides its original name
+ rprovkey = "RPROVIDES:%s" % pkg
+ if rprovkey in data:
+ if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
+ bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
+ return pkg
+ # Do map to rewritten package name
return data[key]
return pkg
@@ -519,8 +649,10 @@ def runtime_mapping_rename (varname, pkg, d):
new_depends = {}
deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
- for depend in deps:
- new_depend = get_package_mapping(depend, pkg, d)
+ for depend, depversions in deps.items():
+ new_depend = get_package_mapping(depend, pkg, d, depversions)
+ if depend != new_depend:
+ bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
new_depends[new_depend] = deps[depend]
d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
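To make the new depversions guard concrete: an unversioned dependency comes out of explode_dep_versions2() as an empty list, and if the renamed package still RPROVIDES its original name the mapping is deliberately skipped. A sketch with invented pkgdata:

    # Sketch of the RPROVIDES short-circuit in get_package_mapping()
    # (pkgdata values invented for illustration).
    import bb.utils

    data = {"PKG:libfoo": "libfoo1", "RPROVIDES:libfoo": "libfoo (= 1.0)"}
    depversions = []          # unversioned RDEPENDS entry
    if depversions == [] and "libfoo" in bb.utils.explode_dep_versions2(data["RPROVIDES:libfoo"]):
        new_depend = "libfoo"            # keep the original name, still provided
    else:
        new_depend = data["PKG:libfoo"]  # rewrite to the renamed package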
@@ -528,12 +660,20 @@ def runtime_mapping_rename (varname, pkg, d):
#bb.note("%s after: %s" % (varname, d.getVar(varname)))
#
-# Package functions suitable for inclusion in PACKAGEFUNCS
+# Used by do_packagedata (and possibly other routines post do_package)
#
+package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA"
python package_get_auto_pr() {
import oe.prservice
- import re
+
+ def get_do_package_hash(pn):
+ if d.getVar("BB_RUNTASK") != "do_package":
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ for dep in taskdepdata:
+ if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn:
+ return taskdepdata[dep][6]
+ return None
# Support per recipe PRSERV_HOST
pn = d.getVar('PN')
@@ -545,15 +685,22 @@ python package_get_auto_pr() {
# PR Server not active, handle AUTOINC
if not d.getVar('PRSERV_HOST'):
- if 'AUTOINC' in pkgv:
- d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
+ d.setVar("PRSERV_PV_AUTOINC", "0")
return
auto_pr = None
pv = d.getVar("PV")
version = d.getVar("PRAUTOINX")
pkgarch = d.getVar("PACKAGE_ARCH")
- checksum = d.getVar("BB_TASKHASH")
+ checksum = get_do_package_hash(pn)
+
+ # If do_package isn't in the dependencies, we can't get the checksum...
+ if not checksum:
+ bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK'))
+ #taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ #for dep in taskdepdata:
+ # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6]))
+ return
if d.getVar('PRSERV_LOCKDOWN'):
auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
@@ -563,17 +710,16 @@ python package_get_auto_pr() {
return
try:
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = oe.prservice.prserv_make_conn(d)
+ conn = oe.prservice.prserv_make_conn(d)
if conn is not None:
if "AUTOINC" in pkgv:
srcpv = bb.fetch2.get_srcrev(d)
base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
+ d.setVar("PRSERV_PV_AUTOINC", str(value))
auto_pr = conn.getPR(version, pkgarch, checksum)
+ conn.close()
except Exception as e:
bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
if auto_pr is None:
@@ -581,6 +727,22 @@ python package_get_auto_pr() {
d.setVar('PRAUTO',str(auto_pr))
}
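For context on the lookup above: BB_TASKDEPDATA maps task identifiers to tuples in which index 0 is the recipe name, index 1 the task name and index 6 the unihash, which is why get_do_package_hash() can recover do_package's hash from a different task. A sketch with invented values:

    # Sketch of the BB_TASKDEPDATA shape walked by get_do_package_hash().
    # Only indices 0, 1 and 6 are used; the rest are placeholders here.
    taskdepdata = {
        "/path/to/foo.bb:do_package":
            ("foo", "do_package", None, None, None, None, "deadbeefcafe"),
    }
    checksum = None
    for dep in taskdepdata:
        if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == "foo":
            checksum = taskdepdata[dep][6]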
+#
+# Package functions suitable for inclusion in PACKAGEFUNCS
+#
+
+python package_convert_pr_autoinc() {
+ pkgv = d.getVar("PKGV")
+
+ # Adjust pkgv as necessary...
+ if 'AUTOINC' in pkgv:
+ d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}"))
+
+ # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values
+ d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@')
+ d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@')
+}
+
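The net effect, together with packagedata_translate_pr_autoinc further down, is a two-stage substitution: do_package leaves a placeholder in PKGV, pkgdata files get written with a literal @PRSERV_PV_AUTOINC@ marker, and do_packagedata's sed pass fills in the PR-server value. A sketch with invented version strings:

    # Sketch of the two-stage AUTOINC substitution (values invented).
    pkgv = "1.0+gitAUTOINC+deadbeef"
    pkgv = pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}")   # package_convert_pr_autoinc
    # With PRSERV_PV_AUTOINC set to '@PRSERV_PV_AUTOINC@', pkgdata expands to:
    stored = "1.0+git@PRSERV_PV_AUTOINC@+deadbeef"
    final = stored.replace("@PRSERV_PV_AUTOINC@", "7")       # sed in do_packagedata
    # final == "1.0+git7+deadbeef"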
LOCALEBASEPN ??= "${PN}"
python package_do_split_locales() {
@@ -617,13 +779,13 @@ python package_do_split_locales() {
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
packages.append(pkg)
- d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
- d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
- d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
- d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
- d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ d.setVar('FILES:' + pkg, os.path.join(datadir, 'locale', l))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
if locale_section:
- d.setVar('SECTION_' + pkg, locale_section)
+ d.setVar('SECTION:' + pkg, locale_section)
d.setVar('PACKAGES', ' '.join(packages))
@@ -633,17 +795,23 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
#rdep.append('%s-locale*' % pn)
- #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
}
python perform_packagecopy () {
import subprocess
+ import shutil
dest = d.getVar('D')
dvar = d.getVar('PKGD')
+ # Remove ${D}/sysroot-only if present
+ sysroot_only = os.path.join(dest, 'sysroot-only')
+ if cpath.exists(sysroot_only) and cpath.isdir(sysroot_only):
+ shutil.rmtree(sysroot_only)
+
# Start package population by taking a copy of the installed
# files to operate on
# Preserve sparse files and hard links
@@ -693,7 +861,7 @@ python fixup_perms () {
self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
else:
msg = "Fixup Perms: invalid config line %s" % line
- package_qa_handle_error("perm-config", msg, d)
+ oe.qa.handle_error("perm-config", msg, d)
self.path = None
self.link = None
@@ -822,8 +990,9 @@ python fixup_perms () {
# Now we actually load from the configuration files
for conf in get_fs_perms_list(d).split():
- if os.path.exists(conf):
- f = open(conf)
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
for line in f:
if line.startswith('#'):
continue
@@ -832,7 +1001,7 @@ python fixup_perms () {
continue
if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
msg = "Fixup perms: %s invalid line: %s" % (conf, line)
- package_qa_handle_error("perm-line", msg, d)
+ oe.qa.handle_error("perm-line", msg, d)
continue
entry = fs_perms_entry(d.expand(line))
if entry and entry.path:
@@ -844,7 +1013,6 @@ python fixup_perms () {
fs_perms_table[entry.path] = entry
if entry.path in fs_link_table:
fs_link_table.pop(entry.path)
- f.close()
# Debug -- list out in-memory table
#for dir in fs_perms_table:
@@ -870,13 +1038,13 @@ python fixup_perms () {
ptarget = os.path.join(os.path.dirname(dir), link)
if os.path.exists(target):
msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
- package_qa_handle_error("perm-link", msg, d)
+ oe.qa.handle_error("perm-link", msg, d)
continue
# Create path to move directory to, move it, and then setup the symlink
bb.utils.mkdirhier(os.path.dirname(target))
#bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
- os.rename(origin, target)
+ bb.utils.rename(origin, target)
#bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
os.symlink(link, origin)
@@ -897,51 +1065,72 @@ python fixup_perms () {
fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
}
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
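The practical difference between the styles is simply where the debug file for a given binary ends up; splitdebuginfo() computes the destination from these fields. A sketch for /usr/bin/foo (paths illustrative):

    # Sketch: debug file destination per PACKAGE_DEBUG_SPLIT_STYLE.
    import os

    def debug_dest(dv, src="/usr/bin/foo"):
        return (dv["libdir"] + os.path.dirname(src) + dv["dir"]
                + "/" + os.path.basename(src) + dv["append"])

    # 'debug-file-directory' -> /usr/lib/debug/usr/bin/foo.debug
    # default '.debug' style -> /usr/bin/.debug/foo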
python split_and_strip_files () {
import stat, errno
import subprocess
dvar = d.getVar('PKGD')
pn = d.getVar('PN')
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
- # We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
- # Single debug-file-directory style debug info
- debugappend = ".debug"
- debugdir = ""
- debuglibdir = "/usr/lib/debug"
- debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
- # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
- debugappend = ""
- debugdir = "/.debug"
- debuglibdir = ""
- debugsrcdir = ""
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
- debugappend = ""
- debugdir = "/.debug"
- debuglibdir = ""
- debugsrcdir = "/usr/src/debug"
- else:
- # Original OE-core, a.k.a. ".debug", style debug info
- debugappend = ""
- debugdir = "/.debug"
- debuglibdir = ""
- debugsrcdir = "/usr/src/debug"
-
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- bb.utils.remove(sourcefile)
+ dv = package_debug_vars(d)
#
# First lets figure out all of the files we may have to process ... do this only once!
#
elffiles = {}
symlinks = {}
- kernmods = []
staticlibs = []
inodes = {}
libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
@@ -954,22 +1143,20 @@ python split_and_strip_files () {
for root, dirs, files in cpath.walk(dvar):
for f in files:
file = os.path.join(root, f)
- if file.endswith(".ko") and file.find("/lib/modules/") != -1:
- kernmods.append(file)
- continue
- if oe.package.is_static_lib(file):
- staticlibs.append(file)
- continue
# Skip debug files
- if debugappend and file.endswith(debugappend):
+ if dv["append"] and file.endswith(dv["append"]):
continue
- if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
continue
if file in skipfiles:
continue
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
+
try:
ltarget = cpath.realpath(file, dvar, False)
s = cpath.lstat(ltarget)
@@ -982,8 +1169,11 @@ python split_and_strip_files () {
if not s:
continue
# Check it's an executable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
if cpath.islink(file):
checkelflinks[file] = ltarget
@@ -1005,16 +1195,22 @@ python split_and_strip_files () {
symlinks[file] = target
results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks
+ results.sort(key=lambda x: x[0])
+
for (file, elf_file) in results:
# It's a file (or hardlink), not a link
# ...but is it ELF, and is it already stripped?
if elf_file & 1:
if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
else:
msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
- package_qa_handle_error("already-stripped", msg, d)
+ oe.qa.handle_error("already-stripped", msg, d)
continue
# At this point we have an unstripped elf file. We need to:
@@ -1036,15 +1232,32 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
# First lets process debug splitting
#
if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d))
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
- if debugsrcdir and not targetos.startswith("mingw"):
- for file in staticlibs:
- append_source_info(file, sourcefile, d, fatal=False)
+ if dv["srcdir"] and not hostos.startswith("mingw"):
+ if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
+ else:
+ for file in staticlibs:
+ results.append( (file,source_info(file, d)) )
+
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
+ sources = set()
+ for r in results:
+ sources.update(r[1])
# Hardlink our debug symbols to the other hardlink copies
for ref in inodes:
@@ -1054,9 +1267,9 @@ python split_and_strip_files () {
target = inodes[ref][0][len(dvar):]
for file in inodes[ref][1:]:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
fpath = dvar + dest
- ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
bb.utils.mkdirhier(os.path.dirname(fpath))
# Only one hardlink of separated debug info file in each directory
if not os.access(fpath, os.R_OK):
@@ -1066,7 +1279,7 @@ python split_and_strip_files () {
# Create symlinks for all cases we were able to split symbols
for file in symlinks:
src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
fpath = dvar + dest
# Skip it if the target doesn't exist
try:
@@ -1082,17 +1295,17 @@ python split_and_strip_files () {
lbase = os.path.basename(ltarget)
ftarget = ""
if lpath and lpath != ".":
- ftarget += lpath + debugdir + "/"
- ftarget += lbase + debugappend
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
if lpath.startswith(".."):
ftarget = os.path.join("..", ftarget)
bb.utils.mkdirhier(os.path.dirname(fpath))
#bb.note("Symlink %s -> %s" % (fpath, ftarget))
os.symlink(ftarget, fpath)
- # Process the debugsrcdir if requested...
+ # Process the dv["srcdir"] if requested...
# This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, d)
+ copydebugsources(dv["srcdir"], sources, d)
#
# End of debug splitting
#
@@ -1107,11 +1320,17 @@ python split_and_strip_files () {
elf_file = int(elffiles[file])
#bb.note("Strip %s" % file)
sfiles.append((file, elf_file, strip))
- for f in kernmods:
- sfiles.append((f, 16, strip))
+ if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ for f in staticlibs:
+ sfiles.append((f, 16, strip))
oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
+ # Build "minidebuginfo" and reinject it back into the stripped binaries
+ if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
+ oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
+ extraargs=(dvar, dv, d))
+
#
# End of strip
#
@@ -1124,7 +1343,7 @@ python populate_packages () {
workdir = d.getVar('WORKDIR')
outdir = d.getVar('DEPLOY_DIR')
dvar = d.getVar('PKGD')
- packages = d.getVar('PACKAGES')
+ packages = d.getVar('PACKAGES').split()
pn = d.getVar('PN')
bb.utils.mkdirhier(outdir)
@@ -1134,32 +1353,34 @@ python populate_packages () {
split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
- # If debug-with-srcpkg mode is enabled then the src package is added
- # into the package list and the source directory as its main content
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and add the source file contents to the source package.
if split_source_package:
src_package_name = ('%s-src' % d.getVar('PN'))
- packages += (' ' + src_package_name)
- d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
+ if not src_package_name in packages:
+ packages.append(src_package_name)
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
package_dict = {}
- for i, pkg in enumerate(packages.split()):
+ for i, pkg in enumerate(packages):
if pkg in package_dict:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
- package_qa_handle_error("packages-list", msg, d)
- # If debug-with-srcpkg mode is enabled then the src package will have
- # priority over dbg package when assigning the files.
- # This allows src package to include source files and remove them from dbg.
- elif split_source_package and pkg.endswith("-src"):
+ oe.qa.handle_error("packages-list", msg, d)
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
package_dict[pkg] = (10, i)
elif autodebug and pkg.endswith("-dbg"):
package_dict[pkg] = (30, i)
else:
package_dict[pkg] = (50, i)
- package_list = sorted(package_dict.keys(), key=package_dict.get)
- d.setVar('PACKAGES', ' '.join(package_list))
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
pkgdest = d.getVar('PKGDEST')
seen = []
@@ -1174,17 +1395,17 @@ python populate_packages () {
dir = os.sep
for f in (files + dirs):
path = "." + os.path.join(dir, f)
- if "/.debug/" in path or path.endswith("/.debug"):
+ if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
debug.append(path)
- for pkg in package_list:
+ for pkg in packages:
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
- filesvar = d.getVar('FILES_%s' % pkg) or ""
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
if "//" in filesvar:
msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
- package_qa_handle_error("files-invalid", msg, d)
+ oe.qa.handle_error("files-invalid", msg, d)
filesvar.replace("//", "/")
origfiles = filesvar.split()
@@ -1204,7 +1425,8 @@ python populate_packages () {
src = os.path.join(src, p)
dest = os.path.join(dest, p)
fstat = cpath.stat(src)
- os.mkdir(dest, fstat.st_mode)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
os.chown(dest, fstat.st_uid, fstat.st_gid)
if p not in seen:
seen.append(p)
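The mkdir()/chmod() split above is deliberate: os.mkdir's mode argument is filtered through the process umask, while a follow-up os.chmod applies the source directory's stat bits exactly. A sketch:

    # Sketch: why mkdir(mode) alone cannot faithfully copy directory modes.
    import os, stat

    os.umask(0o022)
    os.mkdir("demo-a", 0o775)          # group-write stripped by the umask
    os.mkdir("demo-b")
    os.chmod("demo-b", 0o775)          # applied exactly, umask not consulted
    stat.S_IMODE(os.stat("demo-a").st_mode)   # 0o755
    stat.S_IMODE(os.stat("demo-b").st_mode)   # 0o775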
@@ -1246,12 +1468,13 @@ python populate_packages () {
os.umask(oldumask)
os.chdir(workdir)
- # Handle LICENSE_EXCLUSION
+ # Handle excluding packages with incompatible licenses
package_list = []
- for pkg in packages.split():
- if d.getVar('LICENSE_EXCLUSION-' + pkg):
- msg = "%s has an incompatible license. Excluding from packaging." % pkg
- package_qa_handle_error("incompatible-license", msg, d)
+ for pkg in packages:
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
+ oe.qa.handle_error("incompatible-license", msg, d)
else:
package_list.append(pkg)
d.setVar('PACKAGES', ' '.join(package_list))
@@ -1268,14 +1491,14 @@ python populate_packages () {
if unshipped != []:
msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
else:
for f in unshipped:
msg = msg + "\n " + f
msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
- package_qa_handle_error("installed-vs-shipped", msg, d)
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
}
populate_packages[dirs] = "${D}"
@@ -1316,11 +1539,11 @@ python package_fixsymlinks () {
bb.note("%s contains dangling symlink to %s" % (pkg, l))
for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for p in newrdepends[pkg]:
if p not in rdepends:
rdepends[p] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
@@ -1337,20 +1560,27 @@ EXPORT_FUNCTIONS package_name_hook
PKGDESTWORK = "${WORKDIR}/pkgdata"
+PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
+
python emit_pkgdata() {
from glob import glob
import json
+ import bb.compress.zstd
def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
defer_fragment = """
if [ -n "$D" ]; then
$INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
exit 0
fi
-""" % (pkg, mlprefix)
+""" % (pkgval, mlprefix)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
if postinst_ontarget:
bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
@@ -1358,18 +1588,18 @@ fi
postinst = '#!/bin/sh\n'
postinst += defer_fragment
postinst += postinst_ontarget
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
def add_set_e_to_scriptlets(pkg):
for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
- scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
if scriptlet:
scriptlet_split = scriptlet.split('\n')
if scriptlet_split[0].startswith("#!"):
scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
else:
scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
- d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
def write_if_exists(f, pkg, var):
def encode(str):
@@ -1377,9 +1607,9 @@ fi
c = codecs.getencoder("unicode_escape")
return c(str)[0].decode("latin1")
- val = d.getVar('%s_%s' % (var, pkg))
+ val = d.getVar('%s:%s' % (var, pkg))
if val:
- f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
return val
val = d.getVar('%s' % (var))
if val:
@@ -1398,16 +1628,17 @@ fi
ml_pkg = "%s-%s" % (variant, pkg)
subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
with open(subdata_file, 'w') as fd:
- fd.write("PKG_%s: %s" % (ml_pkg, pkg))
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
packages = d.getVar('PACKAGES')
pkgdest = d.getVar('PKGDEST')
pkgdatadir = d.getVar('PKGDESTWORK')
- data_file = pkgdatadir + d.expand("/${PN}" )
- f = open(data_file, 'w')
- f.write("PACKAGES: %s\n" % packages)
- f.close()
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
+
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
@@ -1423,73 +1654,71 @@ fi
workdir = d.getVar('WORKDIR')
for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg)
+ pkgval = d.getVar('PKG:%s' % pkg)
if pkgval is None:
pkgval = pkg
- d.setVar('PKG_%s' % pkg, pkg)
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
- subdata_file = pkgdatadir + "/runtime/%s" % pkg
- sf = open(subdata_file, 'w')
- write_if_exists(sf, pkg, 'PN')
- write_if_exists(sf, pkg, 'PE')
- write_if_exists(sf, pkg, 'PV')
- write_if_exists(sf, pkg, 'PR')
- write_if_exists(sf, pkg, 'PKGE')
- write_if_exists(sf, pkg, 'PKGV')
- write_if_exists(sf, pkg, 'PKGR')
- write_if_exists(sf, pkg, 'LICENSE')
- write_if_exists(sf, pkg, 'DESCRIPTION')
- write_if_exists(sf, pkg, 'SUMMARY')
- write_if_exists(sf, pkg, 'RDEPENDS')
- rprov = write_if_exists(sf, pkg, 'RPROVIDES')
- write_if_exists(sf, pkg, 'RRECOMMENDS')
- write_if_exists(sf, pkg, 'RSUGGESTS')
- write_if_exists(sf, pkg, 'RREPLACES')
- write_if_exists(sf, pkg, 'RCONFLICTS')
- write_if_exists(sf, pkg, 'SECTION')
- write_if_exists(sf, pkg, 'PKG')
- write_if_exists(sf, pkg, 'ALLOW_EMPTY')
- write_if_exists(sf, pkg, 'FILES')
- write_if_exists(sf, pkg, 'CONFFILES')
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+ d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))
+
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
add_set_e_to_scriptlets(pkg)
- write_if_exists(sf, pkg, 'pkg_postinst')
- write_if_exists(sf, pkg, 'pkg_postrm')
- write_if_exists(sf, pkg, 'pkg_preinst')
- write_if_exists(sf, pkg, 'pkg_prerm')
- write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- write_if_exists(sf, pkg, 'FILES_INFO')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
-
- write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
-
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
- sf.close()
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
+
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
+
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
# Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
- for p in rprov.strip().split():
+ for p in bb.utils.explode_deps(rprov):
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
bb.utils.mkdirhier(os.path.dirname(subdata_sym))
oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
if not allow_empty:
allow_empty = d.getVar('ALLOW_EMPTY')
root = "%s/%s" % (pkgdest, pkg)
@@ -1511,7 +1740,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
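Since the extended pkgdata is plain JSON under zstd, it can be read back with the same bb.compress.zstd helper. A hedged sketch (package name and path hypothetical):

    # Sketch: reading the extended pkgdata written by emit_pkgdata.
    import json
    import bb.compress.zstd

    with bb.compress.zstd.open("pkgdata/extended/foo.json.zstd", "rt",
                               encoding="utf-8") as f:
        extended = json.load(f)
    # extended["files_info"]["/usr/bin/foo"] -> {"size": ..., "debugsrc": [...]}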
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1519,15 +1749,15 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps --define '__font_provides %{nil}'"
# Collect perfile run-time dependency metadata
# Output:
-# FILERPROVIDESFLIST_pkg - list of all files w/ deps
-# FILERPROVIDES_filepath_pkg - per file dep
+# FILERPROVIDESFLIST:pkg - list of all files w/ deps
+# FILERPROVIDES:filepath:pkg - per file dep
#
-# FILERDEPENDSFLIST_pkg - list of all files w/ deps
-# FILERDEPENDS_filepath_pkg - per file dep
+# FILERDEPENDSFLIST:pkg - list of all files w/ deps
+# FILERDEPENDS:filepath:pkg - per file dep
python package_do_filedeps() {
if d.getVar('SKIP_FILEDEPS') == '1':
@@ -1542,7 +1772,7 @@ python package_do_filedeps() {
pkglist = []
for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
continue
if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
continue
@@ -1564,24 +1794,25 @@ python package_do_filedeps() {
for file in sorted(provides):
provides_files[pkg].append(file)
- key = "FILERPROVIDES_" + file + "_" + pkg
+ key = "FILERPROVIDES:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(provides[file]))
for file in sorted(requires):
requires_files[pkg].append(file)
- key = "FILERDEPENDS_" + file + "_" + pkg
+ key = "FILERDEPENDS:" + file + ":" + pkg
d.appendVar(key, " " + " ".join(requires[file]))
for pkg in requires_files:
- d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
for pkg in provides_files:
- d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
}
-SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
+SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
python package_do_shlibs() {
+ import itertools
import re, pipes
import subprocess
@@ -1606,14 +1837,14 @@ python package_do_shlibs() {
else:
shlib_pkgs = packages.split()
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
workdir = d.getVar('WORKDIR')
ver = d.getVar('PKGV')
if not ver:
msg = "PKGV not defined"
- package_qa_handle_error("pkgv-undefined", msg, d)
+ oe.qa.handle_error("pkgv-undefined", msg, d)
return
pkgdest = d.getVar('PKGDEST')
@@ -1648,11 +1879,12 @@ python package_do_shlibs() {
prov = (this_soname, ldir, pkgver)
if not prov in sonames:
# if library is private (only used by package) then do not build shlib for it
- if not private_libs or this_soname not in private_libs:
+ import fnmatch
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
sonames.add(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
- if snap_symlinks and (os.path.basename(file) != this_soname):
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
return (needs_ldconfig, needed, sonames, renames)
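Note that PRIVATE_LIBS entries are now matched with fnmatch, so they behave as shell-style globs rather than exact soname strings, letting a recipe exclude whole families of versioned internal libraries. A small sketch:

    # Sketch: glob semantics now applied to PRIVATE_LIBS entries.
    import fnmatch

    private_libs = "libinternal.so.* libplugin-*.so".split()
    this_soname = "libinternal.so.1"
    is_private = any(fnmatch.fnmatch(this_soname, p) for p in private_libs)
    # is_private == True, so no shlibs provider entry is written for it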
@@ -1732,22 +1964,17 @@ python package_do_shlibs() {
else:
snap_symlinks = False
- use_ldconfig = bb.utils.contains('DISTRO_FEATURES', 'ldconfig', True, False, d)
-
needed = {}
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
shlib_provider = oe.package.read_shlib_providers(d)
- bb.utils.unlockfile(lf)
for pkg in shlib_pkgs:
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
- pkgver = d.getVar('PKGV_' + pkg)
+ pkgver = d.getVar('PKGV:' + pkg)
if not pkgver:
pkgver = d.getVar('PV_' + pkg)
if not pkgver:
@@ -1761,9 +1988,9 @@ python package_do_shlibs() {
soname = None
if cpath.islink(file):
continue
- if targetos == "darwin" or targetos == "darwin8":
+ if hostos == "darwin" or hostos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
- elif targetos.startswith("mingw"):
+ elif hostos.startswith("mingw"):
mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
linuxlist.append(file)
@@ -1779,30 +2006,29 @@ python package_do_shlibs() {
for (old, new) in renames:
bb.note("Renaming %s to %s" % (old, new))
- os.rename(old, new)
+ bb.utils.rename(old, new)
pkgfiles[pkg].remove(old)
-
+
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
- fd = open(shlibs_file, 'w')
- for s in sonames:
- if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
- (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
- if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
- fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (pkg, pkgver)
- fd.close()
- if needs_ldconfig and use_ldconfig:
+ with open(shlibs_file, 'w') as fd:
+ for s in sorted(sonames):
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('ldconfig_postinst_fragment')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
assumed_libs = d.getVar('ASSUME_SHLIBS')
@@ -1824,7 +2050,7 @@ python package_do_shlibs() {
for pkg in shlib_pkgs:
bb.debug(2, "calculating shlib requirements for %s" % pkg)
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
private_libs = private_libs.split()
deps = list()
@@ -1834,20 +2060,21 @@ python package_do_shlibs() {
# /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
# but skipping it is still a better alternative than providing our own
# version and then adding a runtime dependency for the same system library
- if private_libs and n[0] in private_libs:
+ import fnmatch
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
if n[0] in shlib_provider.keys():
- shlib_provider_path = []
- for k in shlib_provider[n[0]].keys():
- shlib_provider_path.append(k)
- match = None
- for p in list(n[2]) + shlib_provider_path + libsearchpath:
- if p in shlib_provider[n[0]]:
- match = p
- break
- if match:
- (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
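The rewritten lookup collects every path that resolves the soname instead of stopping at the first hit, turning an ambiguous provider set into a hard error. A sketch with invented data:

    # Sketch: ambiguity detection in the shlib provider lookup (data invented).
    import itertools

    shlib_provider_map = {"/usr/lib": ("libfoo", "1.0"),
                          "/opt/lib": ("libfoo-opt", "1.0")}
    searchpaths = itertools.chain(["/opt/lib"],          # paths from the binary
                                  sorted(shlib_provider_map),
                                  ["/lib", "/usr/lib"])  # libsearchpath
    matches = {p for p in searchpaths if p in shlib_provider_map}
    assert len(matches) == 2   # would trigger the bb.error() above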
@@ -1866,11 +2093,10 @@ python package_do_shlibs() {
deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
if os.path.exists(deps_file):
os.remove(deps_file)
- if len(deps):
- fd = open(deps_file, 'w')
- for dep in sorted(deps):
- fd.write(dep + '\n')
- fd.close()
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
}
python package_do_pkgconfig () {
@@ -1892,17 +2118,16 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgconfig_provided[pkg] = []
pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
+ for file in sorted(pkgfiles[pkg]):
m = pc_re.match(file)
if m:
pd = bb.data.init()
name = m.group(1)
- pkgconfig_provided[pkg].append(name)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
if not os.access(file, os.R_OK):
continue
- f = open(file, 'r')
- lines = f.readlines()
- f.close()
+ with open(file, 'r') as f:
+ lines = f.readlines()
for l in lines:
m = var_re.match(l)
if m:
@@ -1920,31 +2145,24 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
- f = open(pkgs_file, 'w')
- for p in pkgconfig_provided[pkg]:
- f.write('%s\n' % p)
- f.close()
-
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+ with open(pkgs_file, 'w') as f:
+ for p in sorted(pkgconfig_provided[pkg]):
+ f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
+ for file in sorted(os.listdir(dir)):
m = re.match(r'^(.*)\.pclist$', file)
if m:
pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
pkgconfig_provided[pkg] = []
for l in lines:
pkgconfig_provided[pkg].append(l.rstrip())
- bb.utils.unlockfile(lf)
-
for pkg in packages.split():
deps = []
for n in pkgconfig_needed[pkg]:
@@ -1958,10 +2176,9 @@ python package_do_pkgconfig () {
bb.note("couldn't find pkgconfig module '%s' in any package" % n)
deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
}
def read_libdep_files(d):
@@ -1972,9 +2189,8 @@ def read_libdep_files(d):
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
depsfile = d.expand("${PKGDEST}/" + pkg + extension)
if os.access(depsfile, os.R_OK):
- fd = open(depsfile)
- lines = fd.readlines()
- fd.close()
+ with open(depsfile) as fd:
+ lines = fd.readlines()
for l in lines:
l.rstrip()
deps = bb.utils.explode_dep_versions2(l)
@@ -1988,7 +2204,7 @@ python read_shlibdeps () {
packages = d.getVar('PACKAGES').split()
for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
for dep in sorted(pkglibdeps[pkg]):
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
@@ -1996,7 +2212,7 @@ python read_shlibdeps () {
for v in pkglibdeps[pkg][dep]:
if v not in rdepends[dep]:
rdepends[dep].append(v)
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
python package_depchains() {
@@ -2020,7 +2236,7 @@ python package_depchains() {
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(depends):
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
@@ -2035,13 +2251,13 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
for depend in sorted(rdepends):
if depend.find('virtual-locale-') != -1:
@@ -2056,8 +2272,8 @@ python package_depchains() {
if pkgname not in rreclist and pkgname != pkg:
rreclist[pkgname] = []
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def add_dep(list, dep):
if dep not in list:
@@ -2069,7 +2285,7 @@ python package_depchains() {
rdepends = []
for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -2103,7 +2319,7 @@ python package_depchains() {
for suffix in pkgs:
for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
@@ -2116,31 +2332,34 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
# Since bitbake can't determine which variables are accessed during package
# iteration, we need to list them here:
-PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS"
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
-def gen_packagevar(d):
+def gen_packagevar(d, pkgvars="PACKAGEVARS"):
ret = []
pkgs = (d.getVar("PACKAGES") or "").split()
- vars = (d.getVar("PACKAGEVARS") or "").split()
+ vars = (d.getVar(pkgvars) or "").split()
+ for v in vars:
+ ret.append(v)
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
# Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
# affected recipes.
- ret.append('LICENSE_EXCLUSION-%s' % p)
+ ret.append('_exclude_incompatible-%s' % p)
return " ".join(ret)
PACKAGE_PREPROCESS_FUNCS ?= ""
# Functions for setting up PKGD
PACKAGEBUILDPKGD ?= " \
+ package_prepare_pkgdata \
perform_packagecopy \
${PACKAGE_PREPROCESS_FUNCS} \
split_and_strip_files \
@@ -2166,7 +2385,7 @@ python do_package () {
# cache. This is useful if an item this class depends on changes in a
# way that the output of this class changes. rpmdeps is a good example
# as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "2"
+ # PACKAGE_BBCLASS_VERSION = "4"
# Init cachedpath
global cpath
@@ -2189,10 +2408,10 @@ python do_package () {
if not workdir or not outdir or not dest or not dvar or not pn:
msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
- package_qa_handle_error("var-undefined", msg, d)
+ oe.qa.handle_error("var-undefined", msg, d)
return
- bb.build.exec_func("package_get_auto_pr", d)
+ bb.build.exec_func("package_convert_pr_autoinc", d)
###########################################################################
# Optimisations
@@ -2242,12 +2461,10 @@ python do_package () {
for f in (d.getVar('PACKAGEFUNCS') or '').split():
bb.build.exec_func(f, d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
+ oe.qa.exit_if_errors(d)
}
-do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
+do_package[dirs] = "${SHLIBSWORKDIR} ${D}"
do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package after do_install
@@ -2261,19 +2478,31 @@ python do_package_setscene () {
}
addtask do_package_setscene
-do_packagedata () {
- :
+# Copy from PKGDESTWORK to a temporary directory, as that directory can be cleaned by both
+# do_package_setscene and do_packagedata_setscene, leading to races
+python do_packagedata () {
+ bb.build.exec_func("package_get_auto_pr", d)
+
+ src = d.expand("${PKGDESTWORK}")
+ dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
+ oe.path.copyhardlinktree(src, dest)
+
+ bb.build.exec_func("packagedata_translate_pr_autoinc", d)
+}
+do_packagedata[cleandirs] += "${WORKDIR}/pkgdata-pdata-input"
+
+# Translate the EXTENDPRAUTO and AUTOINC to the final values
+packagedata_translate_pr_autoinc() {
+ find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \
+ sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \
+ -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i
}
addtask packagedata before do_build after do_package
SSTATETASKS += "do_packagedata"
-# PACKAGELOCK protects readers of PKGDATA_DIR against writes
-# whilst code is reading in do_package
-PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
-do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
+do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
-do_packagedata[sstate-lockfile] = "${PACKAGELOCK}"
do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
python do_packagedata_setscene () {
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 6f81591653..2e75e222bc 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -6,6 +6,8 @@ inherit package
IMAGE_PKGTYPE ?= "deb"
+DPKG_BUILDCMD ??= "dpkg-deb"
+
DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
@@ -79,7 +81,7 @@ def deb_write_pkg(pkg, d):
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -269,7 +271,8 @@ def deb_write_pkg(pkg, d):
conffiles.close()
os.chdir(basedir)
- subprocess.check_output("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH"), root, pkgoutdir),
+ subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
+ root, pkgoutdir),
stderr=subprocess.STDOUT,
shell=True)
@@ -280,8 +283,9 @@ def deb_write_pkg(pkg, d):
# Otherwise allarch packages may change depending on override configuration
deb_write_pkg[vardepsexclude] = "OVERRIDES"
-# Indirect references to these vars
-do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
+# Have to list any variables referenced as X:<pkg> that aren't in pkgdata here
+DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
+do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
@@ -310,12 +314,9 @@ python do_package_write_deb () {
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[umask] = "022"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
+addtask package_write_deb after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_deb"
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_deb"
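DEBEXTRAVARS feeds gen_packagevar(), which is defined in package.bbclass and not shown in this diff; the assumed behaviour is to expand each listed variable into its per-package override forms so task checksums track them. A sketch under that assumption:

```python
def gen_packagevar(d, var="PACKAGEVARS"):
    # Assumed shape: depend on VAR and on VAR:<pkg> for every split package
    ret = []
    pkgs = (d.getVar("PACKAGES") or "").split()
    for v in (d.getVar(var) or "").split():
        ret.append(v)
        for p in pkgs:
            ret.append("%s:%s" % (v, p))
    return " ".join(ret)
```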
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 508b7dcaff..f67cb0e5c9 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -4,17 +4,18 @@ IMAGE_PKGTYPE ?= "ipk"
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
+IPKGCONF_SDK_TARGET = "${WORKDIR}/opkg-sdk-target.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
# Program to be used to build opkg packages
-OPKGBUILDCMD ??= 'opkg-build -Z xz -a "--threads 0"'
+OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
python do_package_ipk () {
workdir = d.getVar('WORKDIR')
@@ -45,6 +46,7 @@ def ipk_write_pkg(pkg, d):
import subprocess
import textwrap
import collections
+ import glob
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
@@ -63,7 +65,7 @@ def ipk_write_pkg(pkg, d):
try:
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -101,8 +103,7 @@ def ipk_write_pkg(pkg, d):
bb.utils.mkdirhier(pkgoutdir)
os.chdir(root)
cleanupcontrol(root)
- from glob import glob
- g = glob('*')
+ g = glob.glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
return
@@ -154,7 +155,6 @@ def ipk_write_pkg(pkg, d):
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
else:
ctrlfile.write(c % tuple(pullData(fs, localdata)))
- # more fields
custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
if custom_fields_chunk is not None:
@@ -230,14 +230,18 @@ def ipk_write_pkg(pkg, d):
shell=True)
if d.getVar('IPK_SIGN_PACKAGES') == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
+ ipkver = "%s-%s" % (localdata.getVar('PKGV'), localdata.getVar('PKGR'))
+ ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, localdata.getVar('PACKAGE_ARCH'))
sign_ipk(d, ipk_to_sign)
finally:
cleanupcontrol(root)
bb.utils.unlockfile(lf)
+# Have to list any variables referenced as X:<pkg> that aren't in pkgdata here
+IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
+ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
+
# Otherwise allarch packages may change depending on override configuration
ipk_write_pkg[vardepsexclude] = "OVERRIDES"
@@ -269,11 +273,9 @@ python do_package_write_ipk () {
}
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[umask] = "022"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
+addtask package_write_ipk after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_ipk"
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_ipk"
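The signing fix above reads PKGV, PKGR and PACKAGE_ARCH from localdata, which has the per-package overrides applied, rather than from the plain datastore. The PKG:<pkg> lookup used throughout these hunks follows a simple fallback rule, sketched here:

```python
def resolved_pkg_name(localdata, pkg):
    # PKG:<pkg> (set e.g. by debian.bbclass renaming) wins; otherwise the
    # split package name is used unchanged.
    return localdata.getVar('PKG:%s' % pkg) or pkg
```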
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
new file mode 100644
index 0000000000..a1ea8fc041
--- /dev/null
+++ b/meta/classes/package_pkgdata.bbclass
@@ -0,0 +1,167 @@
+WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
+
+def package_populate_pkgdata_dir(pkgdatadir, d):
+ import glob
+
+ postinsts = []
+ seendirs = set()
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+
+ bb.utils.mkdirhier(pkgdatadir)
+ for pkgarch in pkgarchs:
+ for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ dest = l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, pkgdatadir, dest, seendirs)
+ continue
+ try:
+ staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
+ except FileExistsError:
+ continue
+
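package_populate_pkgdata_dir() replays the sstate .packagedata manifests, hardlinking each listed path into the private pkgdata directory. A self-contained sketch of that replay, modelling staging_copydir/staging_copyfile (which come from staging.bbclass and are not shown here) with os.makedirs/os.link:

```python
import os

def replay_manifest(manifest, stagingdir, destdir):
    # Each manifest line is an absolute path under PKGDATA_DIR;
    # directory entries end with "/".
    with open(manifest) as f:
        for line in f:
            src = line.strip()
            dest = os.path.join(destdir, os.path.relpath(src, stagingdir))
            if src.endswith("/"):
                os.makedirs(dest, exist_ok=True)
            else:
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                try:
                    os.link(src, dest)  # hardlink, matching copyhardlinktree
                except FileExistsError:
                    pass
```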
+python package_prepare_pkgdata() {
+ import copy
+ import glob
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ if mytaskname.endswith("_setscene"):
+ mytaskname = mytaskname.replace("_setscene", "")
+ workdir = d.getVar("WORKDIR")
+ pn = d.getVar("PN")
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgdatadir = d.getVar("WORKDIR_PKGDATA")
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps:
+ package_populate_pkgdata_dir(pkgdatadir, d)
+ return
+
+ start = None
+ configuredeps = []
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to figure out which sysroot files we need to expose to this task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to inter-sstate task dependencies, similar to that used by setscene
+ # tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose in the recipe specific sysroot.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ sstatetasks = d.getVar("SSTATETASKS").split()
+ # Add recipe specific tasks referenced by setscene_depvalid()
+ sstatetasks.append("do_stash_locale")
+
+ # If start is an sstate task (like do_package), we need to add in its direct
+ # dependencies, else the code below won't recurse into them.
+ for dep in set(start):
+ for dep2 in setscenedeps[dep][3]:
+ start.add(dep2)
+ start.remove(dep)
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] not in sstatetasks:
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in set(start):
+ if setscenedeps[dep][1] == "do_packagedata":
+ if dep not in configuredeps:
+ configuredeps.append(dep)
+
+ msgbuf = []
+ # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
+ while next:
+ new = []
+ for dep in next:
+ data = setscenedeps[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
+ done.append(datadep)
+ new.append(datadep)
+ if retval:
+ msgbuf.append("Skipping setscene dependency %s" % datadep)
+ continue
+ if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
+ configuredeps.append(datadep)
+ msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
+ else:
+ msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
+ next = new
+
+ # This logging is too verbose for day-to-day use, sadly
+ #bb.debug(2, "\n".join(msgbuf))
+
+ seendirs = set()
+ postinsts = []
+ multilibs = {}
+ manifests = {}
+
+ msg_adding = []
+
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ msg_adding.append(c)
+
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
+ destsysroot = pkgdatadir
+
+ if manifest:
+ targetdir = destsysroot
+ with open(manifest, "r") as f:
+ manifests[dep] = manifest
+ for l in f:
+ l = l.strip()
+ dest = targetdir + l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
+
+}
+package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
+
+
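The heart of package_prepare_pkgdata() above is a breadth-first walk over the collapsed task graph, asking setscene_depvalid() whether each edge would be restored from sstate. A schematic of that traversal with the graph and the validity check stubbed out; the real code carries bitbake task tuples rather than plain ids:

```python
def walk_setscene_deps(start, graph, would_skip, wanted):
    # graph: dict mapping a task id to an iterable of dependency ids
    # would_skip: stand-in for setscene_depvalid() (True = not restored)
    # wanted: predicate selecting the tasks we care about (do_packagedata)
    done = list(start)
    frontier = list(start)
    selected = []
    while frontier:
        new = []
        for dep in frontier:
            for datadep in graph.get(dep, ()):
                if datadep in done:
                    continue
                done.append(datadep)
                new.append(datadep)
                if would_skip(dep, datadep):
                    continue  # edge would not be restored from sstate
                if wanted(datadep) and datadep not in selected:
                    selected.append(datadep)
        frontier = new
    return selected
```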
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index 1a64cb271a..e9ff1f7e65 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -36,14 +36,14 @@ def write_rpm_perfiledata(srcname, d):
pkgd = d.getVar('PKGD')
def dump_filerdeps(varname, outfile, d):
- outfile.write("#!/usr/bin/env python\n\n")
+ outfile.write("#!/usr/bin/env python3\n\n")
outfile.write("# Dependency table\n")
outfile.write('deps = {\n')
for pkg in packages.split():
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
depends_dict = bb.utils.explode_dep_versions(deps)
file = dfile.replace("@underscore@", "_")
@@ -249,10 +249,10 @@ python write_specfile () {
def get_perfile(varname, pkg, d):
deps = []
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist_key = 'FILE' + varname + 'FLIST' + ":" + pkg
dependsflist = (d.getVar(dependsflist_key) or "")
for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
+ key = "FILE" + varname + ":" + dfile + ":" + pkg
depends = d.getVar(key)
if depends:
deps.append(depends)
@@ -286,25 +286,27 @@ python write_specfile () {
# Construct the SPEC file...
srcname = d.getVar('PN')
- srcsummary = (d.getVar('SUMMARY') or d.getVar('DESCRIPTION') or ".")
- srcversion = d.getVar('PKGV').replace('-', '+')
- srcrelease = d.getVar('PKGR')
- srcepoch = (d.getVar('PKGE') or "")
- srclicense = d.getVar('LICENSE')
- srcsection = d.getVar('SECTION')
- srcmaintainer = d.getVar('MAINTAINER')
- srchomepage = d.getVar('HOMEPAGE')
- srcdescription = d.getVar('DESCRIPTION') or "."
- srccustomtagschunk = get_package_additional_metadata("rpm", d)
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
+ srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
+ srcversion = localdata.getVar('PKGV').replace('-', '+')
+ srcrelease = localdata.getVar('PKGR')
+ srcepoch = (localdata.getVar('PKGE') or "")
+ srclicense = localdata.getVar('LICENSE')
+ srcsection = localdata.getVar('SECTION')
+ srcmaintainer = localdata.getVar('MAINTAINER')
+ srchomepage = localdata.getVar('HOMEPAGE')
+ srcdescription = localdata.getVar('DESCRIPTION') or "."
+ srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
srcdepends = d.getVar('DEPENDS')
- srcrdepends = []
- srcrrecommends = []
- srcrsuggests = []
- srcrprovides = []
- srcrreplaces = []
- srcrconflicts = []
- srcrobsoletes = []
+ srcrdepends = ""
+ srcrrecommends = ""
+ srcrsuggests = ""
+ srcrprovides = ""
+ srcrreplaces = ""
+ srcrconflicts = ""
+ srcrobsoletes = ""
srcrpreinst = []
srcrpostinst = []
@@ -330,7 +332,7 @@ python write_specfile () {
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
+ pkgname = localdata.getVar('PKG:%s' % pkg)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
@@ -363,13 +365,13 @@ python write_specfile () {
# Map the dependencies into their final form
mapping_rename_hook(localdata)
- splitrdepends = localdata.getVar('RDEPENDS')
- splitrrecommends = localdata.getVar('RRECOMMENDS')
- splitrsuggests = localdata.getVar('RSUGGESTS')
- splitrprovides = localdata.getVar('RPROVIDES')
- splitrreplaces = localdata.getVar('RREPLACES')
- splitrconflicts = localdata.getVar('RCONFLICTS')
- splitrobsoletes = []
+ splitrdepends = localdata.getVar('RDEPENDS') or ""
+ splitrrecommends = localdata.getVar('RRECOMMENDS') or ""
+ splitrsuggests = localdata.getVar('RSUGGESTS') or ""
+ splitrprovides = localdata.getVar('RPROVIDES') or ""
+ splitrreplaces = localdata.getVar('RREPLACES') or ""
+ splitrconflicts = localdata.getVar('RCONFLICTS') or ""
+ splitrobsoletes = ""
splitrpreinst = localdata.getVar('pkg_preinst')
splitrpostinst = localdata.getVar('pkg_postinst')
@@ -409,7 +411,6 @@ python write_specfile () {
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
- bb.note("Creating RPM package for %s" % splitname)
spec_files_top.append('%files')
if extra_pkgdata:
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
@@ -418,7 +419,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_top.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_top.append('')
continue
@@ -438,9 +439,9 @@ python write_specfile () {
spec_preamble_bottom.append(splitcustomtagschunk)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(splitrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(splitrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -510,7 +511,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_bottom.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_bottom.append('')
del localdata
@@ -532,9 +533,9 @@ python write_specfile () {
tail_source(d)
# Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
+ robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes)
+ rprovides = bb.utils.explode_dep_versions2(srcrprovides)
+ rreplaces = bb.utils.explode_dep_versions2(srcrreplaces)
for dep in rreplaces:
if not dep in robsoletes:
robsoletes[dep] = rreplaces[dep]
@@ -556,7 +557,7 @@ python write_specfile () {
print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
- print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
+ print_deps(srcrprovides, "Provides", spec_preamble_top, d)
print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
@@ -622,6 +623,10 @@ python write_specfile () {
# Otherwise allarch packages may change depending on override configuration
write_specfile[vardepsexclude] = "OVERRIDES"
+# Have to list any variables referenced as X:<pkg> that aren't in pkgdata here
+RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
+write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
+
python do_package_rpm () {
workdir = d.getVar('WORKDIR')
tmpdir = d.getVar('TMPDIR')
@@ -679,10 +684,12 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w6T.xzdio'"
- cmd = cmd + " --define '_source_payload w6T.xzdio'"
+ cmd = cmd + " --define '_binary_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
+ cmd = cmd + " --define '_source_payload w19T%d.zstdio'" % int(d.getVar("ZSTD_THREADS"))
cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
+ cmd = cmd + " --define 'use_source_date_epoch_as_buildtime 1'"
cmd = cmd + " --define '_buildhost reproducible'"
+ cmd = cmd + " --define '__font_provides %{nil}'"
if perfiledeps:
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
@@ -740,11 +747,9 @@ python do_package_write_rpm () {
do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[umask] = "022"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
+addtask package_write_rpm after do_packagedata do_package do_deploy_source_date_epoch before do_build
+do_build[rdeptask] += "do_package_write_rpm"
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_rpm"
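The payload define 'w19T%d.zstdio' selects zstd compression at level 19 with the given number of threads. A sketch of how the defines above are composed, assuming ZSTD_THREADS is set in the datastore:

```python
def payload_defines(d):
    # "w<level>T<threads>.zstdio": zstd, level 19, N compression threads
    threads = int(d.getVar("ZSTD_THREADS"))
    flags = "w19T%d.zstdio" % threads
    return [
        "--define '_binary_payload %s'" % flags,
        "--define '_source_payload %s'" % flags,
    ]
```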
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
index ce3ab4c8e2..d6c1b306fc 100644
--- a/meta/classes/package_tar.bbclass
+++ b/meta/classes/package_tar.bbclass
@@ -57,10 +57,8 @@ python do_package_tar () {
python () {
if d.getVar('PACKAGES') != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
- deps.append('tar-native:do_populate_sysroot')
- deps.append('virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
+ deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_tar', 'depends', deps)
d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
}
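The rewrite above swaps a read-modify-write of the 'depends' varflag for a single appendVarFlag() call. Both forms have the same effect, sketched here against an assumed BitBake datastore 'd':

```python
def add_tar_depends(d):
    # One-line form used by the new code
    deps = (' tar-native:do_populate_sysroot'
            ' virtual/fakeroot-native:do_populate_sysroot')
    d.appendVarFlag('do_package_write_tar', 'depends', deps)

def add_tar_depends_old(d):
    # The removed read-modify-write equivalent
    deps = ['tar-native:do_populate_sysroot',
            'virtual/fakeroot-native:do_populate_sysroot']
    old = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
    d.setVarFlag('do_package_write_tar', 'depends', " ".join(old + deps))
```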
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
index a903e5cfd2..c2760e2bf0 100644
--- a/meta/classes/packagedata.bbclass
+++ b/meta/classes/packagedata.bbclass
@@ -24,10 +24,10 @@ python read_subpackage_metadata () {
continue
#
# If we set unsuffixed variables here there is a chance they could clobber override versions
- # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>
+ # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION:<pkgname>
# We therefore don't clobber for the unsuffixed variable versions
#
- if key.endswith("_" + pkg):
+ if key.endswith(":" + pkg):
d.setVar(key, sdata[key])
else:
d.setVar(key, sdata[key], parsing=True)
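The override-aware suffix check above protects per-package values: DESCRIPTION:<pkgname> style keys are applied normally, while unsuffixed keys are set with parsing=True so they cannot clobber existing overrides. A sketch of that rule:

```python
def apply_pkgdata(d, pkg, sdata):
    for key, value in sdata.items():
        if key.endswith(":" + pkg):
            # Per-package override form: set normally
            d.setVar(key, value)
        else:
            # Plain form: parsing=True keeps :<pkg> overrides winning
            d.setVar(key, value, parsing=True)
```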
diff --git a/meta/classes/packagefeed-stability.bbclass b/meta/classes/packagefeed-stability.bbclass
deleted file mode 100644
index 5648602564..0000000000
--- a/meta/classes/packagefeed-stability.bbclass
+++ /dev/null
@@ -1,252 +0,0 @@
-# Class to avoid copying packages into the feed if they haven't materially changed
-#
-# Copyright (C) 2015 Intel Corporation
-# Released under the MIT license (see COPYING.MIT for details)
-#
-# This class effectively intercepts packages as they are written out by
-# do_package_write_*, causing them to be written into a different
-# directory where we can compare them to whatever older packages might
-# be in the "real" package feed directory, and avoid copying the new
-# package to the feed if it has not materially changed. The idea is to
-# avoid unnecessary churn in the packages when dependencies trigger task
-# reexecution (and thus repackaging). Enabling the class is simple:
-#
-# INHERIT += "packagefeed-stability"
-#
-# Caveats:
-# 1) Latest PR values in the build system may not match those in packages
-# seen on the target (naturally)
-# 2) If you rebuild from sstate without the existing package feed present,
-# you will lose the "state" of the package feed i.e. the preserved old
-# package versions. Not the end of the world, but would negate the
-# entire purpose of this class.
-#
-# Note that running -c cleanall on a recipe will purposely delete the old
-# package files so they will definitely be copied the next time.
-
-python() {
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
- # Package backend agnostic intercept
- # This assumes that the package_write task is called package_write_<pkgtype>
- # and that the directory in which packages should be written is
- # pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- pkgwritefunc = 'do_package_write_%s' % pkgtype
- sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False)
- deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper()
- deploydirvarref = '${' + deploydirvar + '}'
- pkgcomparefunc = 'do_package_compare_%s' % pkgtype
-
- if bb.data.inherits_class('image', d):
- d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_base', d):
- d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_ext', d):
- d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc)
-
- d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
- # Packaging is disabled for this recipe, we shouldn't do anything
- continue
-
- if deploydirvarref in sstate_outputdirs:
- deplor_dir_pkgtype = d.expand(deploydirvarref + '-prediff')
- # Set intermediate output directory
- d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deplor_dir_pkgtype))
- # Update SSTATE_DUPWHITELIST to avoid shared location conflicted error
- d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deplor_dir_pkgtype)
-
- d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False))
- d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False))
- d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot')
- bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d)
-}
-
-# This isn't the real task function - it's a template that we use in the
-# anonymous python code above
-fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK')
- pkgtype = currenttask.rsplit('_', 1)[1]
- package_compare_impl(pkgtype, d)
-}
-
-def package_compare_impl(pkgtype, d):
- import errno
- import fnmatch
- import glob
- import subprocess
- import oe.sstatesig
-
- pn = d.getVar('PN')
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff/'
-
- # Find out PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR')
- packages = []
- try:
- with open(os.path.join(pkgdatadir, pn), 'r') as f:
- for line in f:
- if line.startswith('PACKAGES:'):
- packages = line.split(':', 1)[1].split()
- break
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- if not packages:
- bb.debug(2, '%s: no packages, nothing to do' % pn)
- return
-
- pkgrvalues = {}
- rpkgnames = {}
- rdepends = {}
- pkgvvalues = {}
- for pkg in packages:
- with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f:
- for line in f:
- if line.startswith('PKGR:'):
- pkgrvalues[pkg] = line.split(':', 1)[1].strip()
- if line.startswith('PKGV:'):
- pkgvvalues[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('PKG_%s:' % pkg):
- rpkgnames[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('RDEPENDS_%s:' % pkg):
- rdepends[pkg] = line.split(':', 1)[1].strip()
-
- # Prepare a list of the runtime package names for packages that were
- # actually produced
- rpkglist = []
- for pkg, rpkg in rpkgnames.items():
- if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')):
- rpkglist.append((rpkg, pkg))
- rpkglist.sort(key=lambda x: len(x[0]), reverse=True)
-
- pvu = d.getVar('PV', False)
- if '$' + '{SRCPV}' in pvu:
- pvprefix = pvu.split('$' + '{SRCPV}', 1)[0]
- else:
- pvprefix = None
-
- pkgwritetask = 'package_write_%s' % pkgtype
- files = []
- docopy = False
- manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX')
- # Copy recipe's all packages if one of the packages are different to make
- # they have the same PR.
- with open(manifest, 'r') as f:
- for line in f:
- if line.startswith(prepath):
- srcpath = line.rstrip()
- if os.path.isfile(srcpath):
- destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath))
-
- # This is crude but should work assuming the output
- # package file name starts with the package name
- # and rpkglist is sorted by length (descending)
- pkgbasename = os.path.basename(destpath)
- pkgname = None
- for rpkg, pkg in rpkglist:
- if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix):
- rpkg = rpkg[len(mlprefix):]
- if pkgbasename.startswith(rpkg):
- pkgr = pkgrvalues[pkg]
- destpathspec = destpath.replace(pkgr, '*')
- if pvprefix:
- pkgv = pkgvvalues[pkg]
- if pkgv.startswith(pvprefix):
- pkgvsuffix = pkgv[len(pvprefix):]
- if '+' in pkgvsuffix:
- newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1]
- destpathspec = destpathspec.replace(pkgv, newpkgv)
- pkgname = pkg
- break
- else:
- bb.warn('Unable to map %s back to package' % pkgbasename)
- destpathspec = destpath
-
- oldfile = None
- if not docopy:
- oldfiles = glob.glob(destpathspec)
- if oldfiles:
- oldfile = oldfiles[-1]
- result = subprocess.call(['pkg-diff.sh', oldfile, srcpath])
- if result != 0:
- docopy = True
- bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath))
- else:
- docopy = True
- bb.note("No old packages found for %s, will copy packages" % pkgname)
-
- files.append((pkgname, pkgbasename, srcpath, destpath))
-
- # Remove all the old files and copy again if docopy
- if docopy:
- bb.note('Copying packages for recipe %s' % pn)
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- bb.note('Removed old package %s' % fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- # Create new manifest
- with open(pcmanifest, 'w') as f:
- for pkgname, pkgbasename, srcpath, destpath in files:
- destdir = os.path.dirname(destpath)
- bb.utils.mkdirhier(destdir)
- # Remove allarch rpm pkg if it is already existed (for
- # multilib), they're identical in theory, but sstate.bbclass
- # copies it again, so keep align with that.
- if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH') == 'all':
- os.unlink(destpath)
- if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
- # Use a hard link to save space
- os.link(srcpath, destpath)
- else:
- shutil.copyfile(srcpath, destpath)
- f.write('%s\n' % destpath)
- else:
- bb.note('Not copying packages for recipe %s' % pn)
-
-do_cleansstate[postfuncs] += "pfs_cleanpkgs"
-python pfs_cleanpkgs () {
- import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff'
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- os.remove(pcmanifest)
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-}
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
index 94a59e0c03..557b1b6382 100644
--- a/meta/classes/packagegroup.bbclass
+++ b/meta/classes/packagegroup.bbclass
@@ -8,7 +8,7 @@ PACKAGES = "${PN}"
# By default, packagegroup packages do not depend on a certain architecture.
# Only if dependencies are modified by MACHINE_FEATURES, packages
-# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass
+# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass
PACKAGE_ARCH ?= "all"
# Fully expanded - so it applies the overrides as well
@@ -32,7 +32,7 @@ python () {
for suffix in types]
d.setVar('PACKAGES', ' '.join(packages))
for pkg in packages:
- d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
+ d.setVar('ALLOW_EMPTY:%s' % pkg, '1')
}
# We don't want to look at shared library dependencies for the
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index cd241f1c84..8de7025491 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -5,6 +5,13 @@ QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
+# There is a bug in patch 2.7.3 and earlier where index lines
+# in patches can change file modes when they shouldn't:
+# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
+# This leaks into debug sources in particular. Add the dependency
+# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
+PATCHDEPENDENCY:append:class-target = " patch-replacement-native:do_populate_sysroot"
+
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
@@ -124,6 +131,9 @@ python patch_do_patch() {
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
+ if not os.path.isdir(patchdir):
+ bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
+ (patchdir, parm["patchdir"], parm['patchname']))
else:
patchdir = s
@@ -140,12 +150,12 @@ python patch_do_patch() {
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
+ bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], repr(exc).replace("\\n", "\n")))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
+ bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, repr(e).replace("\\n", "\n")))
bb.utils.remove(process_tmpdir, True)
del os.environ['TMPDIR']
@@ -153,7 +163,6 @@ python patch_do_patch() {
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
addtask patch after do_unpack
-do_patch[umask] = "022"
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"
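The new patchdir validation resolves a relative patchdir parameter against S and fails early with the offending patch named. A sketch of that resolution, using a plain exception in place of bb.fatal():

```python
import os

def resolve_patchdir(s, parm):
    # parm is the patch URL's parameter dict (patchdir, patchname, ...)
    patchdir = parm.get("patchdir")
    if not patchdir:
        return s
    if not os.path.isabs(patchdir):
        patchdir = os.path.join(s, patchdir)
    if not os.path.isdir(patchdir):
        raise FileNotFoundError(
            "Target directory '%s' not found, patchdir '%s' is incorrect "
            "in patch file '%s'" % (patchdir, parm["patchdir"], parm["patchname"]))
    return patchdir
```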
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
index fb50cd4965..886bf195b3 100644
--- a/meta/classes/pixbufcache.bbclass
+++ b/meta/classes/pixbufcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS_append_class-target = " qemu-native"
+DEPENDS:append:class-target = " qemu-native"
inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
@@ -29,32 +29,31 @@ else
fi
}
-python populate_packages_append() {
+python populate_packages:append() {
pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
for pkg in pixbuf_pkgs:
bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ postinst = d.getVar('pkg_postinst:%s' % pkg) or d.getVar('pkg_postinst')
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('pixbufcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ postrm = d.getVar('pkg_postrm:%s' % pkg) or d.getVar('pkg_postrm')
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('pixbufcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
}
gdkpixbuf_complete() {
GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
}
-DEPENDS_append_class-native = " gdk-pixbuf-native"
-SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
+DEPENDS:append:class-native = " gdk-pixbuf-native"
+SYSROOT_PREPROCESS_FUNCS:append:class-native = " pixbufcache_sstate_postinst"
-# See base.bbclass for the other half of this
pixbufcache_sstate_postinst() {
mkdir -p ${SYSROOT_DESTDIR}${bindir}
dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
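populate_packages:append() above injects the shared cache-update snippet into each package's postinst and postrm, falling back from the per-package hook to the generic one and prepending a shebang if needed. The pattern, sketched generically:

```python
def inject_hook(d, pkg, hook, snippet):
    # Per-package hook wins; fall back to the generic one
    script = d.getVar('%s:%s' % (hook, pkg)) or d.getVar(hook)
    if not script:
        script = '#!/bin/sh\n'
    script += snippet
    d.setVar('%s:%s' % (hook, pkg), script)

# Usage mirroring the loop above:
# inject_hook(d, pkg, 'pkg_postinst', d.getVar('pixbufcache_common'))
# inject_hook(d, pkg, 'pkg_postrm', d.getVar('pixbufcache_common'))
```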
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
index ad1f84f506..fa94527ce9 100644
--- a/meta/classes/pkgconfig.bbclass
+++ b/meta/classes/pkgconfig.bbclass
@@ -1,2 +1,2 @@
-DEPENDS_prepend = "pkgconfig-native "
+DEPENDS:prepend = "pkgconfig-native "
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index 80fa443e4c..16f929bf59 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -1,4 +1,6 @@
-inherit meta image-postinst-intercepts
+PACKAGES = ""
+
+inherit image-postinst-intercepts image-artifact-names
# Wildcards specifying complementary packages to install for every package that has been explicitly
# installed into the rootfs
@@ -8,6 +10,7 @@ COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
def complementary_globs(featurevar, d):
all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
@@ -20,8 +23,9 @@ def complementary_globs(featurevar, d):
SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
+SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
-PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
+PACKAGE_ARCHS:append:task-populate-sdk = " sdk-provides-dummy-target"
SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
# List of locales to install, or "all" for all of them, or unset for none.
@@ -35,7 +39,7 @@ SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
-B_task-populate-sdk = "${SDK_DIR}"
+B:task-populate-sdk = "${SDK_DIR}"
SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
@@ -45,10 +49,27 @@ TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalon
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
+# Default suffix for the archived SDK
+SDK_ARCHIVE_TYPE ?= "tar.xz"
+SDK_XZ_COMPRESSION_LEVEL ?= "-9"
+SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+
+# Support different SDK archive types via SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
+python () {
+ if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
+ # SDK_ARCHIVE_CMD is used to generate the archived SDK ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}.
+ # It is recommended to cd into the input dir first, to avoid embedding build paths in the archive.
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ else:
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
+}
+
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native xz-native cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
-SDK_DEPENDS_append_libc-glibc = " nativesdk-glibc-locale"
+SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
+PATH:prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
@@ -71,6 +92,8 @@ SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
+SDK_PRUNE_SYSROOT_DIRS ?= "/dev"
+
python write_target_sdk_manifest () {
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
@@ -82,6 +105,12 @@ python write_target_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
+sdk_prune_dirs () {
+ for d in ${SDK_PRUNE_SYSROOT_DIRS}; do
+ rm -rf ${SDK_OUTPUT}${SDKTARGETSYSROOT}$d
+ done
+}
+
python write_sdk_test_data() {
from oe.data import export2json
testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
@@ -100,11 +129,12 @@ python write_host_sdk_manifest () {
output.write(format_pkg_list(pkgs, 'ver'))
}
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
-POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
-POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
+POPULATE_SDK_POST_TARGET_COMMAND:append = " write_sdk_test_data ; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " write_target_sdk_manifest; sdk_prune_dirs; "
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " write_host_sdk_manifest; "
+
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
def populate_sdk_common(d):
from oe.sdk import populate_sdk
@@ -153,11 +183,17 @@ fakeroot python do_populate_sdk() {
populate_sdk_common(d)
}
SSTATETASKS += "do_populate_sdk"
-SSTATE_SKIP_CREATION_task-populate-sdk = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk = '1'
do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
+python do_populate_sdk_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_sdk_setscene
+
+PSEUDO_IGNORE_PATHS .= ",${SDKDEPLOYDIR},${WORKDIR}/oe-sdk-repo,${WORKDIR}/sstate-build-populate_sdk"
fakeroot create_sdk_files() {
cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
@@ -166,6 +202,11 @@ fakeroot create_sdk_files() {
# Escape special characters like '+' and '.' in the SDKPATH
escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
+
+ mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
+ echo '${SDKPATHNATIVE}${libdir_nativesdk}
+${SDKPATHNATIVE}${base_libdir_nativesdk}
+include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
}
python check_sdk_sysroots() {
@@ -222,11 +263,10 @@ python check_sdk_sysroots() {
SDKTAROPTS = "--owner=root --group=root"
-fakeroot tar_sdk() {
+fakeroot archive_sdk() {
# Package it up
mkdir -p ${SDKDEPLOYDIR}
- cd ${SDK_OUTPUT}/${SDKPATH}
- tar ${SDKTAROPTS} -cf - . | xz -T 0 > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ ${SDK_ARCHIVE_CMD}
}
TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
@@ -238,7 +278,7 @@ fakeroot create_shar() {
rm -f ${T}/pre_install_command ${T}/post_install_command
- if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
+ if [ "${SDK_RELOCATE_AFTER_INSTALL}" = "1" ] ; then
cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
fi
cat << "EOF" >> ${T}/pre_install_command
@@ -255,6 +295,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
@@ -263,16 +304,17 @@ EOF
-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
-e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
+ -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# add execution permission
chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# append the SDK tarball
- cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# delete the old tarball, we don't need it anymore
- rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
}
populate_sdk_log_check() {
@@ -303,6 +345,13 @@ def sdk_variables(d):
do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
+python () {
+ variables = sdk_command_variables(d)
+ for var in variables:
+ if d.getVar(var, False):
+ d.setVarFlag(var, 'func', '1')
+}
+
do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
${TOOLCHAIN_SHAR_EXT_TMPL}:True"
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index 40b0375e0b..e2019f9bbf 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -2,29 +2,28 @@
inherit populate_sdk_base
-# NOTE: normally you cannot use task overrides for this kind of thing - this
-# only works because of get_sdk_ext_rdepends()
-
-TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
+# Used to override TOOLCHAIN_HOST_TASK in the eSDK case
+TOOLCHAIN_HOST_TASK_ESDK = " \
meta-environment-extsdk-${MACHINE} \
"
-TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-
-SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
+SDK_RELOCATE_AFTER_INSTALL:task-populate-sdk-ext = "0"
SDK_EXT = ""
-SDK_EXT_task-populate-sdk-ext = "-ext"
+SDK_EXT:task-populate-sdk-ext = "-ext"
# Options are full or minimal
SDK_EXT_TYPE ?= "full"
SDK_INCLUDE_PKGDATA ?= "0"
SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
+SDK_INCLUDE_NATIVESDK ?= "0"
+SDK_INCLUDE_BUILDTOOLS ?= "1"
SDK_RECRDEP_TASKS ?= ""
+SDK_CUSTOM_TEMPLATECONF ?= "0"
-SDK_LOCAL_CONF_WHITELIST ?= ""
-SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
+ESDK_LOCALCONF_ALLOW ?= ""
+ESDK_LOCALCONF_REMOVE ?= "CONF_VERSION \
BB_NUMBER_THREADS \
BB_NUMBER_PARSE_THREADS \
PARALLEL_MAKE \
@@ -35,7 +34,7 @@ SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
TMPDIR \
BB_SERVER_TIMEOUT \
"
-SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
+ESDK_CLASS_INHERIT_DISABLE ?= "buildhistory icecc"
SDK_UPDATE_URL ?= ""
SDK_TARGETS ?= "${PN}"
@@ -75,10 +74,10 @@ COREBASE_FILES ?= " \
.templateconf \
"
-SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
-B_task-populate-sdk-ext = "${SDK_DIR}"
+SDK_DIR:task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
+B:task-populate-sdk-ext = "${SDK_DIR}"
TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
-TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
+TOOLCHAIN_OUTPUTNAME:task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
@@ -93,6 +92,7 @@ python write_target_sdk_ext_manifest () {
real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
pkgs = {}
+ os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
for fn in extra_info['filesizes']:
info = fn.split(':')
@@ -114,14 +114,14 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND:append:task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
+SDK_TITLE:task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
import shutil
- cleanpaths = 'cache conf/sanity_info tmp'.split()
+ cleanpaths = ['cache', 'tmp']
for pth in cleanpaths:
fullpth = os.path.join(sdkbasepath, pth)
if os.path.isdir(fullpth):
@@ -143,15 +143,15 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
try:
with open(sdkbasepath + '/conf/local.conf', 'a') as f:
# Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
- f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
+ f.write('\nSSTATE_DIR:forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
+ f.write('SSTATE_MIRRORS:forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
# Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
- f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
- f.write('TCLIBCAPPEND_forcevariable = ""\n')
+ f.write('TMPDIR:forcevariable = "${TOPDIR}/tmp"\n')
+ f.write('TCLIBCAPPEND:forcevariable = ""\n')
# Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
# be different and we won't be able to find our native sstate)
if not bb.data.inherits_class('uninative', d):
- f.write('INHERIT_remove = "uninative"\n')
+ f.write('INHERIT:remove = "uninative"\n')
# Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
# will not allow in its COREBASE path, so we need to rename the directory temporarily
@@ -161,7 +161,7 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
shutil.rmtree(temp_sdkbasepath)
except FileNotFoundError:
pass
- os.rename(sdkbasepath, temp_sdkbasepath)
+ bb.utils.rename(sdkbasepath, temp_sdkbasepath)
cmdprefix = '. %s .; ' % conf_initpath
logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
try:
@@ -171,12 +171,14 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
bb.fatal(msg)
- os.rename(temp_sdkbasepath, sdkbasepath)
+ bb.utils.rename(temp_sdkbasepath, sdkbasepath)
# Clean out residue of running bitbake, which check_sstate_task_list()
# will effectively do
clean_esdk_builddir(d, sdkbasepath)
finally:
- os.replace(sdkbasepath + '/conf/local.conf.bak', sdkbasepath + '/conf/local.conf')
+ localconf = sdkbasepath + '/conf/local.conf'
+ if os.path.exists(localconf + '.bak'):
+ os.replace(localconf + '.bak', localconf)
python copy_buildsystem () {
import re
@@ -194,6 +196,9 @@ python copy_buildsystem () {
buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
+ # Check if a custom templateconf path is set
+ use_custom_templateconf = d.getVar('SDK_CUSTOM_TEMPLATECONF')
+
# Determine if we're building a derivative extensible SDK (from devtool build-sdk)
derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
if derivative:
@@ -242,7 +247,9 @@ python copy_buildsystem () {
# Create a layer for new recipes / appends
bbpath = d.getVar('BBPATH')
- bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
+ env = os.environ.copy()
+ env['PYTHONDONTWRITEBYTECODE'] = '1'
+ bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')], env=env)
# Create bblayers.conf
bb.utils.mkdirhier(baseoutpath + '/conf')
@@ -275,8 +282,8 @@ python copy_buildsystem () {
bb.utils.mkdirhier(uninative_outdir)
shutil.copy(uninative_file, uninative_outdir)
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
- env_whitelist_values = {}
+ env_passthrough = (d.getVar('BB_ENV_PASSTHROUGH_ADDITIONS') or '').split()
+ env_passthrough_values = {}
# Create local.conf
builddir = d.getVar('TOPDIR')
@@ -287,15 +294,15 @@ python copy_buildsystem () {
if derivative:
shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
+ local_conf_allowed = (d.getVar('ESDK_LOCALCONF_ALLOW') or '').split()
+ local_conf_remove = (d.getVar('ESDK_LOCALCONF_REMOVE') or '').split()
def handle_var(varname, origvalue, op, newlines):
- if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
+ if varname in local_conf_remove or (origvalue.strip().startswith('/') and not varname in local_conf_allowed):
newlines.append('# Removed original setting of %s\n' % varname)
return None, op, 0, True
else:
- if varname in env_whitelist:
- env_whitelist_values[varname] = origvalue
+ if varname in env_passthrough:
+ env_passthrough_values[varname] = origvalue
return origvalue, op, 0, True
varlist = ['[^#=+ ]*']
oldlines = []
@@ -305,8 +312,9 @@ python copy_buildsystem () {
if os.path.exists(builddir + '/conf/auto.conf'):
with open(builddir + '/conf/auto.conf', 'r') as f:
oldlines += f.readlines()
- with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines += f.readlines()
+ if os.path.exists(builddir + '/conf/local.conf'):
+ with open(builddir + '/conf/local.conf', 'r') as f:
+ oldlines += f.readlines()
(updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
with open(baseoutpath + '/conf/local.conf', 'w') as f:
@@ -330,7 +338,7 @@ python copy_buildsystem () {
f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
# Some classes are not suitable for SDK, remove them from INHERIT
- f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
+ f.write('INHERIT:remove = "%s"\n' % d.getVar('ESDK_CLASS_INHERIT_DISABLE', False))
# Bypass the default connectivity check if any
f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
@@ -346,20 +354,27 @@ python copy_buildsystem () {
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
# We want to be able to set this without a full reparse
- f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
+ f.write('BB_HASHCONFIG_IGNORE_VARS:append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
- # Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
+ # Set up which tasks are ignored for run on install
+ f.write('BB_SETSCENE_ENFORCE_IGNORE_TASKS = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
# Hide the config information from bitbake output (since it's fixed within the SDK)
f.write('BUILDCFG_HEADER = ""\n\n')
+ # Write METADATA_REVISION
+ f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
# Map gcc-dependent uninative sstate cache for installer usage
f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
+ if d.getVar("PRSERV_HOST"):
+ # Override this; we now include PR data, so it should only point to the local database
+ f.write('PRSERV_HOST = "localhost:0"\n\n')
+
# Allow additional config through sdk-extra.conf
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
if fn:
@@ -378,14 +393,52 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
- # Write a templateconf.cfg
- with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
- f.write('meta/conf\n')
+ # Copy multiple configurations if they exist in the user's config directory
+ if d.getVar('BBMULTICONFIG') is not None:
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf', 'multiconfig'))
+ for mc in d.getVar('BBMULTICONFIG').split():
+ dest_stub = "/conf/multiconfig/%s.conf" % (mc,)
+ if os.path.exists(builddir + dest_stub):
+ shutil.copyfile(builddir + dest_stub, baseoutpath + dest_stub)
+
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
+ # If PR Service is in use, we need to export this as well
+ bb.note('Do we have a pr database?')
+ if d.getVar("PRSERV_HOST"):
+ bb.note('Writing PR database...')
+ # Based on the code in classes/prexport.bbclass
+ import oe.prservice
+ #dump meta info of tables
+ localdata = d.createCopy()
+ localdata.setVar('PRSERV_DUMPOPT_COL', "1")
+ localdata.setVar('PRSERV_DUMPDIR', os.path.join(baseoutpath, 'conf'))
+ localdata.setVar('PRSERV_DUMPFILE', '${PRSERV_DUMPDIR}/prserv.inc')
+
+ bb.note('PR Database write to %s' % (localdata.getVar('PRSERV_DUMPFILE')))
+
+ retval = oe.prservice.prserv_dump_db(localdata)
+ if not retval:
+ bb.error("prexport_handler: export failed!")
+ return
+ (metainfo, datainfo) = retval
+ oe.prservice.prserv_export_tofile(localdata, metainfo, datainfo, True)
+
+ # Use the templateconf.cfg file from builddir if it exists
+ if os.path.exists(builddir + '/conf/templateconf.cfg') and use_custom_templateconf == '1':
+ shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
+ else:
+ # Write a templateconf.cfg
+ with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
+ f.write('meta/conf\n')
# Ensure any variables set from the external environment (by way of
- # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
+ # BB_ENV_PASSTHROUGH_ADDITIONS) are set in the SDK's configuration
extralines = []
- for name, value in env_whitelist_values.items():
+ for name, value in env_passthrough_values.items():
actualvalue = d.getVar(name) or ''
if value != actualvalue:
extralines.append('%s = "%s"\n' % (name, actualvalue))
@@ -401,9 +454,27 @@ python copy_buildsystem () {
excluded_targets = get_sdk_install_targets(d, images_only=True)
sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+    # nativesdk-only sigfile to merge into locked-sigs.inc
+ sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
+ nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
+
+ if sdk_include_nativesdk:
+ oe.copy_buildsystem.prune_lockedsigs([],
+ excluded_targets.split(),
+ nativesigfile,
+ True,
+ nativesigfile_pruned)
+
+ oe.copy_buildsystem.merge_lockedsigs([],
+ sigfile,
+ nativesigfile_pruned,
+ sigfile)
+
oe.copy_buildsystem.prune_lockedsigs([],
excluded_targets.split(),
sigfile,
+ False,
lockedsigs_pruned)
sstate_out = baseoutpath + '/sstate-cache'
@@ -414,13 +485,18 @@ python copy_buildsystem () {
sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
sdk_ext_type = d.getVar('SDK_EXT_TYPE')
- if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
+ if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
# Create the filtered task list used to generate the sstate cache shipped with the SDK
tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
else:
tasklistfn = None
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
# Add packagedata if enabled
if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
@@ -474,7 +550,7 @@ python copy_buildsystem () {
# We don't need sstate do_package files
for root, dirs, files in os.walk(sstate_out):
for name in files:
- if name.endswith("_package.tgz"):
+ if name.endswith("_package.tar.zst"):
f = os.path.join(root, name)
os.remove(f)
@@ -484,11 +560,20 @@ python copy_buildsystem () {
# sdk_ext_postinst() below) thus the checksum we take here would always
# be different.
manifest_file_list = ['conf/*']
+ if d.getVar('BBMULTICONFIG') is not None:
+ manifest_file_list.append('conf/multiconfig/*')
+
+ esdk_manifest_excludes = (d.getVar('ESDK_MANIFEST_EXCLUDES') or '').split()
+ esdk_manifest_excludes_list = []
+ for exclude_item in esdk_manifest_excludes:
+ esdk_manifest_excludes_list += glob.glob(os.path.join(baseoutpath, exclude_item))
manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
with open(manifest_file, 'w') as f:
for item in manifest_file_list:
for fn in glob.glob(os.path.join(baseoutpath, item)):
- if fn == manifest_file:
+ if fn == manifest_file or os.path.isdir(fn):
+ continue
+ if fn in esdk_manifest_excludes_list:
continue
chksum = bb.utils.sha256_file(fn)
f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
@@ -506,8 +591,12 @@ def get_sdk_required_utilities(buildtools_fn, d):
sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
- buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
- filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ if buildtools_fn:
+ buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
+ filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ else:
+ buildtools_installer = None
+ filelist = ""
localdata = bb.data.createCopy(d)
localdata.setVar('SDKPATH', '.')
sdkpathnative = localdata.getVar('SDKPATHNATIVE')
@@ -537,7 +626,7 @@ install_tools() {
for script in $scripts; do
for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
- test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
+ test -e ${targetscriptfn} || ln -rs ${scriptfn} ${targetscriptfn}
done
done
# We can't use the same method as above because files in the sysroot won't exist at this point
@@ -545,12 +634,14 @@ install_tools() {
unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
- lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
+ ln -rs ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
fi
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
# find latest buildtools-tarball and install it
- install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ fi
install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
}
@@ -574,8 +665,8 @@ sdk_ext_preinst() {
exit 1
fi
    # The relocation script used by the buildtools installer requires python3
- if ! command -v python > /dev/null; then
- echo "ERROR: The installer requires python, please install it first"
+ if ! command -v python3 > /dev/null; then
+ echo "ERROR: The installer requires python3, please install it first"
exit 1
fi
missing_utils=""
@@ -589,30 +680,32 @@ sdk_ext_preinst() {
exit 1
fi
SDK_EXTENSIBLE="1"
- if [ "$publish" = "1" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
- if [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
- fi
+ if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
+ EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
fi
}
-SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
+SDK_PRE_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_preinst}"
# FIXME this preparation should be done as part of the SDK construction
sdk_ext_postinst() {
printf "\nExtracting buildtools...\n"
cd $target_sdk_dir
env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
- printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
-
- # Delete the buildtools tar file since it won't be used again
- rm -f ./${SDK_BUILDTOOLS_INSTALLER}
- # We don't need the log either since it succeeded
- rm -f buildtools.log
-
- # Make sure when the user sets up the environment, they also get
- # the buildtools-tarball tools in their path.
- echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+
+ # Delete the buildtools tar file since it won't be used again
+ rm -f ./${SDK_BUILDTOOLS_INSTALLER}
+ # We don't need the log either since it succeeded
+ rm -f buildtools.log
+
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script
+ echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script
+ fi
    # Allow bitbake environment setup to be run as part of this SDK.
echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
@@ -628,21 +721,23 @@ sdk_ext_postinst() {
# Warn if trying to use external bitbake and the ext SDK together
echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
- if [ "$prepare_buildsystem" != "no" ]; then
+ if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
printf "Preparing build system...\n"
# dash which is /bin/sh on Ubuntu will not preserve the
    # current working directory when first run, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ fi
+ if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
rm $target_sdk_dir/ext-sdk-prepare.py
fi
echo done
}
-SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
+SDK_POST_INSTALL_COMMAND:task-populate-sdk-ext = "${sdk_ext_postinst}"
-SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
+SDK_POSTPROCESS_COMMAND:prepend:task-populate-sdk-ext = "copy_buildsystem; install_tools; "
SDK_INSTALL_TARGETS = ""
fakeroot python do_populate_sdk_ext() {
@@ -651,16 +746,35 @@ fakeroot python do_populate_sdk_ext() {
if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
+ # FIXME hopefully we can remove this restriction at some point, but the eSDK
+ # can only be built for the primary (default) multiconfig
+ if d.getVar('BB_CURRENT_MC') != 'default':
+ bb.fatal('The extensible SDK can currently only be built for the default multiconfig. Currently trying to build for %s.' % d.getVar('BB_CURRENT_MC'))
+
+ # eSDK dependencies don't use the traditional variables and things don't work properly if they are set
+ d.setVar("TOOLCHAIN_HOST_TASK", "${TOOLCHAIN_HOST_TASK_ESDK}")
+ d.setVar("TOOLCHAIN_TARGET_TASK", "")
+
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
- buildtools_fn = get_current_buildtools(d)
+ if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
+ buildtools_fn = get_current_buildtools(d)
+ else:
+ buildtools_fn = None
d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
# ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
d.delVar('SDKIMAGE_LINGUAS')
+ if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
+ generate_nativesdk_lockedsigs(d)
populate_sdk_common(d)
}
+def generate_nativesdk_lockedsigs(d):
+ import oe.copy_buildsystem
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
def get_ext_sdk_depends(d):
# Note: the deps varflag is a list not a string, so we need to specify expand=False
deps = d.getVarFlag('do_image_complete', 'deps', False)
@@ -686,17 +800,12 @@ do_sdk_depends[dirs] = "${WORKDIR}"
do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
-do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
-
-def get_sdk_ext_rdepends(d):
- localdata = d.createCopy()
- localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- return localdata.getVarFlag('do_populate_sdk', 'rdepends')
+do_sdk_depends[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('TOOLCHAIN_HOST_TASK_ESDK').split()])}"
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
- buildtools-tarball:do_populate_sdk \
+ ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
@@ -719,7 +828,7 @@ do_populate_sdk_ext[nostamp] = "1"
SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
SSTATETASKS += "do_populate_sdk_ext"
-SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
+SSTATE_SKIP_CREATION:task-populate-sdk-ext = '1'
do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
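The net effect of the hunks above is that the buildtools installer and nativesdk content become optional parts of the eSDK. As a rough illustration (not part of the patch; the values are assumptions), a local.conf fragment driving the new switches could look like:

    # Hypothetical configuration for a slimmer eSDK
    SDK_EXT_TYPE = "minimal"
    SDK_INCLUDE_BUILDTOOLS = "0"    # do not bundle the buildtools installer
    SDK_INCLUDE_NATIVESDK = "1"     # merge nativesdk locked signatures instead
    ESDK_MANIFEST_EXCLUDES = "conf/local.conf"  # illustrative exclude pattern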
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
index 478a33474d..18bd3dbff9 100644
--- a/meta/classes/ptest-gnome.bbclass
+++ b/meta/classes/ptest-gnome.bbclass
@@ -1,8 +1,8 @@
inherit ptest
-EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+EXTRA_OECONF:append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
-FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
+FILES:${PN}-ptest += "${libexecdir}/installed-tests/ \
${datadir}/installed-tests/"
-RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
+RDEPENDS:${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
index a4bc40b51a..5dd72c9dad 100644
--- a/meta/classes/ptest-perl.bbclass
+++ b/meta/classes/ptest-perl.bbclass
@@ -1,6 +1,6 @@
inherit ptest
-FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
+FILESEXTRAPATHS:prepend := "${COREBASE}/meta/files:"
SRC_URI += "file://ptest-perl/run-ptest"
@@ -13,9 +13,9 @@ do_install_ptest_perl() {
chown -R root:root ${D}${PTEST_PATH}
}
-FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
+FILES:${PN}-ptest:prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
-RDEPENDS_${PN}-ptest_prepend = "perl "
+RDEPENDS:${PN}-ptest:prepend = "perl "
addtask install_ptest_perl after do_install_ptest_base before do_package
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
index 97865c9338..1ec23c0923 100644
--- a/meta/classes/ptest.bbclass
+++ b/meta/classes/ptest.bbclass
@@ -1,24 +1,27 @@
-SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
-DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
+SUMMARY:${PN}-ptest ?= "${SUMMARY} - Package test files"
+DESCRIPTION:${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
PTEST_PATH ?= "${libdir}/${BPN}/ptest"
PTEST_BUILD_HOST_FILES ?= "Makefile"
PTEST_BUILD_HOST_PATTERN ?= ""
-FILES_${PN}-ptest = "${PTEST_PATH}"
-SECTION_${PN}-ptest = "devel"
-ALLOW_EMPTY_${PN}-ptest = "1"
+FILES:${PN}-ptest += "${PTEST_PATH}"
+SECTION:${PN}-ptest = "devel"
+ALLOW_EMPTY:${PN}-ptest = "1"
PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
-PTEST_ENABLED_class-native = ""
-PTEST_ENABLED_class-nativesdk = ""
-PTEST_ENABLED_class-cross-canadian = ""
-RDEPENDS_${PN}-ptest_class-native = ""
-RDEPENDS_${PN}-ptest_class-nativesdk = ""
-RRECOMMENDS_${PN}-ptest += "ptest-runner"
+PTEST_ENABLED:class-native = ""
+PTEST_ENABLED:class-nativesdk = ""
+PTEST_ENABLED:class-cross-canadian = ""
+RDEPENDS:${PN}-ptest += "${PN}"
+RDEPENDS:${PN}-ptest:class-native = ""
+RDEPENDS:${PN}-ptest:class-nativesdk = ""
+RRECOMMENDS:${PN}-ptest += "ptest-runner"
PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
+require conf/distro/include/ptest-packagelists.inc
+
do_configure_ptest() {
:
}
@@ -65,6 +68,38 @@ do_install_ptest_base() {
done
}
+PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
+
+# This function needs to run after apply_update_alternative_renames because the
+# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
+# used here to make this function run as late as possible.
+PACKAGE_PREPROCESS_FUNCS:append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
+ bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
+
+python ptest_update_alternatives() {
+ """
+ This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
+    to match the binaries renamed by update-alternatives.
+ """
+
+ if not bb.data.inherits_class('update-alternatives', d) \
+ or not update_alternatives_enabled(d):
+ return
+
+ bb.note("Generating symlinks for ptest")
+ bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
+ d.getVar("sbindir"), d.getVar("base_sbindir") }
+ ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
+ os.mkdir(ptest_bindir)
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ alternatives = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, _ in alternatives:
+ # Some alternatives are for man pages,
+ # check if the alternative is in PATH
+ if os.path.dirname(alt_link) in bin_paths:
+ os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
+}
+
do_configure_ptest_base[dirs] = "${B}"
do_compile_ptest_base[dirs] = "${B}"
do_install_ptest_base[dirs] = "${B}"
@@ -84,3 +119,14 @@ python () {
for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
bb.build.deltask(i, d)
}
+
+QARECIPETEST[missing-ptest] = "package_qa_check_missing_ptest"
+def package_qa_check_missing_ptest(pn, d, messages):
+    # This checks that the ptest package is actually included
+    # in standard oe-core ptest images - only for oe-core recipes
+    if 'meta/recipes' not in d.getVar('FILE') or d.getVar('PTEST_ENABLED') != "1":
+ return
+
+ enabled_ptests = " ".join([d.getVar('PTESTS_FAST'), d.getVar('PTESTS_SLOW'), d.getVar('PTESTS_PROBLEMS')]).split()
+ if (pn + "-ptest").replace(d.getVar('MLPREFIX'), '') not in enabled_ptests:
+ oe.qa.handle_error("missing-ptest", "supports ptests but is not included in oe-core's ptest-packagelists.inc", d)
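For context on the new missing-ptest QA check: an oe-core recipe with PTEST_ENABLED is now expected to appear in conf/distro/include/ptest-packagelists.inc. A minimal sketch of the recipe side (file names are illustrative):

    inherit ptest
    do_install_ptest() {
        # install the test entry point next to the generated run-ptest
        install -m 0755 ${S}/tests/run-tests.sh ${D}${PTEST_PATH}/
    }
    # ...and ${PN}-ptest must be listed in PTESTS_FAST, PTESTS_SLOW or
    # PTESTS_PROBLEMS in ptest-packagelists.inc to satisfy the QA check.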
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index e5d7ab3ce1..9405d58601 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -8,19 +8,19 @@ def pypi_package(d):
PYPI_PACKAGE ?= "${@pypi_package(d)}"
PYPI_PACKAGE_EXT ?= "tar.gz"
+PYPI_ARCHIVE_NAME ?= "${PYPI_PACKAGE}-${PV}.${PYPI_PACKAGE_EXT}"
def pypi_src_uri(d):
package = d.getVar('PYPI_PACKAGE')
- package_ext = d.getVar('PYPI_PACKAGE_EXT')
- pv = d.getVar('PV')
- return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
+ archive_name = d.getVar('PYPI_ARCHIVE_NAME')
+ return 'https://files.pythonhosted.org/packages/source/%s/%s/%s' % (package[0], package, archive_name)
PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
SECTION = "devel/python"
-SRC_URI += "${PYPI_SRC_URI}"
+SRC_URI:prepend = "${PYPI_SRC_URI} "
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
-UPSTREAM_CHECK_URI ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
-UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)"
+UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
+UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
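With PYPI_ARCHIVE_NAME factored out, a recipe whose upstream archive does not follow the ${PYPI_PACKAGE}-${PV} pattern can override just the file name. A sketch (package and archive names are assumptions):

    inherit pypi setuptools3
    PYPI_PACKAGE = "Foo"
    PYPI_PACKAGE_EXT = "zip"
    PYPI_ARCHIVE_NAME = "foo_src-${PV}.${PYPI_PACKAGE_EXT}"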
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
deleted file mode 100644
index a11dc350be..0000000000
--- a/meta/classes/python-dir.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-PYTHON_BASEVERSION = "2.7"
-PYTHON_ABI = ""
-PYTHON_DIR = "python${PYTHON_BASEVERSION}"
-PYTHON_PN = "python"
-PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
index 7dd130bad9..ff03e584d4 100644
--- a/meta/classes/python3-dir.bbclass
+++ b/meta/classes/python3-dir.bbclass
@@ -1,5 +1,5 @@
-PYTHON_BASEVERSION = "3.7"
-PYTHON_ABI = "m"
+PYTHON_BASEVERSION = "3.10"
+PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
index a3acaf61bb..3783c0c47e 100644
--- a/meta/classes/python3native.bbclass
+++ b/meta/classes/python3native.bbclass
@@ -2,14 +2,20 @@ inherit python3-dir
PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
EXTRANATIVEPATH += "python3-native"
-DEPENDS_append = " python3-native "
+DEPENDS:append = " python3-native "
-# python-config and other scripts are using distutils modules
+# python-config and other scripts are using sysconfig modules
# which we patch to access these variables
export STAGING_INCDIR
export STAGING_LIBDIR
-export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+# Packages can use
+# find_package(PythonInterp REQUIRED)
+# find_package(PythonLibs REQUIRED)
+# which ends up using libs/includes from build host
+# Therefore pre-empt that effort
+export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
+export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
# suppress host user's site-packages dirs.
export PYTHONNOUSERSITE = "1"
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
new file mode 100644
index 0000000000..2476858cae
--- /dev/null
+++ b/meta/classes/python3targetconfig.bbclass
@@ -0,0 +1,29 @@
+inherit python3native
+
+EXTRA_PYTHON_DEPENDS ?= ""
+EXTRA_PYTHON_DEPENDS:class-target = "python3"
+DEPENDS:append = " ${EXTRA_PYTHON_DEPENDS}"
+
+do_configure:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install:prepend:class-target() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_configure:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install:prepend:class-nativesdk() {
+ export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
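The new class exports _PYTHON_SYSCONFIGDATA_NAME only for target and nativesdk builds, so python3's sysconfig reports target paths during configure/compile/install while native builds stay untouched. Usage is a one-liner; a sketch, assuming a recipe that links C extensions against the target python3:

    inherit python3targetconfig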
diff --git a/meta/classes/python_flit_core.bbclass b/meta/classes/python_flit_core.bbclass
new file mode 100644
index 0000000000..96652aa204
--- /dev/null
+++ b/meta/classes/python_flit_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native python3-dir setuptools3-base
+
+DEPENDS += "python3 python3-flit-core-native"
+
+PEP517_BUILD_API = "flit_core.buildapi"
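A minimal sketch of a recipe consuming this class (the package name is an assumption):

    inherit pypi python_flit_core
    PYPI_PACKAGE = "foo"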
diff --git a/meta/classes/python_pep517.bbclass b/meta/classes/python_pep517.bbclass
new file mode 100644
index 0000000000..34ffdc9c0d
--- /dev/null
+++ b/meta/classes/python_pep517.bbclass
@@ -0,0 +1,56 @@
+# Common infrastructure for Python packages that use PEP-517 compliant packaging.
+# https://www.python.org/dev/peps/pep-0517/
+#
+# This class will build a wheel in do_compile, and use pypa/installer to install
+# it in do_install.
+
+DEPENDS:append = " python3-installer-native"
+
+# Where to execute the build process from
+PEP517_SOURCE_PATH ?= "${S}"
+
+# The PEP517 build API entry point
+PEP517_BUILD_API ?= "unset"
+
+# The directory where wheels will be written
+PEP517_WHEEL_PATH ?= "${WORKDIR}/dist"
+
+# The interpreter to use for installed scripts
+PEP517_INSTALL_PYTHON = "python3"
+PEP517_INSTALL_PYTHON:class-native = "nativepython3"
+
+# pypa/installer option to control the bytecode compilation
+INSTALL_WHEEL_COMPILE_BYTECODE ?= "--compile-bytecode=0"
+
+# PEP517 doesn't have a specific configure step, so set an empty do_configure to avoid
+# running base_do_configure.
+python_pep517_do_configure () {
+ :
+}
+
+# When we have Python 3.11 we can parse pyproject.toml to determine the build
+# API entry point directly
+python_pep517_do_compile () {
+ cd ${PEP517_SOURCE_PATH}
+ nativepython3 -c "import ${PEP517_BUILD_API} as api; api.build_wheel('${PEP517_WHEEL_PATH}')"
+}
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+python_pep517_do_install () {
+ COUNT=$(find ${PEP517_WHEEL_PATH} -name '*.whl' | wc -l)
+ if test $COUNT -eq 0; then
+ bbfatal No wheels found in ${PEP517_WHEEL_PATH}
+ elif test $COUNT -gt 1; then
+ bbfatal More than one wheel found in ${PEP517_WHEEL_PATH}, this should not happen
+ fi
+
+ nativepython3 -m installer ${INSTALL_WHEEL_COMPILE_BYTECODE} --interpreter "${USRBINPATH}/env ${PEP517_INSTALL_PYTHON}" --destdir=${D} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+# A manual do_install that just uses unzip for bootstrapping purposes. Callers should DEPEND on unzip-native.
+python_pep517_do_bootstrap_install () {
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ unzip -d ${D}${PYTHON_SITEPACKAGES_DIR} ${PEP517_WHEEL_PATH}/*.whl
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
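Backends other than flit and poetry can reuse this infrastructure by setting PEP517_BUILD_API themselves. A sketch, assuming a hypothetical backend and a corresponding -native recipe exist:

    inherit python_pep517 setuptools3-base
    DEPENDS += "python3-somebackend-native"
    PEP517_BUILD_API = "somebackend.buildapi"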
diff --git a/meta/classes/python_poetry_core.bbclass b/meta/classes/python_poetry_core.bbclass
new file mode 100644
index 0000000000..577663b8f1
--- /dev/null
+++ b/meta/classes/python_poetry_core.bbclass
@@ -0,0 +1,5 @@
+inherit python_pep517 python3native setuptools3-base
+
+DEPENDS += "python3-poetry-core-native"
+
+PEP517_BUILD_API = "poetry.core.masonry.api"
diff --git a/meta/classes/python_pyo3.bbclass b/meta/classes/python_pyo3.bbclass
new file mode 100644
index 0000000000..10cc3a0645
--- /dev/null
+++ b/meta/classes/python_pyo3.bbclass
@@ -0,0 +1,30 @@
+#
+# This class helps make sure that Python extensions built with PyO3
+# and setuptools_rust properly set up the environment for cross compilation
+#
+
+inherit cargo python3-dir siteinfo
+
+export PYO3_CROSS="1"
+export PYO3_CROSS_PYTHON_VERSION="${PYTHON_BASEVERSION}"
+export PYO3_CROSS_LIB_DIR="${STAGING_LIBDIR}"
+export CARGO_BUILD_TARGET="${HOST_SYS}"
+export RUSTFLAGS
+export PYO3_PYTHON="${PYTHON}"
+export PYO3_CONFIG_FILE="${WORKDIR}/pyo3.config"
+
+python_pyo3_do_configure () {
+ cat > ${WORKDIR}/pyo3.config << EOF
+implementation=CPython
+version=${PYTHON_BASEVERSION}
+shared=true
+abi3=false
+lib_name=${PYTHON_DIR}
+lib_dir=${STAGING_LIBDIR}
+pointer_width=${SITEINFO_BITS}
+build_flags=WITH_THREAD
+suppress_build_script_link_lines=false
+EOF
+}
+
+EXPORT_FUNCTIONS do_configure
diff --git a/meta/classes/python_setuptools3_rust.bbclass b/meta/classes/python_setuptools3_rust.bbclass
new file mode 100644
index 0000000000..f12e5d0cbd
--- /dev/null
+++ b/meta/classes/python_setuptools3_rust.bbclass
@@ -0,0 +1,11 @@
+inherit python_pyo3 setuptools3
+
+DEPENDS += "python3-setuptools-rust-native"
+
+python_setuptools3_rust_do_configure() {
+ python_pyo3_do_configure
+ cargo_common_do_configure
+ setuptools3_do_configure
+}
+
+EXPORT_FUNCTIONS do_configure
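A sketch of a consuming recipe (the package is an assumption); the stacked do_configure above runs the PyO3, cargo and setuptools setup in order:

    inherit pypi python_setuptools3_rust
    PYPI_PACKAGE = "foo"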
diff --git a/meta/classes/pythonnative.bbclass b/meta/classes/pythonnative.bbclass
deleted file mode 100644
index ae6600cd15..0000000000
--- a/meta/classes/pythonnative.bbclass
+++ /dev/null
@@ -1,19 +0,0 @@
-
-inherit python-dir
-
-PYTHON="${STAGING_BINDIR_NATIVE}/python-native/python"
-# PYTHON_EXECUTABLE is used by cmake
-PYTHON_EXECUTABLE="${PYTHON}"
-EXTRANATIVEPATH += "python-native"
-DEPENDS_append = " python-native "
-
-# python-config and other scripts are using distutils modules
-# which we patch to access these variables
-export STAGING_INCDIR
-export STAGING_LIBDIR
-
-# suppress host user's site-packages dirs.
-export PYTHONNOUSERSITE = "1"
-
-# autoconf macros will use their internal default preference otherwise
-export PYTHON
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
index f5c5780125..01a7b86ae1 100644
--- a/meta/classes/qemu.bbclass
+++ b/meta/classes/qemu.bbclass
@@ -16,6 +16,8 @@ def qemu_target_binary(data):
target_arch = "ppc"
elif target_arch == "powerpc64":
target_arch = "ppc64"
+ elif target_arch == "powerpc64le":
+ target_arch = "ppc64le"
return "qemu-" + target_arch
@@ -62,3 +64,4 @@ QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
+QEMU_EXTRAOPTIONS:powerpc64le = " -cpu POWER8"
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 15a9e63f2b..ad8489902a 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -4,7 +4,7 @@
#
# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
#
-# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
+# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
#
@@ -19,6 +19,9 @@
# QB_CPU_KVM: the similar to QB_CPU, but used when kvm, e.g., '-cpu kvm64',
# set it when support kvm.
#
+# QB_SMP: number of CPU cores inside the qemu guest, each mapped to a thread on the host,
+# e.g. "-smp 8".
+#
# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
# option, e.g., "console=ttyS0 console=tty"
#
@@ -26,23 +29,40 @@
#
# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa", set it when support audio
#
-# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
+# QB_AUDIO_OPT: qemu audio option, e.g., "-device AC97", used
# when QB_AUDIO_DRV is set.
#
+# QB_RNG: Pass-through for the host random number generator; it can speed up boot
+# in system mode when the system is experiencing entropy starvation
+#
# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
+# By default "/dev/vda rw" gets passed to the kernel.
+# To mount the rootfs read-only QB_KERNEL_ROOT can be set to e.g. "/dev/vda ro".
#
# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
# it needs work with QB_TAP_OPT and QB_SLIRP_OPT.
# Note, runqemu will replace @MAC@ with a predefined mac, you can set
# a custom one, but that may cause conflicts when multiple qemus are
# running on the same host.
+# Note: If more than one interface of type -device virtio-net-device gets added,
+# QB_NETWORK_DEVICE:prepend might be used, since Qemu enumerates the eth*
+# devices in reverse order to -device arguments.
#
-# QB_TAP_OPT: netowrk option for 'tap' mode, e.g.,
+# QB_TAP_OPT: network option for 'tap' mode, e.g.,
# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
#
# QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0"
#
+# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
+# ip= kernel command line argument needs to be changed accordingly. Details are documented
+# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
+# Example to configure only the first interface: "ip=eth0:dhcp"
+# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
+# static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway
+# address of the qemu guest by runqemu.
+# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
+#
# QB_ROOTFS_OPT: used as rootfs, e.g.,
# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
@@ -53,29 +73,43 @@
# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
# Note, runqemu will replace "@PORT@" with the port number which is used.
#
+# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT.
+# Can be used to automatically determine the image from the other variables
+# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
+# without the need to specify a dedicated qemu configuration
+#
+# QB_GRAPHICS: QEMU video card type (e.g. "-vga std")
+#
# Usage:
# IMAGE_CLASSES += "qemuboot"
# See "runqemu help" for more info
QB_MEM ?= "-m 256"
+QB_SMP ?= ""
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
-QB_OPT_APPEND ?= "-show-cursor"
+QB_RNG ?= "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
+QB_OPT_APPEND ?= ""
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
+QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
+QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8"
+QB_ROOTFS_EXTRA_OPT ?= ""
+QB_GRAPHICS ?= ""
# This should be kept aligned with ROOT_VM
QB_DRIVE_TYPE ?= "/dev/sd"
+inherit image-artifact-names
+
# Create qemuboot.conf
addtask do_write_qemuboot_conf after do_rootfs before do_image
-IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete"
def qemuboot_vars(d):
build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
- 'STAGING_DIR_HOST']
+ 'STAGING_DIR_HOST', 'SERIAL_CONSOLES', 'UNINATIVE_LOADER']
return build_vars + [k for k in d.keys() if k.startswith('QB_')]
do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
@@ -84,12 +118,17 @@ python do_write_qemuboot_conf() {
import configparser
qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ if d.getVar('IMAGE_LINK_NAME'):
+ qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
+ else:
+ qemuboot_link = ""
finalpath = d.getVar("DEPLOY_DIR_IMAGE")
topdir = d.getVar('TOPDIR')
cf = configparser.ConfigParser()
cf.add_section('config_bsp')
for k in sorted(qemuboot_vars(d)):
+ if ":" in k:
+ continue
# qemu-helper-native sysroot is not removed by rm_work and
# contains all tools required by runqemu
if k == 'STAGING_BINDIR_NATIVE':
@@ -97,6 +136,8 @@ python do_write_qemuboot_conf() {
'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
else:
val = d.getVar(k)
+ if val is None:
+ continue
# we only want to write out relative paths so that we can relocate images
# and still run them
if val.startswith(topdir):
@@ -117,7 +158,7 @@ python do_write_qemuboot_conf() {
with open(qemuboot, 'w') as f:
cf.write(f)
- if qemuboot_link != qemuboot:
+ if qemuboot_link and qemuboot_link != qemuboot:
if os.path.lexists(qemuboot_link):
os.remove(qemuboot_link)
os.symlink(os.path.basename(qemuboot), qemuboot_link)
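Pulling the new knobs together, a machine configuration fragment could look like this (all values illustrative, not defaults):

    IMAGE_CLASSES += "qemuboot"
    QB_SMP = "-smp 4"
    QB_GRAPHICS = "-vga std"
    QB_KERNEL_ROOT = "/dev/vda ro"
    QB_CMDLINE_IP_SLIRP = "ip=eth0:dhcp"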
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
index 582812c1cf..af04be5cca 100644
--- a/meta/classes/relocatable.bbclass
+++ b/meta/classes/relocatable.bbclass
@@ -6,13 +6,15 @@ python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
-relocatable_native_pcfiles () {
- if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
- fi
- if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
- fi
+relocatable_native_pcfiles() {
+ for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
+ files_template=${SYSROOT_DESTDIR}$dir/*.pc
+ # Expand to any files matching $files_template
+ files=$(echo $files_template)
+ # $files_template and $files will differ if any files were found
+ if [ "$files_template" != "$files" ]; then
+ rel=$(realpath -m --relative-to=$dir ${base_prefix})
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
+ fi
+ done
}
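For illustration, assuming base_prefix=/opt/sysroot and libdir=/opt/sysroot/usr/lib, the loop computes rel=../../.. for usr/lib/pkgconfig and the sed rewrites a shipped .pc file roughly as:

    # before
    prefix=/opt/sysroot/usr
    # after: resolved relative to the .pc file's own location
    prefix=${pcfiledir}/../../../usr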
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 1c55abfbf3..de48e4ff0f 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -25,6 +25,19 @@ def errorreport_savedata(e, newdata, file):
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
+def get_conf_data(e, filename):
+ builddir = e.data.getVar('TOPDIR')
+ filepath = os.path.join(builddir, "conf", filename)
+ jsonstring = ""
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as f:
+            for line in f:
+                if line.startswith("#") or len(line.strip()) == 0:
+                    continue
+                jsonstring = jsonstring + line
+ return jsonstring
+
python errorreport_handler () {
import json
import codecs
@@ -51,6 +64,10 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['bitbake_version'] = e.data.getVar("BB_VERSION")
+ data['layer_version'] = get_layers_branch_rev(e.data)
+ data['local_conf'] = get_conf_data(e, 'local.conf')
+ data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -63,19 +80,15 @@ python errorreport_handler () {
taskdata['task'] = task
if log:
try:
- logFile = codecs.open(log, 'r', 'utf-8')
- logdata = logFile.read()
-
+ with codecs.open(log, encoding='utf-8') as logFile:
+ logdata = logFile.read()
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
-
- logFile.close()
except:
logdata = "Unable to read log file"
-
else:
logdata = "No Log"
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
deleted file mode 100644
index 8788ad7145..0000000000
--- a/meta/classes/reproducible_build.bbclass
+++ /dev/null
@@ -1,170 +0,0 @@
-# reproducible_build.bbclass
-#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
-# Upstream components (generally) respect this environment variable,
-# using it in place of the "current" date and time.
-# See https://reproducible-builds.org/specs/source-date-epoch/
-#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
-#
-# There are 4 ways we determine SOURCE_DATE_EPOCH:
-#
-# 1. Use the value from __source_date_epoch.txt file if this file exists.
-# This file was most likely created in the previous build by one of the following methods 2,3,4.
-# Alternatively, it can be provided by a recipe via SRC_URI.
-#
-# If the file does not exist:
-#
-# 2. If there is a git checkout, use the last git commit timestamp.
-# Git does not preserve file timestamps on checkout.
-#
-# 3. Use the mtime of "known" files such as NEWS, CHANGLELOG, ...
-# This works for well-kept repositories distributed via tarball.
-#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
-# This will be the newest file from the distribution tarball, if any.
-#
-# 5. Fall back to a fixed timestamp.
-#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
-
-BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
-
-SDE_DIR ="${WORKDIR}/source-date-epoch"
-SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
-
-SSTATETASKS += "do_deploy_source_date_epoch"
-
-do_deploy_source_date_epoch () {
- echo "Deploying SDE to ${SDE_DIR}."
-}
-
-python do_deploy_source_date_epoch_setscene () {
- sstate_setscene(d)
-}
-
-do_deploy_source_date_epoch[dirs] = "${SDE_DIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DIR}"
-addtask do_deploy_source_date_epoch_setscene
-addtask do_deploy_source_date_epoch before do_configure after do_patch
-
-def get_source_date_epoch_from_known_files(d, sourcedir):
- source_date_epoch = None
- newest_file = None
- known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
- for file in known_files:
- filepath = os.path.join(sourcedir, file)
- if os.path.isfile(filepath):
- mtime = int(os.lstat(filepath).st_mtime)
- # There may be more than one "known_file" present, if so, use the youngest one
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filepath
- if newest_file:
- bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
- return source_date_epoch
-
-def find_git_folder(d, sourcedir):
- # First guess: WORKDIR/git
- # This is the default git fetcher unpack path
- workdir = d.getVar('WORKDIR')
- gitpath = os.path.join(workdir, "git/.git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Second guess: ${S}
- gitpath = os.path.join(sourcedir, ".git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Perhaps there was a subpath or destsuffix specified.
- # Go looking in the WORKDIR
- exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
- "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
- for root, dirs, files in os.walk(workdir, topdown=True):
- dirs[:] = [d for d in dirs if d not in exclude]
- if '.git' in dirs:
- return root
-
- bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
- return None
-
-def get_source_date_epoch_from_git(d, sourcedir):
- source_date_epoch = None
- if "git://" in d.getVar('SRC_URI'):
- gitpath = find_git_folder(d, sourcedir)
- if gitpath:
- import subprocess
- source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
- bb.debug(1, "git repository: %s" % gitpath)
- return source_date_epoch
-
-def get_source_date_epoch_from_youngest_file(d, sourcedir):
- if sourcedir == d.getVar('WORKDIR'):
- # These sources are almost certainly not from a tarball
- return None
-
- # Do it the hard way: check all files and find the youngest one...
- source_date_epoch = None
- newest_file = None
- for root, dirs, files in os.walk(sourcedir, topdown=True):
- files = [f for f in files if not f[0] == '.']
-
- for fname in files:
- filename = os.path.join(root, fname)
- try:
- mtime = int(os.lstat(filename).st_mtime)
- except ValueError:
- mtime = 0
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filename
-
- if newest_file:
- bb.debug(1, "Newest file found: %s" % newest_file)
- return source_date_epoch
-
-def fixed_source_date_epoch():
- bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
- return 0
-
-python do_create_source_date_epoch_stamp() {
- epochfile = d.getVar('SDE_FILE')
- if os.path.isfile(epochfile):
- bb.debug(1, "Reusing SOURCE_DATE_EPOCH from: %s" % epochfile)
- return
-
- sourcedir = d.getVar('S')
- source_date_epoch = (
- get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
- get_source_date_epoch_from_youngest_file(d, sourcedir) or
- fixed_source_date_epoch() # Last resort
- )
-
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(epochfile, 'w') as f:
- f.write(str(source_date_epoch))
-}
-
-BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " do_create_source_date_epoch_stamp")
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = "0"
- if os.path.isfile(epochfile):
- with open(epochfile, 'r') as f:
- source_date_epoch = f.read()
- bb.debug(1, "SOURCE_DATE_EPOCH: %s" % source_date_epoch)
- d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
-}
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
deleted file mode 100644
index 8a60deef3d..0000000000
--- a/meta/classes/reproducible_build_simple.bbclass
+++ /dev/null
@@ -1,10 +0,0 @@
-# Setup default environment for reproducible builds.
-
-BUILD_REPRODUCIBLE_BINARIES = "1"
-
-export PYTHONHASHSEED = "0"
-export PERL_HASH_SEED = "0"
-export SOURCE_DATE_EPOCH ??= "1520598896"
-
-REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
-
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 10e134b958..5f12d5aaeb 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -13,7 +13,7 @@
# Recipes can also configure which entries in their ${WORKDIR}
# are preserved besides temp, which already gets excluded by default
# because it contains logs:
-# do_install_append () {
+# do_install:append () {
# echo "bar" >${WORKDIR}/foo
# }
# RM_WORK_EXCLUDE_ITEMS += "foo"
@@ -24,7 +24,7 @@ RM_WORK_EXCLUDE_ITEMS = "temp"
BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
-BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
+BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
@@ -47,61 +47,51 @@ do_rm_work () {
cd `dirname ${STAMP}`
for i in `basename ${STAMP}`*
do
- # By default we'll delete the stamp, unless $i is changed by the inner loop
- # (i=dummy does this)
-
case $i in
*sigdata*|*sigbasedata*)
# Save/skip anything that looks like a signature data file.
- i=dummy
;;
- *do_image_complete_setscene*)
- # Ensure we don't 'stack' setscene extensions to this stamp with the section below
- i=dummy
+ *do_image_complete_setscene*|*do_image_qa_setscene*)
+ # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
;;
*do_image_complete*)
# Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
- i=dummy
+ ;;
+ *do_image_qa*)
+ # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
;;
*do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
- i=dummy
;;
*do_addto_recipe_sysroot*)
# Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
excludes="$excludes recipe-sysroot-native"
- i=dummy
;;
*do_package|*do_package.*|*do_package_setscene.*)
# We remove do_package entirely, including any
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f $i;
- i=dummy
+ rm -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
- i=dummy
;;
+ *)
+ # For everything else: if suitable, promote the stamp to a setscene
+ # version, otherwise remove it
+ for j in ${SSTATETASKS} do_shared_workdir
+ do
+ case $i in
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ break
+ ;;
+ esac
+ done
+ rm -f -- $i
esac
-
- for j in ${SSTATETASKS} do_shared_workdir
- do
- case $i in
- dummy)
- break
- ;;
- *$j|*$j.*)
- # Promote the stamp to a setscene version
- mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
- i=dummy
- break
- ;;
- esac
- done
-
- rm -f $i
done
cd ${WORKDIR}
@@ -110,9 +100,9 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
    # failures of removing pseudo folders on NFS2/3 server.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ rm -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ rm -rf -- $dir
fi
done
}
@@ -121,7 +111,7 @@ do_rm_work_all () {
}
do_rm_work_all[recrdeptask] = "do_rm_work"
do_rm_work_all[noexec] = "1"
-addtask rm_work_all after before do_build
+addtask rm_work_all before do_build
do_populate_sdk[postfuncs] += "rm_work_populatesdk"
rm_work_populatesdk () {
@@ -164,8 +154,7 @@ python inject_rm_work() {
# Determine what do_build depends upon, without including do_build
# itself or our own special do_rm_work_all.
- deps = set(bb.build.preceedtask('do_build', True, d))
- deps.difference_update(('do_build', 'do_rm_work_all'))
+ deps = sorted((set(bb.build.preceedtask('do_build', True, d))).difference(('do_build', 'do_rm_work_all')) or "")
    # deps can be empty if do_build doesn't exist, e.g. *-initial recipes
if not deps:
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
index 7c00bea597..15e6091b9d 100644
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -28,6 +28,6 @@ inherit rm_work
# Instead go up one level and remove ourself.
DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
-do_rm_work_append () {
+do_rm_work:append () {
rm -rf ${DL_DIR}
}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index bde58ad6cd..7b92df69c5 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -21,24 +21,28 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
# otherwise kernel or initramfs end up mounting the rootfs read/write
# (the default) if supported by the underlying storage.
#
-# We do this with _append because the default value might get set later with ?=
+# We do this with :append because the default value might get set later with ?=
# and we don't want to disable such a default by setting a value here.
-APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
+APPEND:append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
# Set default target for systemd images
-SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
+SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains_any("IMAGE_FEATURES", [ "x11-base", "weston" ], "graphical.target", "multi-user.target", d)}'
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "overlayfs", "overlayfs_qa_check;", "", d)}'
+
+inherit image-artifact-names
+
# Sort the user and group entries in /etc by ID in order to make the content
# deterministic. Package installs are not deterministic, causing the ordering
# of entries to change between builds. In case that this isn't desired,
@@ -48,7 +52,7 @@ ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
# the numeric IDs of dynamically created entries remain stable.
#
# We want this to run as late as possible, in particular after
-# systemd_sysusers_create and set_user_group. Using _append is not
+# systemd_sysusers_create and set_user_group. Using :append is not
# enough for that, set_user_group is added that way and would end
# up running after us.
SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
@@ -58,7 +62,7 @@ python () {
}
systemd_create_users () {
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+ for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/*.conf; do
[ -e $conffile ] || continue
grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
if [ "$type" = "u" ]; then
@@ -74,12 +78,8 @@ systemd_create_users () {
eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
elif [ "$type" = "m" ]; then
group=$id
- if [ ! `grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group` ]; then
- eval groupadd --root ${IMAGE_ROOTFS} --system $group
- fi
- if [ ! `grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd` ]; then
- eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
- fi
+ eval groupadd --root ${IMAGE_ROOTFS} --system $group || true
+ eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name --no-user-group || true
eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
fi
done
@@ -95,6 +95,11 @@ read_only_rootfs_hook () {
sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
fi
+ # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
+ if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
+ sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
+ fi
+
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
@@ -126,6 +131,12 @@ read_only_rootfs_hook () {
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
fi
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+ # Create machine-id
+ # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
+ touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
+ fi
}
#
@@ -203,8 +214,8 @@ postinst_enable_logging () {
# Modify systemd default target
#
set_systemd_default_target () {
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
- ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ]; then
+ ln -sf ${systemd_system_unitdir}/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
fi
}
@@ -254,7 +265,7 @@ python write_image_manifest () {
with open(manifest_name, 'w+') as image_manifest:
image_manifest.write(format_pkg_list(pkgs, "ver"))
- if os.path.exists(manifest_name):
+ if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
if os.path.lexists(manifest_link):
os.remove(manifest_link)
@@ -297,12 +308,16 @@ rootfs_check_host_user_contaminated () {
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
- find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
- -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
+ find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
+ -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
+
+ sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
+ bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
+ done
if [ -s "$contaminated" ]; then
- echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
- cat "$contaminated" | sed "s,^, ,"
+ bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
+ bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
fi
}
@@ -322,7 +337,7 @@ python write_image_test_data() {
searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
export2json(d, testdata_name, searchString=searchString, replaceString="")
- if os.path.exists(testdata_name):
+ if os.path.exists(testdata_name) and link_name:
testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
if os.path.lexists(testdata_link):
os.remove(testdata_link)
@@ -350,7 +365,52 @@ rootfs_reproducible () {
echo $sformatted > ${IMAGE_ROOTFS}/etc/version
bbnote "rootfs_reproducible: set /etc/version to $sformatted"
- find ${IMAGE_ROOTFS}/etc/gconf -name '%gconf.xml' -print0 | xargs -0r \
- sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
+ find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
+ sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ fi
fi
}
+
+# Perform a dumb check for unit existence, not its validity
+python overlayfs_qa_check() {
+ from oe.overlayfs import mountUnitName
+
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT") or {}
+ imagepath = d.getVar("IMAGE_ROOTFS")
+ sysconfdir = d.getVar("sysconfdir")
+ searchpaths = [oe.path.join(imagepath, sysconfdir, "systemd", "system"),
+ oe.path.join(imagepath, d.getVar("systemd_system_unitdir"))]
+ fstabpath = oe.path.join(imagepath, sysconfdir, "fstab")
+
+ if not any(os.path.exists(path) for path in [*searchpaths, fstabpath]):
+ return
+
+ fstabDevices = []
+ if os.path.isfile(fstabpath):
+ with open(fstabpath, 'r') as f:
+ for line in f:
+ if line[0] == '#':
+ continue
+ path = line.split(maxsplit=2)
+ if len(path) > 2:
+ fstabDevices.append(path[1])
+
+ allUnitExist = True
+ for mountPoint in overlayMountPoints:
+ mountPath = d.getVarFlag('OVERLAYFS_MOUNT_POINT', mountPoint)
+ if mountPath in fstabDevices:
+ continue
+
+ mountUnit = mountUnitName(mountPath)
+ if any(os.path.isfile(oe.path.join(dirpath, mountUnit))
+ for dirpath in searchpaths):
+ continue
+
+ bb.warn('Mount path %s not found in fstab and unit %s not found '
+ 'in systemd unit directories' % (mountPath, mountUnit))
+ allUnitExist = False
+
+ if not allUnitExist:
+ bb.fatal('Not all mount paths and units are installed in the image')
+}
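+#
+# For context: systemd derives mount unit names from the mount path. A
+# minimal sketch of that convention (the real helper is
+# oe.overlayfs.mountUnitName, which may additionally escape special
+# characters the way systemd-escape does):
+#
+#   def mount_unit_name(mount_point):
+#       if mount_point == '/':
+#           return '-.mount'
+#       return mount_point.strip('/').replace('/', '-') + '.mount'
+#
+#   # e.g. mount_unit_name('/var/log') == 'var-log.mount'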
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index 2b93796a76..0469ba7059 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -7,7 +7,7 @@ ROOTFS_PKGMANAGE = "dpkg apt"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
@@ -32,4 +32,8 @@ python () {
d.setVar('DEB_SDK_ARCH', 'amd64')
elif darch == "arm":
d.setVar('DEB_SDK_ARCH', 'armel')
+ elif darch == "aarch64":
+ d.setVar('DEB_SDK_ARCH', 'arm64')
+ else:
+ bb.fatal("Unhandled SDK_ARCH %s" % darch)
}
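# The PACKAGE_FEED_* variables above are combined into the full feed URIs,
# which is why all three must be in vardeps for the task hash. A rough
# sketch of that combination (illustrative only; the real logic lives in
# oe.package_manager):
#
#   def build_feed_uris(uris, base_paths, archs):
#       feeds = []
#       for uri in uris.split():
#           for base in base_paths.split() or ['']:
#               for arch in archs.split() or ['']:
#                   feeds.append('/'.join(p for p in (uri, base, arch) if p))
#       return feeds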
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index aabc370cfc..245c256a6f 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -11,17 +11,17 @@ ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
+do_populate_sdk[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
+do_populate_sdk_ext[lockfiles] += "${WORKDIR}/sdk-ipk.lock"
OPKG_PREPROCESS_COMMANDS = ""
OPKG_POSTPROCESS_COMMANDS = ""
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 51f89ea990..bec4d63ed6 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -4,12 +4,12 @@
ROOTFS_PKGMANAGE = "rpm dnf"
-# dnf is using our custom distutils, and so will fail without these
+# dnf is using our custom sysconfig module, and so will fail without these
export STAGING_INCDIR
export STAGING_LIBDIR
# Add 100Meg of extra space for dnf
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
+IMAGE_ROOTFS_EXTRA_SPACE:append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "", d)}"
# Dnf is python based, so be sure python3-native is available to us.
EXTRANATIVEPATH += "python3-native"
@@ -24,7 +24,7 @@ do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
+do_rootfs[vardeps] += "PACKAGE_FEED_URIS PACKAGE_FEED_BASE_PATHS PACKAGE_FEED_ARCHS"
python () {
if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e3647..85c7ec7434 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes/rust-bin.bbclass b/meta/classes/rust-bin.bbclass
new file mode 100644
index 0000000000..c87343b3cf
--- /dev/null
+++ b/meta/classes/rust-bin.bbclass
@@ -0,0 +1,149 @@
+inherit rust
+
+RDEPENDS:${PN}:append:class-target = " ${RUSTLIB_DEP}"
+
+RUSTC_ARCHFLAGS += "-C opt-level=3 -g -L ${STAGING_DIR_HOST}/${rustlibdir} -C linker=${RUST_TARGET_CCLD}"
+EXTRA_OEMAKE += 'RUSTC_ARCHFLAGS="${RUSTC_ARCHFLAGS}"'
+
+# Some libraries alias with the standard library but libstd is configured to
+# make it difficult or impossible to use its version. Unfortunately libstd
+# must be explicitly overridden using extern.
+OVERLAP_LIBS = "\
+ libc \
+ log \
+ getopts \
+ rand \
+"
+def get_overlap_deps(d):
+ deps = d.getVar("DEPENDS").split()
+ overlap_deps = []
+ for o in d.getVar("OVERLAP_LIBS").split():
+ l = len([o for dep in deps if (o + '-rs' in dep)])
+ if l > 0:
+ overlap_deps.append(o)
+ return " ".join(overlap_deps)
+OVERLAP_DEPS = "${@get_overlap_deps(d)}"
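+# For example, with DEPENDS = "libc-rs log-rs-native zlib" (a made-up
+# value), get_overlap_deps(d) returns "libc log": only those OVERLAP_LIBS
+# appear with an '-rs' suffix among the dependencies.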
+
+# Prevents multiple static copies of standard library modules
+# See https://github.com/rust-lang/rust/issues/19680
+RUSTC_PREFER_DYNAMIC = "-C prefer-dynamic"
+RUSTC_FLAGS += "${RUSTC_PREFER_DYNAMIC}"
+
+CRATE_NAME ?= "${@d.getVar('BPN').replace('-rs', '').replace('-', '_')}"
+BINNAME ?= "${BPN}"
+LIBNAME ?= "lib${CRATE_NAME}-rs"
+CRATE_TYPE ?= "dylib"
+BIN_SRC ?= "${S}/src/main.rs"
+LIB_SRC ?= "${S}/src/lib.rs"
+
+rustbindest ?= "${bindir}"
+rustlibdest ?= "${rustlibdir}"
+RUST_RPATH_ABS ?= "${rustlibdir}:${rustlib}"
+
+def relative_rpaths(paths, base):
+ relpaths = set()
+ for p in paths.split(':'):
+ if p == base:
+ relpaths.add('$ORIGIN')
+ continue
+ relpaths.add(os.path.join('$ORIGIN', os.path.relpath(p, base)))
+ return '-rpath=' + ':'.join(relpaths) if len(relpaths) else ''
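+# As a worked example with hypothetical paths:
+# relative_rpaths('/usr/lib/rust:/usr/lib', '/usr/lib/rust') yields
+# '-rpath=' plus '$ORIGIN' and '$ORIGIN/..' joined by ':' (set iteration
+# order is unspecified), since os.path.relpath('/usr/lib', '/usr/lib/rust')
+# is '..'.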
+
+RUST_LIB_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustlibdest', True))}"
+RUST_BIN_RPATH_FLAGS ?= "${@relative_rpaths(d.getVar('RUST_RPATH_ABS', True), d.getVar('rustbindest', True))}"
+
+def libfilename(d):
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ return d.getVar('LIBNAME', True) + '.so'
+ else:
+ return d.getVar('LIBNAME', True) + '.rlib'
+
+def link_args(d, bin):
+ linkargs = []
+ if bin:
+ rpaths = d.getVar('RUST_BIN_RPATH_FLAGS', False)
+ else:
+ rpaths = d.getVar('RUST_LIB_RPATH_FLAGS', False)
+ if d.getVar('CRATE_TYPE', True) == 'dylib':
+ linkargs.append('-soname')
+ linkargs.append(libfilename(d))
+ if len(rpaths):
+ linkargs.append(rpaths)
+ if len(linkargs):
+ return ' '.join(['-Wl,' + arg for arg in linkargs])
+ else:
+ return ''
+
+get_overlap_externs () {
+ externs=
+ for dep in ${OVERLAP_DEPS}; do
+ extern=$(ls ${STAGING_DIR_HOST}/${rustlibdir}/lib$dep-rs.{so,rlib} 2>/dev/null \
+ | awk '{print $1}');
+ if [ -n "$extern" ]; then
+ externs="$externs --extern $dep=$extern"
+ else
+ echo "$dep in depends but no such library found in ${rustlibdir}!" >&2
+ exit 1
+ fi
+ done
+ echo "$externs"
+}
+
+do_configure () {
+}
+
+oe_runrustc () {
+ export RUST_TARGET_PATH="${RUST_TARGET_PATH}"
+ bbnote ${RUSTC} ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+ "${RUSTC}" ${RUSTC_ARCHFLAGS} ${RUSTC_FLAGS} "$@"
+}
+
+oe_compile_rust_lib () {
+ rm -rf ${LIBNAME}.{rlib,so}
+ local -a link_args
+ if [ -n '${@link_args(d, False)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, False)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${LIB_SRC} \
+ -o ${@libfilename(d)} \
+ --crate-name=${CRATE_NAME} --crate-type=${CRATE_TYPE} \
+ "$@"
+}
+oe_compile_rust_lib[vardeps] += "get_overlap_externs"
+
+oe_compile_rust_bin () {
+ rm -rf ${BINNAME}
+ local -a link_args
+ if [ -n '${@link_args(d, True)}' ]; then
+ link_args[0]='-C'
+ link_args[1]='link-args=${@link_args(d, True)}'
+ fi
+ oe_runrustc $(get_overlap_externs) \
+ "${link_args[@]}" \
+ ${BIN_SRC} -o ${BINNAME} "$@"
+}
+oe_compile_rust_bin[vardeps] += "get_overlap_externs"
+
+oe_install_rust_lib () {
+ for lib in $(ls ${LIBNAME}.{so,rlib} 2>/dev/null); do
+ echo Installing $lib
+ install -D -m 755 $lib ${D}/${rustlibdest}/$lib
+ done
+}
+
+oe_install_rust_bin () {
+ echo Installing ${BINNAME}
+ install -D -m 755 ${BINNAME} ${D}/${rustbindest}/${BINNAME}
+}
+
+do_rust_bin_fixups() {
+ for f in `find ${PKGD} -name '*.so*'`; do
+ echo "Strip rust note: $f"
+ ${OBJCOPY} -R .note.rustc $f $f
+ done
+}
+PACKAGE_PREPROCESS_FUNCS += "do_rust_bin_fixups"
+
diff --git a/meta/classes/rust-common.bbclass b/meta/classes/rust-common.bbclass
new file mode 100644
index 0000000000..65ad677499
--- /dev/null
+++ b/meta/classes/rust-common.bbclass
@@ -0,0 +1,185 @@
+inherit python3native
+
+# Common variables used by all Rust builds
+export rustlibdir = "${libdir}/rust"
+FILES:${PN} += "${rustlibdir}/*.so"
+FILES:${PN}-dev += "${rustlibdir}/*.rlib ${rustlibdir}/*.rmeta"
+FILES:${PN}-dbg += "${rustlibdir}/.debug"
+
+RUSTLIB = "-L ${STAGING_LIBDIR}/rust"
+RUST_DEBUG_REMAP = "--remap-path-prefix=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}"
+RUSTFLAGS += "${RUSTLIB} ${RUST_DEBUG_REMAP}"
+RUSTLIB_DEP ?= "libstd-rs"
+export RUST_TARGET_PATH = "${STAGING_LIBDIR_NATIVE}/rustlib"
+RUST_PANIC_STRATEGY ?= "unwind"
+
+# Native builds are not affected by TCLIBC. Without this, rust-native
+# thinks its "target" (i.e. x86_64-linux) is a musl target.
+RUST_LIBC = "${TCLIBC}"
+RUST_LIBC:class-native = "glibc"
+
+def determine_libc(d, thing):
+ '''Determine which libc something should target'''
+
+ # BUILD is never musl, TARGET may be musl or glibc,
+ # HOST could be musl, but only if a compiler is built to be run on
+ # target in which case HOST_SYS != BUILD_SYS.
+ if thing == 'TARGET':
+ libc = d.getVar('RUST_LIBC')
+ elif thing == 'BUILD' and (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ libc = d.getVar('RUST_LIBC')
+ else:
+ libc = d.getVar('RUST_LIBC:class-native')
+
+ return libc
+
+def target_is_armv7(d):
+ '''Determine if target is armv7'''
+ # TUNE_FEATURES may include arm* even if the target is not arm
+ # in the case of *-native packages
+ if d.getVar('TARGET_ARCH') != 'arm':
+ return False
+
+ feat = d.getVar('TUNE_FEATURES')
+ feat = frozenset(feat.split())
+ mach_overrides = d.getVar('MACHINEOVERRIDES')
+ mach_overrides = frozenset(mach_overrides.split(':'))
+
+ v7 = frozenset(['armv7a', 'armv7r', 'armv7m', 'armv7ve'])
+ if mach_overrides.isdisjoint(v7) and feat.isdisjoint(v7):
+ return False
+ else:
+ return True
+target_is_armv7[vardepvalue] = "${@target_is_armv7(d)}"
+
+# Responsible for taking Yocto triples and converting them to Rust triples
+def rust_base_triple(d, thing):
+ '''
+ Mangle bitbake's *_SYS into something that rust might support (see
+ rust/mk/cfg/* for a list)
+
+ Note that os is assumed to be some linux form
+ '''
+
+ # The llvm-target for armv7 is armv7-unknown-linux-gnueabihf
+ if thing == "TARGET" and target_is_armv7(d):
+ arch = "armv7"
+ else:
+ arch = oe.rust.arch_to_rust_arch(d.getVar('{}_ARCH'.format(thing)))
+
+ # All the Yocto targets are Linux and are 'unknown'
+ vendor = "-unknown"
+ os = d.getVar('{}_OS'.format(thing))
+ libc = determine_libc(d, thing)
+
+ # Prefix with a dash and convert glibc -> gnu
+ if libc == "glibc":
+ libc = "-gnu"
+ elif libc == "musl":
+ libc = "-musl"
+
+ # Don't double up musl (only appears to be the case on aarch64)
+ if os == "linux-musl":
+ if libc != "-musl":
+ bb.fatal("{}_OS was '{}' but TCLIBC was not 'musl'".format(thing, os))
+ os = "linux"
+
+ # This catches ARM targets and appends the necessary hard float bits
+ if os == "linux-gnueabi" or os == "linux-musleabi":
+ libc = bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hf', '', d)
+ return arch + vendor + '-' + os + libc
+
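+# For example, under typical tune settings this maps a glibc x86-64 target
+# to x86_64-unknown-linux-gnu, a musl aarch64 target to
+# aarch64-unknown-linux-musl, and a hard-float armv7 glibc target to
+# armv7-unknown-linux-gnueabihf.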
+
+# In some cases uname and the toolchain differ on their idea of the arch name
+RUST_BUILD_ARCH = "${@oe.rust.arch_to_rust_arch(d.getVar('BUILD_ARCH'))}"
+
+# Naming explanation
+# Yocto
+# - BUILD_SYS - Yocto triple of the build environment
+# - HOST_SYS - What we're building for in Yocto
+# - TARGET_SYS - What we're building for in Yocto
+#
+# So when building '-native' packages BUILD_SYS == HOST_SYS == TARGET_SYS
+# When building packages for the image HOST_SYS == TARGET_SYS
+# This is a gross oversimplification as there are other modes, but
+# currently this is all that's supported.
+#
+# Rust
+# - TARGET - the system where the binary will run
+# - HOST - the system where the binary is being built
+#
+# Rust additionally will use two additional cases:
+# - undecorated (e.g. CC) - equivalent to TARGET
+# - triple suffix (e.g. CC:x86_64_unknown_linux_gnu) - both
+# see: https://github.com/alexcrichton/gcc-rs
+# Given the way that Rust's internal triples and Yocto triples are mapped
+# together, it's likely best not to use the triple suffix, to avoid confusion.
+
+RUST_BUILD_SYS = "${@rust_base_triple(d, 'BUILD')}"
+RUST_HOST_SYS = "${@rust_base_triple(d, 'HOST')}"
+RUST_TARGET_SYS = "${@rust_base_triple(d, 'TARGET')}"
+
+# Wrappers to get around the fact that Rust needs a single
+# binary while Yocto's compiler and linker commands have
+# arguments. Technically the archiver is always one command, but
+# this is necessary for builds that determine the prefix and then
+# use those commands based on the prefix.
+WRAPPER_DIR = "${WORKDIR}/wrapper"
+RUST_BUILD_CC = "${WRAPPER_DIR}/build-rust-cc"
+RUST_BUILD_CXX = "${WRAPPER_DIR}/build-rust-cxx"
+RUST_BUILD_CCLD = "${WRAPPER_DIR}/build-rust-ccld"
+RUST_BUILD_AR = "${WRAPPER_DIR}/build-rust-ar"
+RUST_TARGET_CC = "${WRAPPER_DIR}/target-rust-cc"
+RUST_TARGET_CXX = "${WRAPPER_DIR}/target-rust-cxx"
+RUST_TARGET_CCLD = "${WRAPPER_DIR}/target-rust-ccld"
+RUST_TARGET_AR = "${WRAPPER_DIR}/target-rust-ar"
+
+create_wrapper () {
+ file="$1"
+ shift
+
+ cat <<- EOF > "${file}"
+ #!/usr/bin/env python3
+ import os, sys
+ orig_binary = "$@"
+ binary = orig_binary.split()[0]
+ args = orig_binary.split() + sys.argv[1:]
+ os.execvp(binary, args)
+ EOF
+ chmod +x "${file}"
+}
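+#
+# Usage sketch (arguments hypothetical): calling
+#   create_wrapper "${WRAPPER_DIR}/target-rust-ccld" "${CCLD}" "${LDFLAGS}"
+# writes an executable Python script whose orig_binary is that full
+# command line, so rustc sees a single linker "binary" while the extra
+# flags still reach the real tool via os.execvp().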
+
+export WRAPPER_TARGET_CC = "${CC}"
+export WRAPPER_TARGET_CXX = "${CXX}"
+export WRAPPER_TARGET_CCLD = "${CCLD}"
+export WRAPPER_TARGET_LDFLAGS = "${LDFLAGS}"
+export WRAPPER_TARGET_AR = "${AR}"
+
+# compiler is used by gcc-rs
+# linker is used by rustc/cargo
+# archiver is used by the build of libstd-rs
+do_rust_create_wrappers () {
+ mkdir -p "${WRAPPER_DIR}"
+
+ # Yocto Build / Rust Host C compiler
+ create_wrapper "${RUST_BUILD_CC}" "${BUILD_CC}"
+ # Yocto Build / Rust Host C++ compiler
+ create_wrapper "${RUST_BUILD_CXX}" "${BUILD_CXX}"
+ # Yocto Build / Rust Host linker
+ create_wrapper "${RUST_BUILD_CCLD}" "${BUILD_CCLD}" "${BUILD_LDFLAGS}"
+ # Yocto Build / Rust Host archiver
+ create_wrapper "${RUST_BUILD_AR}" "${BUILD_AR}"
+
+ # Yocto Target / Rust Target C compiler
+ create_wrapper "${RUST_TARGET_CC}" "${WRAPPER_TARGET_CC}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target C++ compiler
+ create_wrapper "${RUST_TARGET_CXX}" "${WRAPPER_TARGET_CXX}"
+ # Yocto Target / Rust Target linker
+ create_wrapper "${RUST_TARGET_CCLD}" "${WRAPPER_TARGET_CCLD}" "${WRAPPER_TARGET_LDFLAGS}"
+ # Yocto Target / Rust Target archiver
+ create_wrapper "${RUST_TARGET_AR}" "${WRAPPER_TARGET_AR}"
+
+}
+
+addtask rust_create_wrappers before do_configure after do_patch do_prepare_recipe_sysroot
+do_rust_create_wrappers[dirs] += "${WRAPPER_DIR}"
diff --git a/meta/classes/rust.bbclass b/meta/classes/rust.bbclass
new file mode 100644
index 0000000000..5c8938d09f
--- /dev/null
+++ b/meta/classes/rust.bbclass
@@ -0,0 +1,45 @@
+inherit rust-common
+
+RUSTC = "rustc"
+
+RUSTC_ARCHFLAGS += "--target=${HOST_SYS} ${RUSTFLAGS}"
+
+def rust_base_dep(d):
+ # Taken from meta/classes/base.bbclass `base_dep_prepend` and modified to
+ # use rust instead of gcc
+ deps = ""
+ if not d.getVar('INHIBIT_DEFAULT_RUST_DEPS'):
+ if (d.getVar('HOST_SYS') != d.getVar('BUILD_SYS')):
+ deps += " virtual/${TARGET_PREFIX}rust ${RUSTLIB_DEP}"
+ else:
+ deps += " rust-native"
+ return deps
+
+DEPENDS:append = " ${@rust_base_dep(d)}"
+
+# BUILD_LDFLAGS
+# ${STAGING_LIBDIR_NATIVE}
+# ${STAGING_BASE_LIBDIR_NATIVE}
+# BUILDSDK_LDFLAGS
+# ${STAGING_LIBDIR}
+# #{STAGING_DIR_HOST}
+# TARGET_LDFLAGS ?????
+#RUSTC_BUILD_LDFLAGS = "\
+# --sysroot ${STAGING_DIR_NATIVE} \
+# -L${STAGING_LIBDIR_NATIVE} \
+# -L${STAGING_BASE_LIBDIR_NATIVE} \
+#"
+
+# XXX: for some reason bitbake sets BUILD_* & TARGET_* but uses the bare
+# variables for HOST. Alias things to make it easier for us.
+HOST_LDFLAGS ?= "${LDFLAGS}"
+HOST_CFLAGS ?= "${CFLAGS}"
+HOST_CXXFLAGS ?= "${CXXFLAGS}"
+HOST_CPPFLAGS ?= "${CPPFLAGS}"
+
+rustlib_suffix="${TUNE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/rustlib/${HOST_SYS}/lib"
+# Native sysroot standard library path
+rustlib_src="${prefix}/lib/${rustlib_suffix}"
+# Host sysroot standard library path
+rustlib="${libdir}/${rustlib_suffix}"
+rustlib:class-native="${libdir}/rustlib/${BUILD_SYS}/lib"
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 374dacf4d0..92807dc88e 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -2,7 +2,7 @@
# Sanity check the users setup for common misconfigurations
#
-SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
+SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
gzip gawk chrpath wget cpio perl file which"
def bblayers_conf_file(d):
@@ -185,37 +185,6 @@ def raise_sanity_error(msg, d, network_error=False):
%s""" % msg)
-# Check flags associated with a tuning.
-def check_toolchain_tune_args(data, tune, multilib, errs):
- found_errors = False
- if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
- found_errors = True
-
- return found_errors
-
-def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
- args_missing = []
-
- # If no args are listed/required, we are done.
- if not args_wanted:
- return
- for arg in args_wanted:
- if arg not in args_set:
- args_missing.append(arg)
-
- found_errors = False
- if args_missing:
- found_errors = True
- tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
- (tune, ' '.join(args_missing), which, ' '.join(args_set)))
- return found_errors
-
# Check a single tune for validity.
def check_toolchain_tune(data, tune, multilib):
tune_errors = []
@@ -227,7 +196,7 @@ def check_toolchain_tune(data, tune, multilib):
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
localdata.setVar("OVERRIDES", overrides)
bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
+ features = (localdata.getVar("TUNE_FEATURES:tune-%s" % tune) or "").split()
if not features:
return "Tuning '%s' has no defined features, and cannot be used." % tune
valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
@@ -247,17 +216,6 @@ def check_toolchain_tune(data, tune, multilib):
bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
else:
tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST")
- if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
- if not tuneabi:
- tuneabi = tune
- if True not in [x in whitelist.split() for x in tuneabi.split()]:
- tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
- (tune, tuneabi))
- else:
- if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
- bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
if tune_errors:
return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
@@ -281,7 +239,7 @@ def check_toolchain(data):
seen_libs.append(lib)
if not lib in global_multilibs:
tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
+ tune = data.getVar("DEFAULTTUNE:virtclass-multilib-%s" % lib)
if tune in seen_tunes:
tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
else:
@@ -338,7 +296,7 @@ def check_path_length(filepath, pathname, limit):
def get_filesystem_id(path):
import subprocess
try:
- return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8')
+ return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
except subprocess.CalledProcessError:
bb.warn("Can't get filesystem id of: %s" % path)
return None
@@ -392,9 +350,12 @@ def check_connectivity(d):
msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
if len(msg) == 0:
msg = "%s.\n" % err
- msg += " Please ensure your host's network is configured correctly,\n"
- msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
- msg += " all required sources are on local disk.\n"
+ msg += " Please ensure your host's network is configured correctly.\n"
+ msg += " If your ISP or network is blocking the above URL,\n"
+ msg += " try with another domain name, for example by setting:\n"
+ msg += " CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\""
+ msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
+ msg += " access if all required sources are on local disk.\n"
retval = msg
return retval
@@ -459,13 +420,12 @@ def check_sanity_validmachine(sanity_data):
# Patch before 2.7 can't handle all the features in git-style diffs. Some
# patches may incorrectly apply, and others won't apply at all.
def check_patch_version(sanity_data):
- from distutils.version import LooseVersion
import re, subprocess
try:
result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
- if LooseVersion(version) < LooseVersion("2.7"):
+ if bb.utils.vercmp_string_op(version, "2.7", "<"):
return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
else:
return None
@@ -475,7 +435,6 @@ def check_patch_version(sanity_data):
# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
def check_make_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
@@ -483,7 +442,7 @@ def check_make_version(sanity_data):
except subprocess.CalledProcessError as e:
return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) == LooseVersion("3.82"):
+ if bb.utils.vercmp_string_op(version, "3.82", "=="):
# Construct a test file
f = open("makefile_test", "w")
f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
@@ -511,42 +470,64 @@ def check_make_version(sanity_data):
return None
-# Check if we're running on WSL (Windows Subsystem for Linux). Its known not to
-# work but we should tell the user that upfront.
+# Check if we're running on WSL (Windows Subsystem for Linux).
+# WSLv1 is known not to work, but WSLv2 should work properly as
+# long as the VHDX file is optimized often; let the user know
+# upfront.
+# More information on installing WSLv2 at:
+# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
def check_wsl(d):
with open("/proc/version", "r") as f:
verdata = f.readlines()
for l in verdata:
if "Microsoft" in l:
- return "OpenEmbedded doesn't work under WSL at this time, sorry"
+ return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
+ elif "microsoft" in l:
+ bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
+ return None
+
+# Require at least gcc version 7.5.
+#
+# This can be fixed on CentOS-7 with devtoolset-6+
+# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
+#
+# A less invasive fix is with scripts/install-buildtools (or with user
+# built buildtools-extended-tarball)
+#
+def check_gcc_version(sanity_data):
+ import subprocess
+
+ build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
+ if build_cc.strip() == "gcc":
+ if bb.utils.vercmp_string_op(version, "7.5", "<"):
+ return "Your version of gcc is older than 7.5 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
return None
# Tar version 1.24 and onwards handle overwriting symlinks correctly
# but earlier versions do not; this needs to work properly for sstate
+# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
def check_tar_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[3]
- if LooseVersion(version) < LooseVersion("1.24"):
- return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
+ if bb.utils.vercmp_string_op(version, "1.28", "<"):
+ return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
return None
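# For reference, bb.utils.vercmp_string_op(a, b, op) compares two version
# strings with the given operator, e.g. vercmp_string_op("1.27.1", "1.28", "<")
# is True; it replaces the distutils LooseVersion comparisons removed here.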
# We use git parameters and functionality only found in 1.7.8 or later
# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
def check_git_version(sanity_data):
- from distutils.version import LooseVersion
import subprocess
try:
result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
except subprocess.CalledProcessError as e:
return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[2]
- if LooseVersion(version) < LooseVersion("1.8.3.1"):
+ if bb.utils.vercmp_string_op(version, "1.8.3.1", "<"):
return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
return None
@@ -560,7 +541,7 @@ def check_perl_modules(sanity_data):
try:
subprocess.check_output(["perl", "-e", "use %s" % m])
except subprocess.CalledProcessError as e:
- errresult += e.output
+ errresult += bytes.decode(e.output)
ret += "%s " % m
if ret:
return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
@@ -573,11 +554,29 @@ def sanity_check_conffiles(d):
if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
d.getVar(current_version) != d.getVar(required_version):
try:
- bb.build.exec_func(func, d, pythonexception=True)
+ bb.build.exec_func(func, d)
except NotImplementedError as e:
bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
+def drop_v14_cross_builds(d):
+ import glob
+ indexes = glob.glob(d.expand("${SSTATE_MANIFESTS}/index-${BUILD_ARCH}_*"))
+ for i in indexes:
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in reversed(lines):
+ try:
+ (stamp, manifest, workdir) = l.split()
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+ for m in glob.glob(manifest + ".*"):
+ if m.endswith(".postrm"):
+ continue
+ sstate_clean_manifest(m, d)
+ bb.utils.remove(stamp + "*")
+ bb.utils.remove(workdir, recurse = True)
+
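+# Each sstate manifest index line is expected to hold three
+# whitespace-separated fields (paths hypothetical):
+#   STAMPS_DIR/foo.do_populate_sysroot SSTATE_MANIFESTS/manifest-build-foo.populate_sysroot WORKDIR/foo/1.0-r0
+# which is why the (stamp, manifest, workdir) unpacking above treats
+# anything else as fatal.
+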
def sanity_handle_abichanges(status, d):
#
# Check the 'ABI' of TMPDIR
@@ -594,6 +593,12 @@ def sanity_handle_abichanges(status, d):
f.write(current_abi)
elif int(abi) <= 11 and current_abi == "12":
status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
+ elif int(abi) <= 13 and current_abi == "14":
+ status.addresult("TMPDIR changed to include path filtering from the pseudo database.\nIt is recommended to use a clean TMPDIR with the new pseudo path filtering so TMPDIR (%s) would need to be removed to continue.\n" % d.getVar("TMPDIR"))
+ elif int(abi) == 14 and current_abi == "15":
+ drop_v14_cross_builds(d)
+ with open(abifile, "w") as f:
+ f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
@@ -622,14 +627,16 @@ def check_sanity_version_change(status, d):
# In other words, these tests run once in a given build directory and then
# never again until the sanity version or host distribution id/version changes.
- # Check the python install is complete. glib-2.0-natives requries
- # xml.parsers.expat
+ # Check the Python install is complete. Examples that are often removed in
+ # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
+ # requires distutils.sysconfig.
try:
import xml.parsers.expat
- except ImportError:
- status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
- import stat
+ import distutils.sysconfig
+ except ImportError as e:
+ status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
+ status.addresult(check_gcc_version(d))
status.addresult(check_make_version(d))
status.addresult(check_patch_version(d))
status.addresult(check_tar_version(d))
@@ -664,6 +671,7 @@ def check_sanity_version_change(status, d):
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ import stat
tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
@@ -672,6 +680,23 @@ def check_sanity_version_change(status, d):
if (tmpdirmode & stat.S_ISUID):
status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
+ # Check that a user isn't building in a path in PSEUDO_IGNORE_PATHS
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ workdir = d.getVar('WORKDIR', expand=True)
+ for i in pseudoignorepaths:
+ if i and workdir.startswith(i):
+ status.addresult("You are building in a path included in PSEUDO_IGNORE_PATHS " + str(i) + " please locate the build outside this path.\n")
+
+ # Check if PSEUDO_IGNORE_PATHS and paths under pseudo control overlap
+ pseudoignorepaths = d.getVar('PSEUDO_IGNORE_PATHS', expand=True).split(",")
+ pseudo_control_dir = "${D},${PKGD},${PKGDEST},${IMAGEROOTFS},${SDK_OUTPUT}"
+ pseudocontroldir = d.expand(pseudo_control_dir).split(",")
+ for i in pseudoignorepaths:
+ for j in pseudocontroldir:
+ if i and j:
+ if j.startswith(i):
+ status.addresult("A path included in PSEUDO_IGNORE_PATHS " + str(i) + " and the path " + str(j) + " overlap and this will break pseudo permission and ownership tracking. Please set the path " + str(j) + " to a different directory which does not overlap with pseudo controlled directories. \n")
+
# Some third-party software apparently relies on chmod etc. being suid root (!!)
import stat
suid_check_bins = "chown chmod mknod".split()
@@ -739,15 +764,14 @@ def check_sanity_everybuild(status, d):
if 0 == os.getuid():
raise_sanity_error("Do not use Bitbake as root.", d)
- # Check the Python version, we now have a minimum of Python 3.4
+ # Check the Python version, we now have a minimum of Python 3.6
import sys
- if sys.hexversion < 0x03040000:
- status.addresult('The system requires at least Python 3.4 to run. Please update your Python interpreter.\n')
+ if sys.hexversion < 0x030600F0:
+ status.addresult('The system requires at least Python 3.6 to run. Please update your Python interpreter.\n')
# Check the bitbake version meets minimum requirements
- from distutils.version import LooseVersion
minversion = d.getVar('BB_MIN_VERSION')
- if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
+ if bb.utils.vercmp_string_op(bb.__version__, minversion, "<"):
status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
sanity_check_locale(d)
@@ -756,6 +780,17 @@ def check_sanity_everybuild(status, d):
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+ # Check if bitbake is present in the PATH environment variable
+ bb_check = bb.utils.which(d.getVar('PATH'), 'bitbake')
+ if not bb_check:
+ bb.warn("bitbake binary is not found in PATH, did you source the script?")
+
+ # Check whether the 'inherit' directive is used (it is meant for classes
+ # and recipes); in conf files the uppercase INHERIT should be used instead
+ inherit = d.getVar('inherit')
+ if inherit:
+ status.addresult("Please don't use the inherit directive in your local.conf. The directive is meant to be used in classes and recipes only, to inherit bbclasses. In configuration files, use INHERIT instead.\n")
+
# Check that the DISTRO is valid, if set
# need to take into account DISTRO renaming DISTRO
distro = d.getVar('DISTRO')
@@ -798,6 +833,11 @@ def check_sanity_everybuild(status, d):
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
+ # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
+ sdkvendor = d.getVar("SDK_VENDOR")
+ if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
+ status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
+
check_supported_distro(d)
omask = os.umask(0o022)
@@ -818,20 +858,25 @@ def check_sanity_everybuild(status, d):
except:
pass
- oeroot = d.getVar('COREBASE')
- if oeroot.find('+') != -1:
- status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
- if oeroot.find('@') != -1:
- status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
- if oeroot.find(' ') != -1:
- status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
+ for checkdir in ['COREBASE', 'TMPDIR']:
+ val = d.getVar(checkdir)
+ if val.find('..') != -1:
+ status.addresult("Error, you have '..' in your %s directory path. Please ensure the variable contains an absolute path as this can break some recipe builds in obtuse ways." % checkdir)
+ if val.find('+') != -1:
+ status.addresult("Error, you have an invalid character (+) in your %s directory path. Please move the installation to a directory which doesn't include any + characters." % checkdir)
+ if val.find('@') != -1:
+ status.addresult("Error, you have an invalid character (@) in your %s directory path. Please move the installation to a directory which doesn't include any @ characters." % checkdir)
+ if val.find(' ') != -1:
+ status.addresult("Error, you have a space in your %s directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this." % checkdir)
+ if val.find('%') != -1:
+ status.addresult("Error, you have an invalid character (%) in your %s directory path which causes problems with python string formatting. Please move the installation to a directory which doesn't include any % characters." % checkdir)
# Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
import re
mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
protocols = ['http', 'ftp', 'file', 'https', \
'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
+ 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3', 'az' ]
for mirror_var in mirror_vars:
mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
@@ -876,7 +921,7 @@ def check_sanity_everybuild(status, d):
with open(checkfile, "r") as f:
saved_tmpdir = f.read().strip()
if (saved_tmpdir != tmpdir):
- status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
+ status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
else:
bb.utils.mkdirhier(tmpdir)
# Remove setuid, setgid and sticky bits from TMPDIR
@@ -919,7 +964,7 @@ def check_sanity(sanity_data):
last_tmpdir = ""
last_sstate_dir = ""
last_nativelsbstr = ""
- sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
+ sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
if os.path.exists(sanityverfile):
with open(sanityverfile, 'r') as f:
for line in f:
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 9ee7d1587d..80f8382107 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -1,11 +1,13 @@
-DEPENDS += "python-scons-native"
+inherit python3native
+
+DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
do_configure() {
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ if [ -n "${CONFIGURESTAMPFILE}" -a "${S}" = "${B}" ]; then
if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
- ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
fi
mkdir -p `dirname ${CONFIGURESTAMPFILE}`
@@ -14,12 +16,12 @@ do_configure() {
}
scons_do_compile() {
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
die "scons build execution failed."
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
+ ${STAGING_BINDIR_NATIVE}/scons --directory=${S} install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
die "scons install execution failed."
}
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
deleted file mode 100644
index a923ea3c4a..0000000000
--- a/meta/classes/setuptools.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-inherit distutils
-
-DEPENDS += "python-setuptools-native"
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/setuptools3-base.bbclass
index 94b5fd426d..15abe1dd63 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/setuptools3-base.bbclass
@@ -1,3 +1,7 @@
+DEPENDS:append:class-target = " ${PYTHON_PN}-native ${PYTHON_PN}"
+DEPENDS:append:class-nativesdk = " ${PYTHON_PN}-native ${PYTHON_PN}"
+RDEPENDS:${PN}:append:class-target = " ${PYTHON_PN}-core"
+
export STAGING_INCDIR
export STAGING_LIBDIR
@@ -11,15 +15,17 @@ export LDCXXSHARED = "${CXX} -shared"
export CCSHARED = "-fPIC -DPIC"
# LINKFORSHARED are the flags passed to the $(CC) command that links
# the python executable
-export LINKFORSHARED = "{SECURITY_CFLAGS} -Xlinker -export-dynamic"
+export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
-FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+FILES:${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
-FILES_${PN}-staticdev += "\
+FILES:${PN}-staticdev += "\
${PYTHON_SITEPACKAGES_DIR}/*.a \
"
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/pkgconfig \
${libdir}/pkgconfig \
${PYTHON_SITEPACKAGES_DIR}/*.la \
"
+inherit python3native python3targetconfig
+
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
index 8ca66ee708..556bc801af 100644
--- a/meta/classes/setuptools3.bbclass
+++ b/meta/classes/setuptools3.bbclass
@@ -1,4 +1,33 @@
-inherit distutils3
+inherit setuptools3-base python_pep517
-DEPENDS += "python3-setuptools-native"
+# bdist_wheel builds in ./dist
+#B = "${WORKDIR}/build"
+SETUPTOOLS_BUILD_ARGS ?= ""
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_do_configure() {
+ :
+}
+
+setuptools3_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ bdist_wheel --verbose --dist-dir ${PEP517_WHEEL_PATH} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py bdist_wheel ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_do_compile[vardepsexclude] = "MACHINE"
+do_compile[cleandirs] += "${PEP517_WHEEL_PATH}"
+
+setuptools3_do_install() {
+ python_pep517_do_install
+}
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native python3-wheel-native"
diff --git a/meta/classes/setuptools3_legacy.bbclass b/meta/classes/setuptools3_legacy.bbclass
new file mode 100644
index 0000000000..5a99daadb5
--- /dev/null
+++ b/meta/classes/setuptools3_legacy.bbclass
@@ -0,0 +1,78 @@
+# This class is for packages which use the deprecated setuptools behaviour,
+# specifically custom install tasks which don't work correctly with bdist_wheel.
+# This behaviour is deprecated in setuptools[1] and won't work in the future, so
+# all users of this should consider their options: pure Python modules can use a
+# modern Python tool such as build[2], or packages which are doing more (such as
+# installing init scripts) should use a fully-featured build system such as Meson.
+#
+# [1] https://setuptools.pypa.io/en/latest/history.html#id142
+# [2] https://pypi.org/project/build/
+
+inherit setuptools3-base
+
+B = "${WORKDIR}/build"
+
+SETUPTOOLS_BUILD_ARGS ?= ""
+SETUPTOOLS_INSTALL_ARGS ?= "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+SETUPTOOLS_PYTHON = "python3"
+SETUPTOOLS_PYTHON:class-native = "nativepython3"
+
+SETUPTOOLS_SETUP_PATH ?= "${S}"
+
+setuptools3_legacy_do_configure() {
+ :
+}
+
+setuptools3_legacy_do_compile() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ NO_FETCH_BUILD=1 \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} ${SETUPTOOLS_BUILD_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py build ${SETUPTOOLS_BUILD_ARGS}' execution failed."
+}
+setuptools3_legacy_do_compile[vardepsexclude] = "MACHINE"
+
+setuptools3_legacy_do_install() {
+ cd ${SETUPTOOLS_SETUP_PATH}
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
+ build --build-base=${B} install --skip-build ${SETUPTOOLS_INSTALL_ARGS} || \
+ bbfatal_log "'${PYTHON_PN} setup.py install ${SETUPTOOLS_INSTALL_ARGS}' execution failed."
+
+ # support filenames with *spaces*
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \;
+
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${SETUPTOOLS_PYTHON}:g $i
+ sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
+ fi
+ done
+
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if [ -e ${D}${datadir}/share ]; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ rmdir ${D}${datadir}/share
+ fi
+}
+setuptools3_legacy_do_install[vardepsexclude] = "MACHINE"
+
+EXPORT_FUNCTIONS do_configure do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
+DEPENDS += "python3-setuptools-native"
+
diff --git a/meta/classes/setuptools_build_meta.bbclass b/meta/classes/setuptools_build_meta.bbclass
new file mode 100644
index 0000000000..b2bba35a0b
--- /dev/null
+++ b/meta/classes/setuptools_build_meta.bbclass
@@ -0,0 +1,5 @@
+inherit setuptools3-base python_pep517
+
+DEPENDS += "python3-setuptools-native python3-wheel-native"
+
+PEP517_BUILD_API = "setuptools.build_meta"
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 7ff3a35a2f..16bcd147aa 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -29,7 +29,7 @@ PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
# Make feed signing key to be present in rootfs
-FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
python () {
# Check sanity of configuration
diff --git a/meta/classes/sign_rpm.bbclass b/meta/classes/sign_rpm.bbclass
index 64ae7ce30e..73a55a512d 100644
--- a/meta/classes/sign_rpm.bbclass
+++ b/meta/classes/sign_rpm.bbclass
@@ -64,6 +64,7 @@ python sign_rpm () {
d.getVar('RPM_FSK_PATH'),
d.getVar('RPM_FSK_PASSWORD'))
}
+sign_rpm[vardepsexclude] += "RPM_GPG_SIGN_CHUNK"
do_package_index[depends] += "signing-keys:do_deploy"
do_rootfs[depends] += "signing-keys:do_populate_sysroot"
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index 411e70478e..3555d5a663 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -35,7 +35,6 @@ def siteinfo_data_for_machine(arch, os, d):
"lm32": "endian-big bit-32",
"m68k": "endian-big bit-32",
"microblaze": "endian-big bit-32 microblaze-common",
- "microblazeeb": "endian-big bit-32 microblaze-common",
"microblazeel": "endian-little bit-32 microblaze-common",
"mips": "endian-big bit-32 mips-common",
"mips64": "endian-big bit-64 mips-common",
@@ -46,15 +45,19 @@ def siteinfo_data_for_machine(arch, os, d):
"mipsisa32r6": "endian-big bit-32 mips-common",
"mipsisa32r6el": "endian-little bit-32 mips-common",
"powerpc": "endian-big bit-32 powerpc-common",
+ "powerpcle": "endian-little bit-32 powerpc-common",
"nios2": "endian-little bit-32 nios2-common",
"powerpc64": "endian-big bit-64 powerpc-common",
+ "powerpc64le": "endian-little bit-64 powerpc-common",
"ppc": "endian-big bit-32 powerpc-common",
"ppc64": "endian-big bit-64 powerpc-common",
"ppc64le" : "endian-little bit-64 powerpc-common",
"riscv32": "endian-little bit-32 riscv-common",
"riscv64": "endian-little bit-64 riscv-common",
"sh3": "endian-little bit-32 sh-common",
+ "sh3eb": "endian-big bit-32 sh-common",
"sh4": "endian-little bit-32 sh-common",
+ "sh4eb": "endian-big bit-32 sh-common",
"sparc": "endian-big bit-32",
"viac3": "endian-little bit-32 ix86-common",
"x86_64": "endian-little", # bitinfo specified in targetinfo
@@ -88,8 +91,6 @@ def siteinfo_data_for_machine(arch, os, d):
"arm-linux-musleabi": "arm-linux",
"armeb-linux-gnueabi": "armeb-linux",
"armeb-linux-musleabi": "armeb-linux",
- "microblazeeb-linux" : "microblaze-linux",
- "microblazeeb-linux-musl" : "microblaze-linux",
"microblazeel-linux" : "microblaze-linux",
"microblazeel-linux-musl" : "microblaze-linux",
"mips-linux-musl": "mips-linux",
@@ -100,14 +101,18 @@ def siteinfo_data_for_machine(arch, os, d):
"mips64el-linux-gnun32": "mipsel-linux bit-32",
"mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
"mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
- "powerpc-linux": "powerpc32-linux",
- "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux": "powerpc-linux",
- "powerpc64-linux-musl": "powerpc-linux",
+ "powerpc-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpcle-linux": "powerpc32-linux powerpc32-linux-glibc",
+ "powerpcle-linux-musl": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux powerpc32-linux-glibc",
+ "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux powerpc32-linux-musl",
+ "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
+ "powerpc64le-linux": "powerpc-linux powerpc64-linux powerpc64-linux-glibc",
+ "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux powerpc64-linux-musl",
"riscv32-linux": "riscv32-linux",
"riscv32-linux-musl": "riscv32-linux",
"riscv64-linux": "riscv64-linux",
@@ -171,17 +176,39 @@ python () {
bb.fatal("Please add your architecture to siteinfo.bbclass")
}
-def siteinfo_get_files(d, sysrootcache = False):
+# Layers with siteconfig need to add a replacement path to this variable so the
+# sstate isn't path specific
+SITEINFO_PATHVARS = "COREBASE"
+
+def siteinfo_get_files(d, sysrootcache=False):
sitedata = siteinfo_data(d)
- sitefiles = ""
+ sitefiles = []
+ searched = []
for path in d.getVar("BBPATH").split(":"):
for element in sitedata:
filename = os.path.join(path, "site", element)
if os.path.exists(filename):
- sitefiles += filename + " "
+ searched.append(filename + ":True")
+ sitefiles.append(filename)
+ else:
+ searched.append(filename + ":False")
+
+ # Have to parameterise out hardcoded paths such as COREBASE for the main site files
+ for var in d.getVar("SITEINFO_PATHVARS").split():
+ searched2 = []
+ replace = os.path.normpath(d.getVar(var))
+ for s in searched:
+ searched2.append(s.replace(replace, "${" + var + "}"))
+ searched = searched2
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ # We need sstate sigs for native/cross not to vary upon arch so we can't depend on the site files.
+ # In future we may want to depend upon all site files?
+ # This would show up as breaking sstatetests.SStateTests.test_sstate_32_64_same_hash for example
+ searched = []
if not sysrootcache:
- return sitefiles
+ return sitefiles, searched
# Now check for siteconfig cache files in sysroots
path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
@@ -190,8 +217,8 @@ def siteinfo_get_files(d, sysrootcache = False):
if not i.endswith("_config"):
continue
filename = os.path.join(path_siteconfig, i)
- sitefiles += filename + " "
- return sitefiles
+ sitefiles.append(filename)
+ return sitefiles, searched
#
# Make some information available via variables
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
deleted file mode 100644
index fb78e274a8..0000000000
--- a/meta/classes/spdx.bbclass
+++ /dev/null
@@ -1,360 +0,0 @@
-# This class integrates real-time license scanning, generation of SPDX standard
-# output and verifiying license info during the building process.
-# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
-#
-# For more information on FOSSology:
-# http://www.fossology.org
-#
-# For more information on FOSSologySPDX commandline:
-# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
-#
-# For more information on SPDX:
-# http://www.spdx.org
-#
-
-# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR]
-# in ./meta/conf/licenses.conf.
-
-SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
-
-# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
-# the real top-level directory.
-SPDX_S ?= "${S}"
-
-python do_spdx () {
- import os, sys
- import json, shutil
-
- info = {}
- info['workdir'] = d.getVar('WORKDIR')
- info['sourcedir'] = d.getVar('SPDX_S')
- info['pn'] = d.getVar('PN')
- info['pv'] = d.getVar('PV')
- info['spdx_version'] = d.getVar('SPDX_VERSION')
- info['data_license'] = d.getVar('DATA_LICENSE')
-
- sstatedir = d.getVar('SPDXSSTATEDIR')
- sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
-
- manifest_dir = d.getVar('SPDX_MANIFEST_DIR')
- info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
-
- info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR')
- info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
-
- # Make sure important dirs exist
- try:
- bb.utils.mkdirhier(manifest_dir)
- bb.utils.mkdirhier(sstatedir)
- bb.utils.mkdirhier(info['spdx_temp_dir'])
- except OSError as e:
- bb.error("SPDX: Could not set up required directories: " + str(e))
- return
-
- ## get everything from cache. use it to decide if
- ## something needs to be rerun
- cur_ver_code = get_ver_code(info['sourcedir'])
- cache_cur = False
- if os.path.exists(sstatefile):
- ## cache for this package exists. read it in
- cached_spdx = get_cached_spdx(sstatefile)
-
- if cached_spdx['PackageVerificationCode'] == cur_ver_code:
- bb.warn("SPDX: Verification code for " + info['pn']
- + "is same as cache's. do nothing")
- cache_cur = True
- else:
- local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
- else:
- local_file_info = setup_foss_scan(info, False, None)
-
- if cache_cur:
- spdx_file_info = cached_spdx['Files']
- foss_package_info = cached_spdx['Package']
- foss_license_info = cached_spdx['Licenses']
- else:
- ## setup fossology command
- foss_server = d.getVar('FOSS_SERVER')
- foss_flags = d.getVar('FOSS_WGET_FLAGS')
- foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False
- foss_command = "wget %s --post-file=%s %s"\
- % (foss_flags, info['tar_file'], foss_server)
-
- foss_result = run_fossology(foss_command, foss_full_spdx)
- if foss_result is not None:
- (foss_package_info, foss_file_info, foss_license_info) = foss_result
- spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
- ## write to cache
- write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
- spdx_file_info, foss_license_info)
- else:
- bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
- return
-
- ## Get document and package level information
- spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
-
- ## CREATE MANIFEST
- create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
-
- ## clean up the temp stuff
- shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
- if os.path.exists(info['tar_file']):
- remove_file(info['tar_file'])
-}
-addtask spdx after do_patch before do_configure
-
-def create_manifest(info, header, files, licenses):
- import codecs
- with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
- # Write header
- f.write(header + '\n')
-
- # Write file data
- for chksum, block in files.iteritems():
- f.write("FileName: " + block['FileName'] + '\n')
- for key, value in block.iteritems():
- if not key == 'FileName':
- f.write(key + ": " + value + '\n')
- f.write('\n')
-
- # Write license data
- for id, block in licenses.iteritems():
- f.write("LicenseID: " + id + '\n')
- for key, value in block.iteritems():
- f.write(key + ": " + value + '\n')
- f.write('\n')
-
-def get_cached_spdx(sstatefile):
- import json
- import codecs
- cached_spdx_info = {}
- with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
- try:
- cached_spdx_info = json.load(f)
- except ValueError as e:
- cached_spdx_info = None
- return cached_spdx_info
-
-def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
- import json
- import codecs
- spdx_doc = {}
- spdx_doc['PackageVerificationCode'] = ver_code
- spdx_doc['Files'] = {}
- spdx_doc['Files'] = files
- spdx_doc['Package'] = {}
- spdx_doc['Package'] = package_info
- spdx_doc['Licenses'] = {}
- spdx_doc['Licenses'] = license_info
- with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
- f.write(json.dumps(spdx_doc))
-
-def setup_foss_scan(info, cache, cached_files):
- import errno, shutil
- import tarfile
- file_info = {}
- cache_dict = {}
-
- for f_dir, f in list_files(info['sourcedir']):
- full_path = os.path.join(f_dir, f)
- abs_path = os.path.join(info['sourcedir'], full_path)
- dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
- dest_path = os.path.join(info['spdx_temp_dir'], full_path)
-
- checksum = hash_file(abs_path)
- if not checksum is None:
- file_info[checksum] = {}
- ## retain cache information if it exists
- if cache and checksum in cached_files:
- file_info[checksum] = cached_files[checksum]
- ## have the file included in what's sent to the FOSSology server
- else:
- file_info[checksum]['FileName'] = full_path
- try:
- bb.utils.mkdirhier(dest_dir)
- shutil.copyfile(abs_path, dest_path)
- except OSError as e:
- bb.warn("SPDX: mkdirhier failed: " + str(e))
- except shutil.Error as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- except IOError as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- else:
- bb.warn("SPDX: Could not get checksum for file: " + f)
-
- with tarfile.open(info['tar_file'], "w:gz") as tar:
- tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
-
- return file_info
-
-def remove_file(file_name):
- try:
- os.remove(file_name)
- except OSError as e:
- pass
-
-def list_files(dir):
- for root, subFolders, files in os.walk(dir):
- for f in files:
- rel_root = os.path.relpath(root, dir)
- yield rel_root, f
- return
-
-def hash_file(file_name):
- from bb.utils import sha1_file
- return sha1_file(file_name)
-
-def hash_string(data):
- import hashlib
- sha1 = hashlib.sha1()
- sha1.update(data.encode('utf-8'))
- return sha1.hexdigest()
-
-def run_fossology(foss_command, full_spdx):
- import string, re
- import subprocess
-
- try:
- foss_output = subprocess.check_output(foss_command.split(),
- stderr=subprocess.STDOUT).decode('utf-8')
- except subprocess.CalledProcessError as e:
- return None
-
- foss_output = foss_output.replace('\r', '')
-
- # Package info
- package_info = {}
- if full_spdx:
- # All mandatory, only one occurrence
- package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
- package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
- package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
- # These may be more than one
- package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
- else:
- DEFAULT = "NOASSERTION"
- package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
- package_info['PackageLicenseDeclared'] = DEFAULT
- package_info['PackageLicenseConcluded'] = DEFAULT
- package_info['PackageLicenseInfoFromFiles'] = []
-
- # File info
- file_info = {}
- records = []
- # FileName is also in PackageFileName, so we match on FileType as well.
- records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
- for rec in records:
- chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
- file_info[chksum] = {}
- file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
- + '(.*?</text>)', rec, re.S )[0]
- fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
- for field in fields:
- file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
-
- # Licenses
- license_info = {}
- licenses = []
- licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
- for lic in licenses:
- license_id = re.findall('LicenseID: (.*)\n', lic)[0]
- license_info[license_id] = {}
- license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
- license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
-
- return (package_info, file_info, license_info)
-
-def create_spdx_doc(file_info, scanned_files):
- import json
- ## push foss changes back into cache
- for chksum, lic_info in scanned_files.iteritems():
- if chksum in file_info:
- file_info[chksum]['FileType'] = lic_info['FileType']
- file_info[chksum]['FileChecksum: SHA1'] = chksum
- file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
- file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
- file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
- else:
- bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
- + " : is not in the local file info: "
- + json.dumps(lic_info, indent=1))
- return file_info
-
-def get_ver_code(dirname):
- chksums = []
- for f_dir, f in list_files(dirname):
- path = os.path.join(dirname, f_dir, f)
- hash = hash_file(path)
- if not hash is None:
- chksums.append(hash)
- else:
- bb.warn("SPDX: Could not hash file: " + path)
- ver_code_string = ''.join(chksums).lower()
- ver_code = hash_string(ver_code_string)
- return ver_code
-
-def get_header_info(info, spdx_verification_code, package_info):
- """
- Put together the header SPDX information.
- Eventually this needs to become a lot less
- of a hardcoded thing.
- """
- from datetime import datetime
- import os
- head = []
- DEFAULT = "NOASSERTION"
-
- package_checksum = hash_file(info['tar_file'])
- if package_checksum is None:
- package_checksum = DEFAULT
-
- ## document level information
- head.append("## SPDX Document Information")
- head.append("SPDXVersion: " + info['spdx_version'])
- head.append("DataLicense: " + info['data_license'])
- head.append("DocumentComment: <text>SPDX for "
- + info['pn'] + " version " + info['pv'] + "</text>")
- head.append("")
-
- ## Creator information
- ## Note that this does not give time in UTC.
- now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
- head.append("## Creation Information")
- ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
- head.append("Creator: Tool: FOSSology+SPDX")
- head.append("Created: " + now)
- head.append("CreatorComment: <text>UNO</text>")
- head.append("")
-
- ## package level information
- head.append("## Package Information")
- head.append("PackageName: " + info['pn'])
- head.append("PackageVersion: " + info['pv'])
- head.append("PackageFileName: " + os.path.basename(info['tar_file']))
- head.append("PackageSupplier: Person:" + DEFAULT)
- head.append("PackageDownloadLocation: " + DEFAULT)
- head.append("PackageSummary: <text></text>")
- head.append("PackageOriginator: Person:" + DEFAULT)
- head.append("PackageChecksum: SHA1: " + package_checksum)
- head.append("PackageVerificationCode: " + spdx_verification_code)
- head.append("PackageDescription: <text>" + info['pn']
- + " version " + info['pv'] + "</text>")
- head.append("")
- head.append("PackageCopyrightText: "
- + package_info['PackageCopyrightText'])
- head.append("")
- head.append("PackageLicenseDeclared: "
- + package_info['PackageLicenseDeclared'])
- head.append("PackageLicenseConcluded: "
- + package_info['PackageLicenseConcluded'])
-
- for licref in package_info['PackageLicenseInfoFromFiles']:
- head.append("PackageLicenseInfoFromFiles: " + licref)
- head.append("")
-
- ## header for file level
- head.append("## File Information")
- head.append("")
-
- return '\n'.join(head)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 6f51d9c187..1c0cae4893 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1,21 +1,45 @@
-SSTATE_VERSION = "3"
+SSTATE_VERSION = "8"
+
+SSTATE_ZSTD_CLEVEL ??= "8"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
-def generate_sstatefn(spec, hash, d):
+def generate_sstatefn(spec, hash, taskname, siginfo, d):
+ if taskname is None:
+ return ""
+ extension = ".tar.zst"
+ # 8 chars reserved for siginfo
+ limit = 254 - 8
+ if siginfo:
+ limit = 254
+ extension = ".tar.zst.siginfo"
if not hash:
hash = "INVALID"
- return hash[:2] + "/" + spec + hash
+ fn = spec + hash + "_" + taskname + extension
+ # If the filename is too long, attempt to reduce it
+ if len(fn) > limit:
+ components = spec.split(":")
+ # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
+ # 7 is for the separators
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ components[2] = components[2][:avail]
+ components[3] = components[3][:avail]
+ components[4] = components[4][:avail]
+ spec = ":".join(components)
+ fn = spec + hash + "_" + taskname + extension
+ if len(fn) > limit:
+ bb.fatal("Unable to reduce sstate name to less than 255 characters")
+ return hash[:2] + "/" + hash[2:4] + "/" + fn
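
To see how the reduction above stays under the 254-byte filename limit, here is the same arithmetic run standalone: the mandatory fields (0, 5, 6) and PN (field 1) are kept whole, and the spare budget is split three ways across the arch triplet, PV and PR. The values below are hypothetical; on a real build the spec comes from SSTATE_PKGSPEC:

hashval, taskname, extension = "a" * 64, "populate_sysroot", ".tar.zst"
limit = 254 - 8  # 8 chars reserved for a possible .siginfo suffix

spec = "sstate:mypackage:" + "x" * 140 + ":1.0:r0:core2-64:8:"
components = spec.split(":")
fn = spec + hashval + "_" + taskname + extension
if len(fn) > limit:
    fixed = len(hashval + "_" + taskname + extension)
    avail = (limit - fixed - len(components[0]) - len(components[1])
             - len(components[5]) - len(components[6]) - 7) // 3
    components[2:5] = [c[:avail] for c in components[2:5]]  # arch triplet, PV, PR
    fn = ":".join(components) + hashval + "_" + taskname + extension
print(len(fn), len(fn) <= limit)  # 167 True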
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"
@@ -26,42 +50,50 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
+SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
-SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
-SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
+SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
+SSTATE_HASHEQUIV_FILEMAP ?= " \
+ populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
+ populate_sysroot:*/postinst-useradd-*:${COREBASE} \
+ populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
+ populate_sysroot:*/crossscripts/*:${TMPDIR} \
+ populate_sysroot:*/crossscripts/*:${COREBASE} \
+ "
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
SSTATE_ARCHS = " \
${BUILD_ARCH} \
+ ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
- ${BUILD_ARCH}_${TARGET_ARCH} \
${SDK_ARCH}_${SDK_OS} \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
${PACKAGE_ARCH} \
${PACKAGE_EXTRA_ARCHS} \
${MACHINE_ARCH}"
+SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
@@ -83,17 +115,15 @@ SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+# List of signatures to consider valid.
+SSTATE_VALID_SIGS ??= ""
+SSTATE_VALID_SIGS[vardepvalue] = ""
SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
the output hash for a task, which in turn is used to determine equivalency. \
"
-SSTATE_HASHEQUIV_SERVER ?= ""
-SSTATE_HASHEQUIV_SERVER[doc] = "The hash equivalence server. For example, \
- 'http://192.168.0.1:5000'. Do not include a trailing slash \
- "
-
SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
hash equivalency server, such as PN, PV, taskname, etc. This information \
@@ -107,7 +137,7 @@ python () {
elif bb.data.inherits_class('crosssdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
elif bb.data.inherits_class('nativesdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
@@ -127,6 +157,8 @@ python () {
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+ d.setVarFlag(task, 'network', '1')
+ d.setVarFlag(task + "_setscene", 'network', '1')
}
def sstate_init(task, d):
@@ -227,13 +259,13 @@ def sstate_install(ss, d):
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
+ overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
match = []
for f in sharedfiles:
if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
- for w in whitelist:
+ for w in overlap_allowed:
w = os.path.normpath(w)
if f.startswith(w):
realmatch = False
@@ -263,7 +295,7 @@ def sstate_install(ss, d):
"DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
- "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
"also be your build is including two different conflicting versions of " \
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
@@ -298,6 +330,8 @@ def sstate_install(ss, d):
if os.path.exists(i):
with open(i, "r") as f:
manifests = f.readlines()
+ # We append new entries; we don't remove older entries, which may have the same
+ # manifest name but different versions from stamp/workdir. See below.
if filedata not in manifests:
with open(i, "a+") as f:
f.write(filedata)
@@ -315,32 +349,36 @@ def sstate_install(ss, d):
for lock in locks:
bb.utils.unlockfile(lock)
-sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX"
+sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES STATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
from oe.gpg_sign import get_signer
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
+ sstatefetch = d.getVar('SSTATE_PKGNAME')
+ sstatepkg = d.getVar('SSTATE_PKG')
if not os.path.exists(sstatepkg):
pstaging_fetch(sstatefetch, d)
if not os.path.isfile(sstatepkg):
- bb.note("Staging package %s does not exist" % sstatepkg)
+ bb.note("Sstate package %s does not exist" % sstatepkg)
return False
sstate_clean(ss, d)
d.setVar('SSTATE_INSTDIR', sstateinst)
- d.setVar('SSTATE_PKG', sstatepkg)
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
+ if not os.path.isfile(sstatepkg + '.sig'):
+ bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
signer = get_signer(d, 'local')
- if not signer.verify(sstatepkg + '.sig'):
- bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
+ if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
+ bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
# Empty sstateinst directory, ensure its clean
if os.path.exists(sstateinst):
@@ -376,7 +414,7 @@ def sstate_installpkgdir(ss, d):
for state in ss['dirs']:
prepdir(state[1])
- os.rename(sstateinst + state[0], state[1])
+ bb.utils.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
for plain in ss['plaindirs']:
@@ -388,7 +426,7 @@ def sstate_installpkgdir(ss, d):
dest = plain
bb.utils.mkdirhier(src)
prepdir(dest)
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
return True
@@ -444,8 +482,9 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
if d.getVarFlag('do_%s' % ss['task'], 'task'):
+ d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
bb.note("Removing %s" % sstatepkgfile)
oe.path.remove(sstatepkgfile)
@@ -455,7 +494,7 @@ def sstate_clean_cachefiles(d):
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d, prefix=None):
+def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
import oe.path
mfile = open(manifest)
@@ -473,7 +512,9 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if entry.endswith("/"):
if os.path.islink(entry[:-1]):
os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
+ # Removing directories whilst builds are in progress exposes a race. Only
+ # do it in contexts where it is safe to do so.
os.rmdir(entry[:-1])
else:
os.remove(entry)
@@ -511,7 +552,7 @@ def sstate_clean(ss, d):
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
- sstate_clean_manifest(manifest, d)
+ sstate_clean_manifest(manifest, d, canrace=True)
for lock in locks:
bb.utils.unlockfile(lock)
@@ -612,14 +653,24 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG') + '_'+ ss['task'] + ".tgz"
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
+ d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
- bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
@@ -629,6 +680,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -639,7 +692,7 @@ def sstate_package(ss, d):
continue
bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- os.rename(state[1], sstatebuild + state[0])
+ bb.utils.rename(state[1], sstatebuild + state[0])
workdir = d.getVar('WORKDIR')
sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
@@ -649,25 +702,48 @@ def sstate_package(ss, d):
pdir = plain.replace(sharedworkdir, sstatebuild)
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
- os.rename(plain, pdir)
+ bb.utils.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_PKG', sstatepkg)
d.setVar('SSTATE_INSTDIR', sstatebuild)
if d.getVar('SSTATE_SKIP_CREATION') == '1':
return
+ sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
+ if d.getVar('SSTATE_SIG_KEY'):
+ sstate_create_package.append('sstate_sign_package')
+
for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
- ['sstate_report_unihash', 'sstate_create_package', 'sstate_sign_package'] + \
+ sstate_create_package + \
(d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
- bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ # SSTATE_PKG may have been changed by sstate_report_unihash
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
return
+sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
+
def pstaging_fetch(sstatefetch, d):
import bb.fetch2
@@ -686,10 +762,12 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('FILESPATH', dldir)
localdata.setVar('DL_DIR', dldir)
localdata.setVar('PREMIRRORS', mirrors)
+ localdata.setVar('SRCPV', d.getVar('SRCPV'))
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
# Try a fetch from the sstate mirror, if it fails just return and
@@ -703,16 +781,22 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('SRC_URI', srcuri)
try:
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.checkstatus()
fetcher.download()
except bb.fetch2.BBFetchException:
- break
+ pass
+
+pstaging_fetch[vardepsexclude] += "SRCPV"
+
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- bb.fatal("No suitable staging package found")
+ msg = "No sstate archive obtainable, will run full task instead."
+ bb.warn(msg)
+ raise bb.BBHandledException(msg)
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -747,40 +831,60 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
+ # Exit early if it already exists
+ if [ -e ${SSTATE_PKG} ]; then
+ touch ${SSTATE_PKG} 2>/dev/null || true
+ return
+ fi
+
+ mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
- fi
+ OPT="-cS"
+ ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
+ fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
set +e
- tar $OPT -f $TFILE *
+ tar -I "$ZSTD" $OPT -f $TFILE *
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
else
- tar $OPT --file=$TFILE --files-from=/dev/null
+ tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
- mv -f $TFILE ${SSTATE_PKG}
+ # Skip if it was already created by some other process
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
+ # Move into place using ln to attempt an atomic op.
+ # Abort if it already exists
+ ln $TFILE ${SSTATE_PKG} || true
+ else
+ touch ${SSTATE_PKG} 2>/dev/null || true
+ fi
+ rm $TFILE
}
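
The ln-based publish at the end of the function is deliberate: a hardlink either appears atomically or fails outright, so a racing builder can never half-overwrite a package the way mv -f could. A sketch of the same pattern in Python, assuming the temporary file was already written alongside the target:

import os

def publish(tmpfile, target):
    try:
        # Atomic: link() fails with EEXIST instead of overwriting.
        os.link(tmpfile, target)
    except FileExistsError:
        # Another process won the race; just freshen the mtime.
        os.utime(target)
    finally:
        os.remove(tmpfile)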
python sstate_sign_package () {
from oe.gpg_sign import get_signer
- if d.getVar('SSTATE_SIG_KEY'):
- signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG')
- if os.path.exists(sstate_pkg + '.sig'):
- os.unlink(sstate_pkg + '.sig')
- signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+
+ signer = get_signer(d, 'local')
+ sstate_pkg = d.getVar('SSTATE_PKG')
+ if os.path.exists(sstate_pkg + '.sig'):
+ os.unlink(sstate_pkg + '.sig')
+ signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+ d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
python sstate_report_unihash() {
@@ -796,40 +900,40 @@ python sstate_report_unihash() {
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
- tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ ZSTD="zstd -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -p ${ZSTD_THREADS}"
+ fi
+
+ tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
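
Pack and unpack select the compressor the same way; the shell test simply prefers the parallel pzstd frontend when it is on PATH. An equivalent selection helper in Python, assuming the standard zstd/pzstd thread flags:

import shutil

def zstd_cmd(threads, level=None):
    if shutil.which("pzstd"):
        cmd = ["pzstd", "-p", str(threads)]   # parallel frontend
    else:
        cmd = ["zstd", "-T%d" % threads]      # built-in threading
    if level is not None:
        cmd.insert(1, "-%d" % level)
    return cmd

print(zstd_cmd(8, level=8))  # e.g. ['zstd', '-8', '-T8']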
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False, *, sq_unihash=None):
-
- ret = []
- missed = []
- extension = ".tgz"
- if siginfo:
- extension = extension + ".siginfo"
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
+ found = set()
+ missed = set()
def gethash(task):
- if sq_unihash is not None:
- return sq_unihash[task]
- return sq_hash[task]
+ return sq_data['unihash'][task]
def getpathcomponents(task, d):
# Magic data from BB_HASHFILENAME
- splithashfn = sq_hashfn[task].split(" ")
+ splithashfn = sq_data['hashfn'][task].split(" ")
spec = splithashfn[1]
if splithashfn[0] == "True":
extrapath = d.getVar("NATIVELSBSTRING") + "/"
else:
extrapath = ""
-
- tname = sq_task[task][3:]
+
+ tname = bb.runqueue.taskname_from_tid(task)[3:]
if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
spec = splithashfn[2]
@@ -837,21 +941,22 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False, *,
return spec, extrapath, tname
+ def getsstatefile(tid, siginfo, d):
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
- for task in range(len(sq_fn)):
+ for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(task, d)
-
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(task), d) + "_" + tname + extension)
+ sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
if os.path.exists(sstatefile):
+ found.add(tid)
bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
- ret.append(task)
- continue
else:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+ foundLocal = len(found)
mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
@@ -867,7 +972,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False, *,
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
from bb.fetch2 import FetchConnectionCache
@@ -878,82 +984,88 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False, *,
thread_worker.connection_cache.close_connections()
def checkstatus(thread_worker, arg):
- (task, sstatefile) = arg
+ (tid, sstatefile) = arg
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+ import traceback
+
try:
fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
- ret.append(task)
- if task in missed:
- missed.remove(task)
- except:
- missed.append(task)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
- bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
+ found.add(tid)
+ missed.remove(tid)
+ except bb.fetch2.FetchError as e:
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
+
+ if progress:
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- for task in range(len(sq_fn)):
- if task in ret:
- continue
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(task), d) + "_" + tname + extension)
- tasklist.append((task, sstatefile))
+ for tid in missed:
+ sstatefile = d.expand(getsstatefile(tid, siginfo, d))
+ tasklist.append((tid, sstatefile))
if tasklist:
- msg = "Checking sstate mirror object availability"
- bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
-
- import multiprocessing
- nproc = min(multiprocessing.cpu_count(), len(tasklist))
-
- bb.event.enable_threadlock()
- pool = oe.utils.ThreadedPool(nproc, len(tasklist),
- worker_init=checkstatus_init, worker_end=checkstatus_end)
- for t in tasklist:
- pool.add_task(checkstatus, t)
- pool.start()
- pool.wait_completion()
- bb.event.disable_threadlock()
-
- bb.event.fire(bb.event.ProcessFinished(msg), d)
+ nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
+
+ progress = len(tasklist) >= 100
+ if progress:
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
+
+ # Have to setup the fetcher environment here rather than in each thread as it would race
+ fetcherenv = bb.fetch2.get_fetcher_environment(d)
+ with bb.utils.environment(**fetcherenv):
+ bb.event.enable_threadlock()
+ pool = oe.utils.ThreadedPool(nproc, len(tasklist),
+ worker_init=checkstatus_init, worker_end=checkstatus_end,
+ name="sstate_checkhashes-")
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.start()
+ pool.wait_completion()
+ bb.event.disable_threadlock()
+
+ if progress:
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
- for task in missed:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(task), d) + "_" + tname + ".tgz")
- evdata['missed'].append( (sq_fn[task], sq_task[task], gethash(task), sstatefile ) )
- for task in ret:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(task), d) + "_" + tname + ".tgz")
- evdata['found'].append( (sq_fn[task], sq_task[task], gethash(task), sstatefile ) )
+ for tid in missed:
+ sstatefile = d.expand(getsstatefile(tid, False, d))
+ evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
+ for tid in found:
+ sstatefile = d.expand(getsstatefile(tid, False, d))
+ evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
- # Print some summary statistics about the current task completion and how much sstate
- # reuse there was. Avoid divide by zero errors.
- total = len(sq_fn)
- currentcount = d.getVar("BB_SETSCENE_STAMPCURRENT_COUNT") or 0
- complete = 0
- if currentcount:
- complete = (len(ret) + currentcount) / (total + currentcount) * 100
- match = 0
- if total:
- match = len(ret) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(ret), len(missed), currentcount, match, complete))
+ if summary:
+ # Print some summary statistics about the current task completion and how much sstate
+ # reuse there was. Avoid divide by zero errors.
+ total = len(sq_data['hash'])
+ complete = 0
+ if currentcount:
+ complete = (len(found) + currentcount) / (total + currentcount) * 100
+ match = 0
+ if total:
+ match = len(found) / total * 100
+ bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
+ (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
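
A quick worked example of the two percentages printed above, with hypothetical counts; currentcount (setscene tasks already current) only lifts the completion figure, not the match rate:

total, found, currentcount = 100, 80, 20
match = found / total * 100                                       # 80
complete = (found + currentcount) / (total + currentcount) * 100  # ~83
print("%d%% match, %d%% complete" % (match, complete))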
if hasattr(bb.parse.siggen, "checkhashes"):
- bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+ bb.parse.siggen.checkhashes(sq_data, missed, found, d)
- return ret
+ return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -972,15 +1084,13 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
- # We only need to trigger populate_lic through direct dependencies
- if taskdependees[task][1] == "do_populate_lic":
- return True
-
- # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
- if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+ # We only need to trigger deploy_source_date_epoch through direct dependencies
+ if taskdependees[task][1] in directtasks:
return True
# We only need to trigger packagedata through direct dependencies
@@ -1003,8 +1113,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
return False
- # do_package/packagedata/package_qa don't need do_populate_sysroot
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
+ # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -1052,13 +1162,9 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# Target populate_sysroot need their dependencies
return False
- if taskdependees[task][1] == 'do_shared_workdir':
- continue
-
- if taskdependees[dep][1] == "do_populate_lic":
+ if taskdependees[dep][1] in directtasks:
continue
-
# Safe fallthrough default
logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
@@ -1068,29 +1174,45 @@ addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
- # When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG')
- if not spkg.endswith(".tgz"):
+ writtensstate = d.getVar('SSTATE_CURRTASK')
+ if not writtensstate:
taskname = d.getVar("BB_RUNTASK")[3:]
spec = d.getVar('SSTATE_PKGSPEC')
swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG')
- bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
+ d.setVar("SSTATE_CURRTASK", taskname)
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
+
}
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
-# Event handler which removes manifests and stamps file for
-# recipes which are no longer reachable in a build where they
-# once were.
+#
+# Event handler which removes manifests and stamp files for recipes which are no
+# longer 'reachable' in a build where they once were. 'Reachable' refers to
+# whether a recipe is parsed; recipes in a layer which was removed would no
+# longer be reachable. Switching between systemd and sysvinit where recipes
+# became skipped would be another example.
+#
# Also optionally removes the workdir of those tasks/recipes
#
-addhandler sstate_eventhandler2
-sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
-python sstate_eventhandler2() {
+addhandler sstate_eventhandler_reachablestamps
+sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
+python sstate_eventhandler_reachablestamps() {
import glob
d = e.data
stamps = e.stamps.values()
@@ -1116,11 +1238,21 @@ python sstate_eventhandler2() {
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
continue
+ manseen = set()
+ ignore = []
with open(i, "r") as f:
lines = f.readlines()
- for l in lines:
+ for l in reversed(lines):
try:
(stamp, manifest, workdir) = l.split()
+ # The index may have multiple entries for the same manifest as the code above only appends
+ # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
+ # The last entry in the list is the valid one; any earlier entries with matching manifests
+ # should be ignored.
+ if manifest in manseen:
+ ignore.append(l)
+ continue
+ manseen.add(manifest)
if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
toremove.append(l)
if stamp not in seen:
@@ -1151,6 +1283,8 @@ python sstate_eventhandler2() {
with open(i, "w") as f:
for l in lines:
+ if l in ignore:
+ continue
f.write(l)
machineindex |= set(stamps)
with open(mi, "w") as f:
@@ -1160,3 +1294,59 @@ python sstate_eventhandler2() {
if preservestamps:
os.remove(preservestampfile)
}
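
The reversed scan above implements keep-last de-duplication: the most recently appended line for a manifest wins, and earlier duplicates are queued on the ignore list so the rewrite drops them. A standalone sketch with two fabricated index lines:

lines = [
    "stampA manifest-core2-64-foo.populate_sysroot wd1\n",
    "stampB manifest-core2-64-foo.populate_sysroot wd2\n",  # newest entry
]
manseen, ignore = set(), []
for l in reversed(lines):
    stamp, manifest, workdir = l.split()
    if manifest in manseen:
        ignore.append(l)
        continue
    manseen.add(manifest)
kept = [l for l in lines if l not in ignore]
print(kept)  # only the stampB line survives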
+
+
+#
+# Bitbake can generate an event showing which setscene tasks are 'stale',
+# i.e. which ones will be rerun. These are ones where a stamp file is present but
+# it is stale (e.g. the taskhash doesn't match). With that list we can go through
+# the manifests for matching tasks and "uninstall" those manifests now. We do
+# this now rather than mid build since the distribution of files between sstate
+# objects may have changed, new tasks may run first and if those new tasks overlap
+# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
+# removing these files is fast.
+#
+addhandler sstate_eventhandler_stalesstate
+sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
+python sstate_eventhandler_stalesstate() {
+ d = e.data
+ tasks = e.tasks
+
+ bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+
+ for a in list(set(d.getVar("SSTATE_ARCHS").split())):
+ toremove = []
+ i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
+ if not os.path.exists(i):
+ continue
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ try:
+ (stamp, manifest, workdir) = l.split()
+ for tid in tasks:
+ for s in tasks[tid]:
+ if s.startswith(stamp):
+ taskname = bb.runqueue.taskname_from_tid(tid)[3:]
+ manname = manifest + "." + taskname
+ if os.path.exists(manname):
+ bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
+ toremove.append((manname, tid, tasks[tid]))
+ break
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+
+ if toremove:
+ msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
+ bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
+
+ removed = 0
+ for (manname, tid, stamps) in toremove:
+ sstate_clean_manifest(manname, d)
+ for stamp in stamps:
+ bb.utils.remove(stamp)
+ removed = removed + 1
+ bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
+}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 84e13bab59..ab827766be 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -5,6 +5,7 @@ SYSROOT_DIRS = " \
${base_libdir} \
${nonarch_base_libdir} \
${datadir} \
+ /sysroot-only \
"
# These directories are also staged in the sysroot when they contain files that
@@ -18,20 +19,25 @@ SYSROOT_DIRS_NATIVE = " \
${sysconfdir} \
${localstatedir} \
"
-SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
+SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
# These directories will not be staged in the sysroot
-SYSROOT_DIRS_BLACKLIST = " \
+SYSROOT_DIRS_IGNORE = " \
${mandir} \
${docdir} \
${infodir} \
- ${datadir}/locale \
+ ${datadir}/X11/locale \
${datadir}/applications \
+ ${datadir}/bash-completion \
${datadir}/fonts \
+ ${datadir}/gtk-doc/html \
+ ${datadir}/installed-tests \
+ ${datadir}/locale \
${datadir}/pixmaps \
- ${libdir}/${PN}/ptest \
+ ${datadir}/terminfo \
+ ${libdir}/${BPN}/ptest \
"
sysroot_stage_dir() {
@@ -43,9 +49,10 @@ sysroot_stage_dir() {
fi
mkdir -p "$dest"
+ rdest=$(realpath --relative-to="$src" "$dest")
(
cd $src
- find . -print0 | cpio --null -pdlu $dest
+ find . -print0 | cpio --null -pdlu $rdest
)
}
@@ -58,7 +65,7 @@ sysroot_stage_dirs() {
done
# Remove directories we do not care about
- for dir in ${SYSROOT_DIRS_BLACKLIST}; do
+ for dir in ${SYSROOT_DIRS_IGNORE}; do
rm -rf "$to$dir"
done
}
@@ -74,9 +81,9 @@ python sysroot_strip () {
dstdir = d.getVar('SYSROOT_DESTDIR')
pn = d.getVar('PN')
- libdir = os.path.abspath(dstdir + os.sep + d.getVar("libdir"))
- base_libdir = os.path.abspath(dstdir + os.sep + d.getVar("base_libdir"))
- qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
+ libdir = d.getVar("libdir")
+ base_libdir = d.getVar("base_libdir")
+ qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
strip_cmd = d.getVar("STRIP")
oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
@@ -84,7 +91,6 @@ python sysroot_strip () {
}
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[umask] = "022"
addtask populate_sysroot after do_install
@@ -92,12 +98,13 @@ SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
python do_populate_sysroot () {
+ # SYSROOT 'version' 2
bb.build.exec_func("sysroot_stage_all", d)
bb.build.exec_func("sysroot_strip", d)
for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
bb.build.exec_func(f, d)
pn = d.getVar("PN")
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
+ multiprov = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
bb.utils.mkdirhier(provdir)
for p in d.getVar("PROVIDES").split():
@@ -109,11 +116,11 @@ python do_populate_sysroot () {
}
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
-do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
+do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"
POPULATESYSROOTDEPS = ""
-POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
-POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
+POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
SSTATETASKS += "do_populate_sysroot"
@@ -167,7 +174,7 @@ def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
if not fixme:
return
cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
- for fixmevar in ['COMPONENTS_DIR', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
+ for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
fixme_path = d.getVar(fixmevar)
cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
bb.debug(2, cmd)
@@ -196,7 +203,11 @@ def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
for pkgarch in pkgarchs:
for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
if manifest.endswith("-initial.populate_sysroot"):
- # skip glibc-initial and libgcc-initial due to file overlap
+ # skip libgcc-initial due to file overlap
+ continue
+ if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
+ continue
+ if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
continue
tmanifest = targetdir + "/" + os.path.basename(manifest)
if os.path.exists(tmanifest):
@@ -256,12 +267,10 @@ python extend_recipe_sysroot() {
workdir = d.getVar("WORKDIR")
#bb.warn(str(taskdepdata))
pn = d.getVar("PN")
- mc = d.getVar("BB_CURRENT_MC")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
- current_variant = d.getVar("BBEXTENDVARIANT")
# Detect bitbake -b usage
nodeps = d.getVar("BB_LIMITEDDEPS") or False
@@ -274,11 +283,13 @@ python extend_recipe_sysroot() {
start = None
configuredeps = []
+ owntaskdeps = []
for dep in taskdepdata:
data = taskdepdata[dep]
if data[1] == mytaskname and data[0] == pn:
start = dep
- break
+ elif data[0] == pn:
+ owntaskdeps.append(data[1])
if start is None:
bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
@@ -296,6 +307,7 @@ python extend_recipe_sysroot() {
sstatetasks = d.getVar("SSTATETASKS").split()
# Add recipe specific tasks referenced by setscene_depvalid()
sstatetasks.append("do_stash_locale")
+ sstatetasks.append("do_deploy")
def print_dep_tree(deptree):
data = ""
@@ -399,7 +411,7 @@ python extend_recipe_sysroot() {
if os.path.islink(f) and not os.path.exists(f):
bb.note("%s no longer exists, removing from sysroot" % f)
lnk = os.readlink(f.replace(".complete", ""))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(f)
os.unlink(f.replace(".complete", ""))
@@ -424,7 +436,7 @@ python extend_recipe_sysroot() {
# Was likely already uninstalled
continue
potential.append(l)
- # We need to ensure not other task needs this dependency. We hold the sysroot
+ # We need to ensure no other task needs this dependency. We hold the sysroot
# lock so we can search the indexes to check
if potential:
for i in glob.glob(depdir + "/index.*"):
@@ -432,6 +444,11 @@ python extend_recipe_sysroot() {
continue
with open(i, "r") as f:
for l in f:
+ if l.startswith("TaskDeps:"):
+ prevtasks = l.split()[1:]
+ if mytaskname in prevtasks:
+ # We're a dependency of this task so we can clear items out of the sysroot
+ break
l = l.strip()
if l in potential:
potential.remove(l)
@@ -439,19 +456,15 @@ python extend_recipe_sysroot() {
fl = depdir + "/" + l
bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
lnk = os.readlink(fl)
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(fl)
os.unlink(fl + ".complete")
msg_exists = []
msg_adding = []
+ # Handle all removals first since files may move between recipes
for dep in configuredeps:
- if mc != 'default':
- # We should not care about other multiconfigs
- depmc = dep.split(':')[1]
- if depmc != mc:
- continue
c = setscenedeps[dep][0]
if c not in installed:
continue
@@ -461,17 +474,31 @@ python extend_recipe_sysroot() {
if os.path.exists(depdir + "/" + c):
lnk = os.readlink(depdir + "/" + c)
if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- msg_exists.append(c)
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(depdir + "/" + c)
if os.path.lexists(depdir + "/" + c + ".complete"):
os.unlink(depdir + "/" + c + ".complete")
elif os.path.lexists(depdir + "/" + c):
os.unlink(depdir + "/" + c)
+ binfiles = {}
+ # Now handle installs
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ msg_exists.append(c)
+ continue
+
msg_adding.append(c)
os.symlink(c + "." + taskhash, depdir + "/" + c)
@@ -550,7 +577,16 @@ python extend_recipe_sysroot() {
if l.endswith("/"):
staging_copydir(l, targetdir, dest, seendirs)
continue
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+ if "/bin/" in l or "/sbin/" in l:
+ # defer /*bin/* files until last in case they need libs
+ binfiles[l] = (targetdir, dest)
+ else:
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ # Handle deferred binfiles
+ for l in binfiles:
+ (targetdir, dest) = binfiles[l]
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
bb.note("Installed into sysroot: %s" % str(msg_adding))
bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
@@ -566,6 +602,7 @@ python extend_recipe_sysroot() {
os.symlink(manifests[dep], depdir + "/" + c + ".complete")
with open(taskindex, "w") as f:
+ f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
for l in sorted(installed):
f.write(l + "\n")
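
The new TaskDeps header makes each index self-describing: its first line records the recipe's own tasks at the time the sysroot was assembled, and the read side in extend_recipe_sysroot consults it (see the mytaskname check earlier in this file) to decide whether entries may safely be cleared. A minimal sketch of parsing such an index, with fabricated contents:

index_lines = ["TaskDeps: do_configure do_compile\n",
               "gettext-native\n", "quilt-native\n"]
prevtasks, installed = [], []
for l in index_lines:
    if l.startswith("TaskDeps:"):
        prevtasks = l.split()[1:]
        continue
    installed.append(l.strip())
print(prevtasks, installed)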
@@ -573,28 +610,51 @@ python extend_recipe_sysroot() {
}
extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
python do_prepare_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask do_prepare_recipe_sysroot before do_configure after do_fetch
-# Clean out the recipe specific sysroots before do_fetch
-# (use a prefunc so we can order before extend_recipe_sysroot if it gets added)
-python clean_recipe_sysroot() {
- # We remove these stamps since we're removing any content they'd have added with
- # cleandirs. This removes the sigdata too, likely not a big deal,
- oe.path.remove(d.getVar("STAMP") + "*addto_recipe_sysroot*")
- return
-}
-clean_recipe_sysroot[cleandirs] += "${RECIPE_SYSROOT} ${RECIPE_SYSROOT_NATIVE}"
-do_fetch[prefuncs] += "clean_recipe_sysroot"
-
python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
deps = d.getVarFlag(task, "depends")
- if deps and "populate_sysroot" in deps:
- d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
+ if task == "do_configure" or (deps and "populate_sysroot" in deps):
+ d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
+
+
+#
+# Target build output, stored by do_populate_sysroot or do_package, can depend
+# not only upon direct dependencies but also indirect ones. A good example is
+# linux-libc-headers. The toolchain depends on this but most target recipes do
+# not. Some headers are not used by the toolchain build and do not change the
+# toolchain task output, so those task hashes can change without changing the
+# sysroot output of that recipe, yet they can still influence other recipes.
+#
+# A specific example is rtc.h, which can change rtcwake.c in util-linux but is
+# not used in the glibc or gcc builds. To capture this, we need to include the
+# populate_sysroot hashes in the task output hashes.
+#
+python target_add_sysroot_deps () {
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ if current_task not in ["do_populate_sysroot", "do_package"]:
+ return
+
+ pn = d.getVar("PN")
+ if pn.endswith("-native"):
+ return
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = {}
+ for dep in taskdepdata.values():
+ if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0]:
+ deps[dep[0]] = dep[6]
+
+ d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
+}
+SSTATECREATEFUNCS += "target_add_sysroot_deps"
+
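To make the rationale above concrete, here is a minimal, self-contained sketch of the filtering target_add_sysroot_deps performs. The taskdepdata below is a hypothetical stand-in for BB_TASKDEPDATA (tuples where index 0 is the recipe name, index 1 the task name and index 6 the unihash); it is an illustration, not output from a real build:

    # Hypothetical BB_TASKDEPDATA-style entries: (pn, taskname, ..., unihash)
    taskdepdata = {
        "tid1": ("linux-libc-headers", "do_populate_sysroot", 0, 0, 0, 0, "aa11"),
        "tid2": ("quilt-native",       "do_populate_sysroot", 0, 0, 0, 0, "bb22"),
        "tid3": ("gcc-cross-x86_64",   "do_populate_sysroot", 0, 0, 0, 0, "cc33"),
    }

    deps = {}
    for dep in taskdepdata.values():
        # Keep only target populate_sysroot providers, dropping -native,
        # -initial and -cross- entries, exactly as the class does
        if dep[1] == "do_populate_sysroot" and \
                not dep[0].endswith(("-native", "-initial")) and \
                "-cross-" not in dep[0]:
            deps[dep[0]] = dep[6]

    # The string mixed into HASHEQUIV_EXTRA_SIGDATA, and hence the task hash
    print("\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps)))
    # -> linux-libc-headers: aa11

So a header change in linux-libc-headers that alters its populate_sysroot unihash now perturbs the do_package/do_populate_sysroot signatures of target recipes even when the directly depended-on toolchain output is unchanged.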
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
index 031dacbf7d..894f6b3718 100644
--- a/meta/classes/syslinux.bbclass
+++ b/meta/classes/syslinux.bbclass
@@ -75,11 +75,6 @@ syslinux_hddimg_install() {
syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
}
-syslinux_hdddirect_install() {
- DEST=$1
- syslinux $DEST
-}
-
python build_syslinux_cfg () {
import copy
import sys
diff --git a/meta/classes/systemd-boot-cfg.bbclass b/meta/classes/systemd-boot-cfg.bbclass
index 021c9f9331..b3e0e6ad41 100644
--- a/meta/classes/systemd-boot-cfg.bbclass
+++ b/meta/classes/systemd-boot-cfg.bbclass
@@ -2,6 +2,9 @@ SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
SYSTEMD_BOOT_ENTRIES ?= ""
SYSTEMD_BOOT_TIMEOUT ?= "10"
+# Uses the MACHINE-specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
# Need UUID utility code.
inherit fs-uuid
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
index 3cd6811a6c..57ec0acbc5 100644
--- a/meta/classes/systemd-boot.bbclass
+++ b/meta/classes/systemd-boot.bbclass
@@ -11,50 +11,25 @@
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-EFIDIR = "/EFI/BOOT"
+require conf/image-uefi.conf
# Need UUID utility code.
inherit fs-uuid
efi_populate() {
- DEST=$1
+ efi_populate_common "$1" systemd
- EFI_IMAGE="systemd-bootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="systemd-bootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
# systemd-boot requires these paths for configuration files;
# they are not customizable, so there is no point in new vars
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
for i in ${SYSTEMD_BOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
done
}
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+efi_iso_populate:append() {
cp -r $iso_dir/loader ${EFIIMGDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
}
inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
index c8f4fdec88..09ec52792d 100644
--- a/meta/classes/systemd.bbclass
+++ b/meta/classes/systemd.bbclass
@@ -1,9 +1,9 @@
# The list of packages that should have systemd packaging scripts added. For
-# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
+# each entry, optionally have a SYSTEMD_SERVICE:[package] that lists the service
# files in this package. If this variable isn't set, [package].service is used.
SYSTEMD_PACKAGES ?= "${PN}"
-SYSTEMD_PACKAGES_class-native ?= ""
-SYSTEMD_PACKAGES_class-nativesdk ?= ""
+SYSTEMD_PACKAGES:class-native ?= ""
+SYSTEMD_PACKAGES:class-nativesdk ?= ""
# Whether to enable or disable the services on installation.
SYSTEMD_AUTO_ENABLE ??= "enable"
@@ -23,38 +23,37 @@ python __anonymous() {
}
systemd_postinst() {
-OPTS=""
+if systemctl >/dev/null 2>/dev/null; then
+ OPTS=""
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
+ if [ -n "$D" ]; then
+ OPTS="--root=$D"
+ fi
-if type systemctl >/dev/null 2>/dev/null; then
- if [ -z "$D" ]; then
- systemctl daemon-reload
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ for service in ${SYSTEMD_SERVICE_ESCAPED}; do
+ systemctl ${OPTS} enable "$service"
+ done
fi
- systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE_ESCAPED}
+ if [ -z "$D" ]; then
+ systemctl daemon-reload
+ systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
- if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
- systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
fi
}
systemd_prerm() {
-OPTS=""
-
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
-
-if type systemctl >/dev/null 2>/dev/null; then
+if systemctl >/dev/null 2>/dev/null; then
if [ -z "$D" ]; then
systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
- fi
- systemctl $OPTS disable ${SYSTEMD_SERVICE_ESCAPED}
+ systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
}
@@ -71,7 +70,7 @@ python systemd_populate_packages() {
return
def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
+ val = (d.getVar('%s:%s' % (var, pkg)) or "").strip()
if val == "":
val = (d.getVar(var) or "").strip()
return val
@@ -86,39 +85,39 @@ python systemd_populate_packages() {
def systemd_generate_package_scripts(pkg):
bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
- paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
- d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
+ paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE:' + pkg).split())
+ d.setVar('SYSTEMD_SERVICE_ESCAPED:' + pkg, paths_escaped)
- # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
+ # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE:pkg
# variable.
localdata = d.createCopy()
localdata.prependVar("OVERRIDES", pkg + ":")
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('systemd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('systemd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- # Add files to FILES_*-systemd if existent and not already done
+ # Add files to FILES:*-systemd if they exist and are not already added
def systemd_append_file(pkg_systemd, file_append):
appended = False
if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
- var_name = "FILES_" + pkg_systemd
+ var_name = "FILES:" + pkg_systemd
files = d.getVar(var_name, False) or ""
if file_append not in files.split():
d.appendVar(var_name, " " + file_append)
appended = True
return appended
- # Add systemd files to FILES_*-systemd, parse for Also= and follow recursive
+ # Add systemd files to FILES:*-systemd, parse for Also= and follow recursively
def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
# avoid infinite recursion
if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
@@ -175,18 +174,32 @@ python systemd_populate_packages() {
if path_found != '':
systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
else:
- bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+ bb.fatal("Didn't find service unit '{0}', specified in SYSTEMD_SERVICE:{1}. {2}".format(
+ service, pkg_systemd, "Also looked for service unit '{0}'.".format(base) if base is not None else ""))
+
+ def systemd_create_presets(pkg, action):
+ presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
+ bb.utils.mkdirhier(os.path.dirname(presetf))
+ with open(presetf, 'a') as fd:
+ for service in d.getVar('SYSTEMD_SERVICE:%s' % pkg).split():
+ fd.write("%s %s\n" % (action,service))
+ d.appendVar("FILES:%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
# Run all modifications once when creating package
if os.path.exists(d.getVar("D")):
for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg):
+ if d.getVar('SYSTEMD_SERVICE:' + pkg):
systemd_generate_package_scripts(pkg)
+ action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
+ if action in ("enable", "disable"):
+ systemd_create_presets(pkg, action)
+ elif action not in ("mask", "preset"):
+ bb.fatal("SYSTEMD_AUTO_ENABLE:%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
systemd_check_services()
}
-PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
+PACKAGESPLITFUNCS:prepend = "systemd_populate_packages "
python rm_systemd_unitdir (){
import shutil
@@ -198,7 +211,6 @@ python rm_systemd_unitdir (){
if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
os.rmdir(systemd_libdir)
}
-do_install[postfuncs] += "rm_systemd_unitdir "
python rm_sysvinit_initddir (){
import shutil
@@ -213,4 +225,9 @@ python rm_sysvinit_initddir (){
if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
shutil.rmtree(sysv_initddir)
}
-do_install[postfuncs] += "rm_sysvinit_initddir "
+
+do_install[postfuncs] += "${RMINITDIR} "
+RMINITDIR:class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR:class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR = ""
+
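The systemd_create_presets helper above emits a standard systemd preset file per package, which the rewritten postinst applies via `systemctl preset` when installing on a running system, while image-time installs still use `systemctl --root=$D enable`. For a hypothetical package "foo" with SYSTEMD_AUTO_ENABLE = "enable" and SYSTEMD_SERVICE:foo = "foo.service foo.socket", the generated ${systemd_unitdir}/system-preset/98-foo.preset would contain:

    enable foo.service
    enable foo.socket

This moves the enable/disable policy into a file systemd itself evaluates, instead of hard-coding the decision into the scriptlets.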
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index 73e765d57a..a564ee7494 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -14,6 +14,7 @@ def oe_terminal_prioritized():
return " ".join(o.name for o in oe.terminal.prioritized())
def emit_terminal_func(command, envdata, d):
+ import bb.build
cmd_func = 'do_terminal'
envdata.setVar(cmd_func, 'exec ' + command)
@@ -25,8 +26,10 @@ def emit_terminal_func(command, envdata, d):
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
- script.write('#!/usr/bin/env %s\n' % d.getVar('SHELL'))
- script.write('set -e\n')
+ # Override the shell that shell_trap_code specifies.
+ # If our shell is bash, we might well face silent death.
+ script.write("#!/bin/bash\n")
+ script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
script.write("\n")
@@ -37,7 +40,7 @@ def emit_terminal_func(command, envdata, d):
def oe_terminal(command, title, d):
import oe.data
import oe.terminal
-
+
envdata = bb.data.init()
for v in os.environ:
diff --git a/meta/classes/testexport.bbclass b/meta/classes/testexport.bbclass
index 59cbaefbf9..1b0fb44a4a 100644
--- a/meta/classes/testexport.bbclass
+++ b/meta/classes/testexport.bbclass
@@ -137,7 +137,7 @@ def copy_needed_files(d, tc):
shutil.rmtree(os.path.join(subdir, dir))
# Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
+ testexport_create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
# Copy packages needed for runtime testing
package_extraction(d, tc.suites)
@@ -146,7 +146,7 @@ def copy_needed_files(d, tc):
export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
oe.path.copytree(test_pkg_dir, export_pkg_dir)
# Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
+ testexport_create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
# Copy SDK
if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
@@ -159,11 +159,11 @@ def copy_needed_files(d, tc):
shutil.copy2(tarball_path, export_sdk_dir)
# Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
+ testexport_create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
bb.plain("Exported tests to: %s" % export_path)
-def create_tarball(d, tar_name, src_dir):
+def testexport_create_tarball(d, tar_name, src_dir):
import tarfile
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index ff1c53b93e..898248992c 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -3,6 +3,8 @@
# Released under the MIT license (see COPYING.MIT)
inherit metadata_scm
+inherit image-artifact-names
+
# testimage.bbclass enables testing of qemu images using python unittests.
# Most of the tests are commands run on target image over ssh.
# To use it add testimage to global inherit and call your target image with -c testimage
@@ -31,8 +33,21 @@ TESTIMAGE_AUTO ??= "0"
# TEST_LOG_DIR contains an ssh command log and may contain information about what command is running, its output and return codes, and for qemu a boot log up to login.
# Booting is handled by this class, and it's not a test in itself.
# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
+# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
+# QEMU_USE_KVM can be set to "" to disable the use of kvm (by default it is enabled if target_arch == build_arch or both of them are x86 archs)
+
+# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting;
+# if a pattern is not specifically present in this variable, a default will be used when booting the target.
+# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags.
+# E.g. normally the system boots and waits for a login prompt (login:), then sends "root\n" to log in as the root user;
+# if we wanted to log in as the hypothetical "webserver" user instead, we could set the following:
+# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
+# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
+# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
+# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
+# They are prefixed with search/send to indicate whether the pattern is searched for in, or sent to, the target terminal.
TEST_LOG_DIR ?= "${WORKDIR}/testimage"
@@ -46,30 +61,32 @@ BASICTESTSUITE = "\
ping date df ssh scp python perl gi ptest parselogs \
logrotate connman systemd oe_syslog pam stap ldd xorg \
kernelmodule gcc buildcpio buildlzip buildgalculator \
- dnf rpm opkg apt"
+ dnf rpm opkg apt weston go rust"
DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
-# aarch64 has no graphics
-DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
# musl doesn't support systemtap
-DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
+DEFAULT_TEST_SUITES:remove:libc-musl = "stap"
# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster,
# mitigate this by removing build tests for qemumips machines.
MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
-DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
-DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips = "${MIPSREMOVE}"
+DEFAULT_TEST_SUITES:remove:qemumips64 = "${MIPSREMOVE}"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
+QEMU_USE_KVM ?= "1"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
+TEST_OVERALL_TIMEOUT ?= ""
TEST_TARGET ?= "qemu"
TEST_QEMUPARAMS ?= ""
TEST_RUNQEMUPARAMS ?= ""
+TESTIMAGE_BOOT_PATTERNS ?= ""
+
TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
+TESTIMAGEDEPENDS:append:qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
@@ -77,7 +94,7 @@ TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-na
TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
-TESTIMAGELOCK_qemuall = ""
+TESTIMAGELOCK:qemuall = ""
TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
@@ -110,12 +127,19 @@ testimage_dump_host () {
netstat -an
}
+testimage_dump_monitor () {
+ query-status
+ query-block
+ dump-guest-memory {"paging":false,"protocol":"file:%s.img"}
+}
+
python do_testimage() {
testimage_main(d)
}
addtask testimage
do_testimage[nostamp] = "1"
+do_testimage[network] = "1"
do_testimage[depends] += "${TESTIMAGEDEPENDS}"
do_testimage[lockfiles] += "${TESTIMAGELOCK}"
@@ -150,11 +174,35 @@ def get_testimage_json_result_dir(d):
def get_testimage_result_id(configuration):
return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
+def get_testimage_boot_patterns(d):
+ from collections import defaultdict
+ boot_patterns = defaultdict(str)
+ # Only accept certain values
+ accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
+ # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
+ boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
+ if boot_patterns_flags:
+ patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
+ for flag, flagval in patterns_set:
+ if flag not in accepted_patterns:
+ bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt, send_login_user, '
+ 'search_login_succeeded, search_cmd_finished.\nMake sure your TESTIMAGE_BOOT_PATTERNS=%s '
+ 'contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
+ return
+ # We know the boot prompt is searched for in binary form; the others might be expressions
+ if flag == 'search_reached_prompt':
+ boot_patterns[flag] = flagval.encode()
+ else:
+ boot_patterns[flag] = flagval.encode().decode('unicode-escape')
+ return boot_patterns
+
+
def testimage_main(d):
import os
import json
import signal
import logging
+ import shutil
from bb.utils import export_proxies
from oeqa.core.utils.misc import updateTestData
@@ -168,7 +216,11 @@ def testimage_main(d):
"""
Catch SIGTERM from worker in order to stop qemu.
"""
- raise RuntimeError
+ os.kill(os.getpid(), signal.SIGINT)
+
+ def handle_test_timeout(timeout):
+ bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout))
+ os.kill(os.getpid(), signal.SIGINT)
testimage_sanity(d)
@@ -186,9 +238,10 @@ def testimage_main(d):
tdname = "%s.testdata.json" % image_name
try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+ with open(tdname, "r") as f:
+ td = json.load(f)
+ except FileNotFoundError as err:
+ bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
# Some variables need to be updated (mostly paths) with the
# ones of the current environment because some tests require them.
@@ -207,10 +260,14 @@ def testimage_main(d):
if d.getVar("TEST_TARGET") == "qemu":
fstypes = [fs for fs in fstypes if fs in supported_fstypes]
if not fstypes:
- bb.fatal('Unsupported image type built. Add a comptible image to '
+ bb.fatal('Unsupported image type built. Add a compatible image to '
'IMAGE_FSTYPES. Supported types: %s' %
', '.join(supported_fstypes))
- rootfs = '%s.%s' % (image_name, fstypes[0])
+ qfstype = fstypes[0]
+ qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
+ if qdeffstype:
+ qfstype = qdeffstype
+ rootfs = '%s.%s' % (image_name, qfstype)
# Get tmpdir (not really used, just for compatibility)
tmpdir = d.getVar("TMPDIR")
@@ -235,11 +292,14 @@ def testimage_main(d):
# Get use_kvm
kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
+ # Get OVMF
+ ovmf = d.getVar("QEMU_USE_OVMF")
+
slirp = False
if d.getVar("QEMU_USE_SLIRP"):
slirp = True
- # TODO: We use the current implementatin of qemu runner because of
+ # TODO: We use the current implementation of qemu runner because of
# time constraints, qemu runner really needs a refactor too.
target_kwargs = { 'machine' : machine,
'rootfs' : rootfs,
@@ -252,11 +312,33 @@ def testimage_main(d):
'kvm' : kvm,
'slirp' : slirp,
'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
+ 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
+ 'ovmf' : ovmf,
+ 'tmpfsdir' : d.getVar("RUNQEMU_TMPFS_DIR"),
}
- # TODO: Currently BBPATH is needed for custom loading of targets.
- # It would be better to find these modules using instrospection.
- target_kwargs['target_modules_path'] = d.getVar('BBPATH')
+ if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
+ target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
+
+ # hardware controlled targets might need further access
+ target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
+ target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
+ target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['testimage_dump_monitor'] = d.getVar("testimage_dump_monitor") or ""
+ target_kwargs['testimage_dump_target'] = d.getVar("testimage_dump_target") or ""
+
+ def export_ssh_agent(d):
+ import os
+
+ variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
+ for v in variables:
+ if v not in os.environ.keys():
+ val = d.getVar(v)
+ if val is not None:
+ os.environ[v] = val
+
+ export_ssh_agent(d)
# runtime use network for download projects for build
export_proxies(d)
@@ -292,34 +374,50 @@ def testimage_main(d):
package_extraction(d, tc.suites)
results = None
+ complete = False
orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
try:
# We need to check if runqemu ends unexpectedly
# or if the worker send us a SIGTERM
tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
+ import threading
+ try:
+ threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
+ except ValueError:
+ pass
results = tc.runTests()
- except (RuntimeError, BlockingIOError) as err:
- if isinstance(err, RuntimeError):
- bb.error('testimage received SIGTERM, shutting down...')
+ complete = True
+ except (KeyboardInterrupt, BlockingIOError) as err:
+ if isinstance(err, KeyboardInterrupt):
+ bb.error('testimage interrupted, shutting down...')
else:
bb.error('runqemu failed, shutting down...')
if results:
results.stop()
- results = None
+ results = tc.results
finally:
signal.signal(signal.SIGTERM, orig_sigterm_handler)
tc.target.stop()
# Show results (if we have them)
- if not results:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
- configuration = get_testimage_configuration(d, 'runtime', machine)
- results.logDetails(get_testimage_json_result_dir(d),
- configuration,
- get_testimage_result_id(configuration))
- results.logSummary(pn)
+ if results:
+ configuration = get_testimage_configuration(d, 'runtime', machine)
+ results.logDetails(get_testimage_json_result_dir(d),
+ configuration,
+ get_testimage_result_id(configuration),
+ dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
+ results.logSummary(pn)
+
+ # Copy additional logs to tmp/log/oeqa so it's easier to find them
+ targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ os.makedirs(targetdir, exist_ok=True)
+ os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
+ os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
+
+ if not results or not complete:
+ bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
index 758a23ac55..8b2e74f606 100644
--- a/meta/classes/testsdk.bbclass
+++ b/meta/classes/testsdk.bbclass
@@ -36,12 +36,14 @@ python do_testsdk() {
}
addtask testsdk
do_testsdk[nostamp] = "1"
+do_testsdk[network] = "1"
python do_testsdkext() {
import_and_run('TESTSDKEXT_CLASS_NAME', d)
}
addtask testsdkext
do_testsdkext[nostamp] = "1"
+do_testsdkext[network] = "1"
python () {
if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
index 6b0def0eac..68c9d4fb70 100644
--- a/meta/classes/texinfo.bbclass
+++ b/meta/classes/texinfo.bbclass
@@ -6,13 +6,13 @@
# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
# makeinfo from SANITY_REQUIRED_UTILITIES.
-TEXDEP = "texinfo-native"
-TEXDEP_class-native = "texinfo-dummy-native"
-TEXDEP_class-cross = "texinfo-dummy-native"
-DEPENDS_append = " ${TEXDEP}"
-PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
-PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
+TEXDEP:class-native = "texinfo-dummy-native"
+TEXDEP:class-cross = "texinfo-dummy-native"
+TEXDEP:class-crosssdk = "texinfo-dummy-native"
+TEXDEP:class-cross-canadian = "texinfo-dummy-native"
+DEPENDS:append = " ${TEXDEP}"
# libtool-cross doesn't inherit cross
-TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
-PATH_prepend_pn-libtool-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+TEXDEP:pn-libtool-cross = "texinfo-dummy-native"
+
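With the TEXDEP change above, pulling in real texinfo tooling for target recipes is controlled by the distro feature rather than done unconditionally. A distro that wants documentation built only needs, for example:

    DISTRO_FEATURES:append = " api-documentation"

after which TEXDEP resolves to texinfo-replacement-native instead of texinfo-dummy-native for target builds.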
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
deleted file mode 100644
index 00f453cec1..0000000000
--- a/meta/classes/tinderclient.bbclass
+++ /dev/null
@@ -1,368 +0,0 @@
-def tinder_http_post(server, selector, content_type, body):
- import httplib
- # now post it
- for i in range(0,5):
- try:
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- #print(errcode, errmsg, headers)
- return (errcode,errmsg, headers, h.file)
- except:
- print("Error sending the report!")
- # try again
- pass
-
- # return some garbage
- return (-1, "unknown", "unknown", None)
-
-def tinder_form_data(bound, dict, log):
- output = []
- # for each key in the dictionary
- for name in dict:
- assert dict[name]
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="%s"' % name )
- output.append( "" )
- output.append( dict[name] )
- if log:
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
- output.append( '' )
- output.append( log )
- output.append( '--' + bound + '--' )
- output.append( '' )
-
- return "\r\n".join(output)
-
-def tinder_time_string():
- """
- Return the time as GMT
- """
- return ""
-
-def tinder_format_http_post(d,status,log):
- """
- Format the Tinderbox HTTP post with the data needed
- for the tinderbox to be happy.
- """
-
- import random
-
- # the variables we will need to send on this form post
- variables = {
- "tree" : d.getVar('TINDER_TREE'),
- "machine_name" : d.getVar('TINDER_MACHINE'),
- "os" : os.uname()[0],
- "os_version" : os.uname()[2],
- "compiler" : "gcc",
- "clobber" : d.getVar('TINDER_CLOBBER') or "0",
- "srcdate" : d.getVar('SRCDATE'),
- "PN" : d.getVar('PN'),
- "PV" : d.getVar('PV'),
- "PR" : d.getVar('PR'),
- "FILE" : d.getVar('FILE') or "N/A",
- "TARGETARCH" : d.getVar('TARGET_ARCH'),
- "TARGETFPU" : d.getVar('TARGET_FPU') or "Unknown",
- "TARGETOS" : d.getVar('TARGET_OS') or "Unknown",
- "MACHINE" : d.getVar('MACHINE') or "Unknown",
- "DISTRO" : d.getVar('DISTRO') or "Unknown",
- "zecke-rocks" : "sure",
- }
-
- # optionally add the status
- if status:
- variables["status"] = str(status)
-
- # try to load the machine id
- # we only need on build_status.pl but sending it
- # always does not hurt
- try:
- f = open(d.getVar('TMPDIR')+'/tinder-machine.id', 'r')
- id = f.read()
- variables['machine_id'] = id
- except:
- pass
-
- # the boundary we will need
- boundary = "----------------------------------%d" % int(random.random()*1000000000000)
-
- # now format the body
- body = tinder_form_data( boundary, variables, log )
-
- return ("multipart/form-data; boundary=%s" % boundary),body
-
-
-def tinder_build_start(d):
- """
- Inform the tinderbox that a build is starting. We do this
- by posting our name and tree to the build_start.pl script
- on the server.
- """
-
- # get the body and type
- content_type, body = tinder_format_http_post(d,None,None)
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_start.pl"
-
- #print("selector %s and url %s" % (selector, url))
-
- # now post it
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- report = h_file.read()
-
- # now let us find the machine id that was assigned to us
- search = "<machine id='"
- report = report[report.find(search)+len(search):]
- report = report[0:report.find("'")]
-
- bb.note("Machine ID assigned by tinderbox: %s" % report )
-
- # now we will need to save the machine number
- # we will override any previous numbers
- f = open(d.getVar('TMPDIR')+"/tinder-machine.id", 'w')
- f.write(report)
-
-
-def tinder_send_http(d, status, _log):
- """
- Send this log as build status
- """
-
- # get the body and type
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_status.pl"
-
- # now post it - in chunks of 10.000 characters
- new_log = _log
- while len(new_log) > 0:
- content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- #print(h.file.read())
- new_log = new_log[18000:]
-
-
-def tinder_print_info(d):
- """
- Print the TinderBox Info
- Including informations of the BaseSystem and the Tree
- we use.
- """
-
- # get the local vars
- time = tinder_time_string()
- ops = os.uname()[0]
- version = os.uname()[2]
- url = d.getVar('TINDER_URL')
- tree = d.getVar('TINDER_TREE')
- branch = d.getVar('TINDER_BRANCH')
- srcdate = d.getVar('SRCDATE')
- machine = d.getVar('MACHINE')
- distro = d.getVar('DISTRO')
- bbfiles = d.getVar('BBFILES')
- tarch = d.getVar('TARGET_ARCH')
- fpu = d.getVar('TARGET_FPU')
- oerev = d.getVar('OE_REVISION') or "unknown"
-
- # there is a bug with tipple quoted strings
- # i will work around but will fix the original
- # bug as well
- output = []
- output.append("== Tinderbox Info" )
- output.append("Time: %(time)s" )
- output.append("OS: %(ops)s" )
- output.append("%(version)s" )
- output.append("Compiler: gcc" )
- output.append("Tinderbox Client: 0.1" )
- output.append("Tinderbox Client Last Modified: yesterday" )
- output.append("Tinderbox Protocol: 0.1" )
- output.append("URL: %(url)s" )
- output.append("Tree: %(tree)s" )
- output.append("Config:" )
- output.append("branch = '%(branch)s'" )
- output.append("TARGET_ARCH = '%(tarch)s'" )
- output.append("TARGET_FPU = '%(fpu)s'" )
- output.append("SRCDATE = '%(srcdate)s'" )
- output.append("MACHINE = '%(machine)s'" )
- output.append("DISTRO = '%(distro)s'" )
- output.append("BBFILES = '%(bbfiles)s'" )
- output.append("OEREV = '%(oerev)s'" )
- output.append("== End Tinderbox Client Info" )
-
- # now create the real output
- return "\n".join(output) % vars()
-
-
-def tinder_print_env():
- """
- Print the environment variables of this build
- """
- time_start = tinder_time_string()
- time_end = tinder_time_string()
-
- # build the environment
- env = ""
- for var in os.environ:
- env += "%s=%s\n" % (var, os.environ[var])
-
- output = []
- output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
- output.append( env )
- output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
-
- return "\n".join(output) % vars()
-
-def tinder_tinder_start(d, event):
- """
- PRINT the configuration of this build
- """
-
- time_start = tinder_time_string()
- config = tinder_print_info(d)
- #env = tinder_print_env()
- time_end = tinder_time_string()
- packages = " ".join( event.getPkgs() )
-
- output = []
- output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
- output.append( config )
- #output.append( env )
- output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
- output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
- output.append( "<--- TINDERBOX STARTING BUILD NOW" )
-
- output.append( "" )
-
- return "\n".join(output) % vars()
-
-def tinder_do_tinder_report(event):
- """
- Report to the tinderbox:
- On the BuildStart we will inform the box directly
- On the other events we will write to the TINDER_LOG and
- when the Task is finished we will send the report.
-
- The above is not yet fully implemented. Currently we send
- information immediately. The caching/queuing needs to be
- implemented. Also sending more or less information is not
- implemented yet.
-
- We have two temporary files stored in the TMP directory. One file
- contains the assigned machine id for the tinderclient. This id gets
- assigned when we connect the box and start the build process the second
- file is used to workaround an EventHandler limitation. If BitBake is ran
- with the continue option we want the Build to fail even if we get the
- BuildCompleted Event. In this case we have to look up the status and
- send it instead of 100/success.
- """
- import glob
-
- # variables
- name = bb.event.getName(event)
- log = ""
- status = 1
- # Check what we need to do Build* shows we start or are done
- if name == "BuildStarted":
- tinder_build_start(event.data)
- log = tinder_tinder_start(event.data,event)
-
- try:
- # truncate the tinder log file
- f = open(event.data.getVar('TINDER_LOG'), 'w')
- f.write("")
- f.close()
- except:
- pass
-
- try:
- # write a status to the file. This is needed for the -k option
- # of BitBake
- g = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- g.write("")
- g.close()
- except IOError:
- pass
-
- # Append the Task-Log (compile,configure...) to the log file
- # we will send to the server
- if name == "TaskSucceeded" or name == "TaskFailed":
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T'), event.task))
-
- if len(log_file) != 0:
- to_file = event.data.getVar('TINDER_LOG')
- log += "".join(open(log_file[0], 'r').readlines())
-
- # set the right 'HEADER'/Summary for the TinderBox
- if name == "TaskStarted":
- log += "---> TINDERBOX Task %s started\n" % event.task
- elif name == "TaskSucceeded":
- log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
- elif name == "TaskFailed":
- log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
- elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF')
- elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF')
- elif name == "PkgFailed":
- if not event.data.getVar('TINDER_AUTOBUILD') == "0":
- build.exec_task('do_clean', event.data)
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF')
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
- elif name == "BuildCompleted":
- log += "Build Completed\n"
- status = 100
- # Check if we have a old status...
- try:
- h = open(event.data.getVar('TMPDIR')+'/tinder-status', 'r')
- status = int(h.read())
- except:
- pass
-
- elif name == "MultipleProviders":
- log += "---> TINDERBOX Multiple Providers\n"
- log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
- log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
- log += "is runtime: %d\n" % event.isRuntime()
- log += "<--- TINDERBOX Multiple Providers\n"
- elif name == "NoProvider":
- log += "Error: No Provider for: %s\n" % event.getItem()
- log += "Error:Was Runtime: %d\n" % event.isRuntime()
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
-
- # now post the log
- if len(log) == 0:
- return
-
- # for now we will use the http post method as it is the only one
- log_post_method = tinder_send_http
- log_post_method(event.data, status, log)
-
-
-# we want to be an event handler
-addhandler tinderclient_eventhandler
-python tinderclient_eventhandler() {
- if e.data is None or bb.event.getName(e) == "MsgNote":
- return
-
- do_tinder_report = e.data.getVar('TINDER_REPORT')
- if do_tinder_report and do_tinder_report == "1":
- tinder_do_tinder_report(e)
-
- return
-}
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 6cef0b8f6e..dd5c7f224b 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -106,14 +106,14 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
pkgdata['OPKGN'] = m.group(1)
kn = "_".join([x for x in kn.split("_") if x.isupper()])
pkgdata[kn] = kv.strip()
- if kn == 'FILES_INFO':
+ if kn.startswith('FILES_INFO'):
pkgdata[kn] = json.loads(kv)
except ValueError:
pass # ignore lines without valid key: value pairs
return pkgdata
-python toaster_package_dumpdata() {
+def _toaster_dumpdata(pkgdatadir, d):
"""
Dumps the data about the packages created by a recipe
"""
@@ -122,16 +122,24 @@ python toaster_package_dumpdata() {
if not d.getVar('PACKAGES'):
return
- pkgdatadir = d.getVar('PKGDESTWORK')
lpkgdata = {}
datadir = os.path.join(pkgdatadir, 'runtime')
# scan and send data for each generated package
- for datafile in os.listdir(datadir):
- if not datafile.endswith('.packaged'):
- lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
- # Fire an event containing the pkg data
- bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+ if os.path.exists(datadir):
+ for datafile in os.listdir(datadir):
+ if not datafile.endswith('.packaged'):
+ lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
+ # Fire an event containing the pkg data
+ bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+
+python toaster_package_dumpdata() {
+ _toaster_dumpdata(d.getVar('PKGDESTWORK'), d)
+}
+
+python toaster_packagedata_dumpdata() {
+ # This path needs to match do_packagedata[sstate-inputdirs]
+ _toaster_dumpdata(os.path.join(d.getVar('WORKDIR'), 'pkgdata-pdata-input'), d)
}
# 2. Dump output image files information
@@ -366,8 +374,8 @@ toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
addhandler toaster_artifacts
toaster_artifacts[eventmask] = "bb.runqueue.runQueueTaskSkipped bb.runqueue.runQueueTaskCompleted"
-do_packagedata_setscene[postfuncs] += "toaster_package_dumpdata "
-do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
+do_packagedata_setscene[postfuncs] += "toaster_packagedata_dumpdata "
+do_packagedata_setscene[vardepsexclude] += "toaster_packagedata_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index 1a2ec4f3b2..8f914cce27 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -3,11 +3,13 @@ inherit toolchain-scripts-base siteinfo kernel-arch
# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
# doesn't always match our expectations... but we default to the stock value
REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-musl = " -mmusl"
+TARGET_CC_ARCH:append:libc-musl = " -mmusl"
# default debug prefix map isn't valid in the SDK
DEBUG_PREFIX_MAP = ""
+EXPORT_SDK_PS1 = "${@ 'export PS1=\\"%s\\"' % d.getVar('SDK_PS1') if d.getVar('SDK_PS1') else ''}"
+
# This function creates an environment-setup-script for use in a deployable SDK
toolchain_create_sdk_env_script () {
# Create environment setup script. Remember that $SDKTARGETSYSROOT should
@@ -39,6 +41,7 @@ toolchain_create_sdk_env_script () {
echo ' return 1' >> $script
echo 'fi' >> $script
+ echo "${EXPORT_SDK_PS1}" >> $script
echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
EXTRAPATH=""
for i in ${CANADIANEXTRAOS}; do
@@ -62,6 +65,7 @@ toolchain_create_sdk_env_script () {
# This function creates an environment-setup-script in the TMPDIR which enables
# an OE-core IDE to integrate with the build tree
+# Caller must ensure CONFIG_SITE is set up
toolchain_create_tree_env_script () {
script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
rm -f $script
@@ -70,7 +74,7 @@ toolchain_create_tree_env_script () {
echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
- echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
+ echo 'export CONFIG_SITE="${CONFIG_SITE}"' >> $script
echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
@@ -90,6 +94,7 @@ toolchain_shared_env_script () {
echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
+ echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
echo 'export AR=${TARGET_PREFIX}ar' >> $script
echo 'export NM=${TARGET_PREFIX}nm' >> $script
echo 'export M4=m4' >> $script
@@ -157,7 +162,7 @@ EOF
}
# We get the cached site config at runtime
-TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
+TOOLCHAIN_CONFIGSITE_NOCACHE = "${@' '.join(siteinfo_get_files(d)[0])}"
TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
index 89ff970fcc..b9ad35821a 100644
--- a/meta/classes/uboot-config.bbclass
+++ b/meta/classes/uboot-config.bbclass
@@ -11,7 +11,79 @@
#
# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
+def removesuffix(s, suffix):
+ if suffix and s.endswith(suffix):
+ return s[:-len(suffix)]
+ return s
+
+# Some versions of u-boot use .bin and others use .img. Default to .bin,
+# but allow individual recipes to change this value.
+UBOOT_SUFFIX ??= "bin"
UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
+UBOOT_BINARYNAME ?= "${@os.path.splitext(d.getVar("UBOOT_BINARY"))[0]}"
+UBOOT_IMAGE ?= "${UBOOT_BINARYNAME}-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
+UBOOT_SYMLINK ?= "${UBOOT_BINARYNAME}-${MACHINE}.${UBOOT_SUFFIX}"
+UBOOT_MAKE_TARGET ?= "all"
+
+# Output the generated ELF. Some platforms can use the ELF file directly and
+# load it (JTAG booting, QEMU); additionally, the ELF can be used for debugging
+# purposes.
+UBOOT_ELF ?= ""
+UBOOT_ELF_SUFFIX ?= "elf"
+UBOOT_ELF_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.${UBOOT_ELF_SUFFIX}"
+UBOOT_ELF_BINARY ?= "u-boot.${UBOOT_ELF_SUFFIX}"
+UBOOT_ELF_SYMLINK ?= "u-boot-${MACHINE}.${UBOOT_ELF_SUFFIX}"
+
+# Some versions of u-boot build an SPL (Secondary Program Loader) image that
+# should be packaged along with the u-boot binary as well as placed in the
+# deploy directory. Those versions can set the following variables to allow
+# packaging the SPL.
+SPL_SUFFIX ?= ""
+SPL_BINARY ?= ""
+SPL_DELIMITER ?= "${@'.' if d.getVar("SPL_SUFFIX") else ''}"
+SPL_BINARYFILE ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}"
+SPL_BINARYNAME ?= "${@removesuffix(d.getVar("SPL_BINARYFILE"), "." + d.getVar("SPL_SUFFIX"))}"
+SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}${SPL_DELIMITER}${SPL_SUFFIX}"
+SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}${SPL_DELIMITER}${SPL_SUFFIX}"
+
+# Additional environment variables or a script can be installed alongside
+# u-boot to be used automatically on boot. This file, typically 'uEnv.txt'
+# or 'boot.scr', should be packaged along with u-boot as well as placed in the
+# deploy directory. Machine configurations needing one of these files should
+# include it in the SRC_URI and set the UBOOT_ENV parameter.
+UBOOT_ENV_SUFFIX ?= "txt"
+UBOOT_ENV ?= ""
+UBOOT_ENV_SRC_SUFFIX ?= "cmd"
+UBOOT_ENV_SRC ?= "${UBOOT_ENV}.${UBOOT_ENV_SRC_SUFFIX}"
+UBOOT_ENV_BINARY ?= "${UBOOT_ENV}.${UBOOT_ENV_SUFFIX}"
+UBOOT_ENV_IMAGE ?= "${UBOOT_ENV}-${MACHINE}-${PV}-${PR}.${UBOOT_ENV_SUFFIX}"
+UBOOT_ENV_SYMLINK ?= "${UBOOT_ENV}-${MACHINE}.${UBOOT_ENV_SUFFIX}"
+
+# Default name of the u-boot initial env; individual recipes can change
+# this value.
+UBOOT_INITIAL_ENV ?= "${PN}-initial-env"
+
+# U-Boot EXTLINUX variables. U-Boot searches for /boot/extlinux/extlinux.conf
+# to find the EXTLINUX conf file.
+UBOOT_EXTLINUX_INSTALL_DIR ?= "/boot/extlinux"
+UBOOT_EXTLINUX_CONF_NAME ?= "extlinux.conf"
+UBOOT_EXTLINUX_SYMLINK ?= "${UBOOT_EXTLINUX_CONF_NAME}-${MACHINE}-${PR}"
+
+# Options for the device tree compiler passed to mkimage '-D' feature:
+UBOOT_MKIMAGE_DTCOPTS ??= ""
+SPL_MKIMAGE_DTCOPTS ??= ""
+
+# mkimage command
+UBOOT_MKIMAGE ?= "uboot-mkimage"
+UBOOT_MKIMAGE_SIGN ?= "${UBOOT_MKIMAGE}"
+
+# Arguments passed to mkimage for signing
+UBOOT_MKIMAGE_SIGN_ARGS ?= ""
+SPL_MKIMAGE_SIGN_ARGS ?= ""
+
+# Options to deploy the u-boot device tree
+UBOOT_DTB ?= ""
+UBOOT_DTB_BINARY ??= ""
python () {
ubootmachine = d.getVar("UBOOT_MACHINE")
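As an illustration of the new uboot-config knobs (hypothetical values, in the style of a machine .conf):

    UBOOT_SUFFIX = "img"     # deploy u-boot.img rather than u-boot.bin
    SPL_BINARY = "MLO"       # package/deploy the SPL as MLO-<machine>-<pv>-<pr>
    UBOOT_ENV = "uEnv"       # install uEnv.txt (UBOOT_ENV_SRC defaults to uEnv.cmd)
    UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"

With SPL_SUFFIX left empty, SPL_DELIMITER collapses to nothing, so the deployed SPL names carry no trailing dot.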
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
index c65c421b60..dcebe7ff31 100644
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ b/meta/classes/uboot-extlinux-config.bbclass
@@ -64,7 +64,7 @@ UBOOT_EXTLINUX_FDT ??= ""
UBOOT_EXTLINUX_FDTDIR ??= "../"
UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
-UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
+UBOOT_EXTLINUX_MENU_DESCRIPTION:linux ??= "${DISTRO_NAME}"
UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
@@ -104,13 +104,16 @@ python do_create_extlinux_config() {
if default:
cfgfile.write('DEFAULT %s\n' % (default))
- for label in labels.split():
+ # Need to deconflict the labels with existing overrides
+ label_overrides = labels.split()
+ default_overrides = localdata.getVar('OVERRIDES').split(':')
+ # We keep all the existing overrides that aren't used as a label;
+ # the override for each label is added back while that label is processed
+ keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
+ for label in labels.split():
- localdata.setVar('OVERRIDES', label + ':' + overrides)
+ localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
@@ -148,5 +151,8 @@ python do_create_extlinux_config() {
except OSError:
bb.fatal('Unable to open %s' % (cfile))
}
+UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
+do_create_extlinux_config[vardepsexclude] += "OVERRIDES"
addtask create_extlinux_config before do_install do_deploy after do_compile
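For example, with hypothetical labels the deconfliction above behaves as follows:

    UBOOT_EXTLINUX_LABELS = "default fallback"
    UBOOT_EXTLINUX_KERNEL_IMAGE:default = "../zImage"
    UBOOT_EXTLINUX_MENU_DESCRIPTION:fallback = "Fallback kernel"

While each label is processed, OVERRIDES is rebuilt as the kept defaults plus that single label, so a label name that happens to collide with an existing override can no longer leak values between entries; the added vardeps then make do_create_extlinux_config re-run whenever any per-label variable changes.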
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
index 9e3d1d6fc1..8d136e9405 100644
--- a/meta/classes/uboot-sign.bbclass
+++ b/meta/classes/uboot-sign.bbclass
@@ -19,7 +19,7 @@
# The tasks sequence is set as below, using DEPLOY_IMAGE_DIR as common place to
# treat the device tree blob:
#
-# * u-boot:do_install_append
+# * u-boot:do_install:append
# Install UBOOT_DTB_BINARY to datadir, so that kernel can use it for
# signing, and kernel will deploy UBOOT_DTB_BINARY after signs it.
#
@@ -31,76 +31,464 @@
#
# For more details on signature process, please refer to U-Boot documentation.
-# Signature activation.
+# We need some variables from u-boot-config
+inherit uboot-config
+
+# Enable use of a U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE ?= "0"
+
+# Signature activation - these require their respective fitImages
UBOOT_SIGN_ENABLE ?= "0"
+SPL_SIGN_ENABLE ?= "0"
# Default value for deployment filenames.
UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
UBOOT_DTB_BINARY ?= "u-boot.dtb"
UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
-UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
-UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
-UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
+UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.bin"
+UBOOT_NODTB_BINARY ?= "u-boot-nodtb.bin"
+UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.bin"
+UBOOT_ITS_IMAGE ?= "u-boot-its-${MACHINE}-${PV}-${PR}"
+UBOOT_ITS ?= "u-boot.its"
+UBOOT_ITS_SYMLINK ?= "u-boot-its-${MACHINE}"
+UBOOT_FITIMAGE_IMAGE ?= "u-boot-fitImage-${MACHINE}-${PV}-${PR}"
+UBOOT_FITIMAGE_BINARY ?= "u-boot-fitImage"
+UBOOT_FITIMAGE_SYMLINK ?= "u-boot-fitImage-${MACHINE}"
+SPL_DIR ?= "spl"
+SPL_DTB_IMAGE ?= "u-boot-spl-${MACHINE}-${PV}-${PR}.dtb"
+SPL_DTB_BINARY ?= "u-boot-spl.dtb"
+SPL_DTB_SYMLINK ?= "u-boot-spl-${MACHINE}.dtb"
+SPL_NODTB_IMAGE ?= "u-boot-spl-nodtb-${MACHINE}-${PV}-${PR}.bin"
+SPL_NODTB_BINARY ?= "u-boot-spl-nodtb.bin"
+SPL_NODTB_SYMLINK ?= "u-boot-spl-nodtb-${MACHINE}.bin"
+
+# U-Boot fitImage description
+UBOOT_FIT_DESC ?= "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}"
+
+# Kernel / U-Boot fitImage Hash Algo
+FIT_HASH_ALG ?= "sha256"
+UBOOT_FIT_HASH_ALG ?= "sha256"
+
+# Kernel / U-Boot fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+UBOOT_FIT_SIGN_ALG ?= "rsa2048"
+
+# Generate keys for signing Kernel / U-Boot fitImage
+FIT_GENERATE_KEYS ?= "0"
+UBOOT_FIT_GENERATE_KEYS ?= "0"
+
+# Size of private keys in number of bits
+FIT_SIGN_NUMBITS ?= "2048"
+UBOOT_FIT_SIGN_NUMBITS ?= "2048"
-# Functions in this bbclass is for u-boot only
+# args to openssl genrsa (Default is just the public exponent)
+FIT_KEY_GENRSA_ARGS ?= "-F4"
+UBOOT_FIT_KEY_GENRSA_ARGS ?= "-F4"
+
+# args to openssl req (Default is -batch for non interactive mode and
+# -new for new certificate)
+FIT_KEY_REQ_ARGS ?= "-batch -new"
+UBOOT_FIT_KEY_REQ_ARGS ?= "-batch -new"
+
+# Standard format for public key certificate
+FIT_KEY_SIGN_PKCS ?= "-x509"
+UBOOT_FIT_KEY_SIGN_PKCS ?= "-x509"
+
+# Functions in this bbclass can apply to either U-Boot or the kernel,
+# depending on the scenario
UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
+KERNEL_PN = "${@d.getVar('PREFERRED_PROVIDER_virtual/kernel')}"
-concat_dtb() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" ]; then
- mkdir -p ${DEPLOYDIR}
- if [ -e ${B}/${UBOOT_DTB_BINARY} ]; then
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
- fi
+# We need u-boot-tools-native if we're creating a U-Boot fitImage
+python() {
+ if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1':
+ depends = d.getVar("DEPENDS")
+ depends = "%s u-boot-tools-native dtc-native" % depends
+ d.setVar("DEPENDS", depends)
+}
- if [ -f ${B}/${UBOOT_NODTB_BINARY} ]; then
- install ${B}/${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_BINARY}
- fi
+concat_dtb_helper() {
+ if [ -e "${UBOOT_DTB_BINARY}" ]; then
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
+ fi
- # Concatenate U-Boot w/o DTB & DTB with public key
- # (cf. kernel-fitimage.bbclass for more details)
+ if [ -f "${UBOOT_NODTB_BINARY}" ]; then
+ install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
+ fi
+
+ # If we're not using a signed u-boot fit, concatenate SPL w/o DTB & U-Boot DTB
+ # with public key (otherwise it will be deployed by the equivalent
+ # concat_spl_dtb_helper function - cf. kernel-fitimage.bbclass for more details)
+ if [ "${SPL_SIGN_ENABLE}" != "1" ] ; then
deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
[ -e "$deployed_uboot_dtb_binary" ]; then
- cd ${B}
oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
- install ${B}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
+ install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
- elif [ -n "${UBOOT_DTB_BINARY}" ]; then
+ cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+
+ if [ -n "${UBOOT_CONFIG}" ]
+ then
+ for config in ${UBOOT_MACHINE}; do
+ i=$(expr $i + 1);
+ for type in ${UBOOT_CONFIG}; do
+ j=$(expr $j + 1);
+ if [ $j -eq $i ]
+ then
+ cp ${UBOOT_IMAGE} ${B}/${CONFIG_B_PATH}/u-boot-$type.${UBOOT_SUFFIX}
+ fi
+ done
+ done
+ fi
+ else
bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
fi
fi
}
+concat_spl_dtb_helper() {
+
+ # We only deploy symlinks to the u-boot-spl.dtb, as the KERNEL_PN will
+ # be responsible for deploying the real file
+ if [ -e "${SPL_DIR}/${SPL_DTB_BINARY}" ] ; then
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_SYMLINK}
+ ln -sf ${SPL_DTB_IMAGE} ${DEPLOYDIR}/${SPL_DTB_BINARY}
+ fi
+
+ # Concatenate the SPL nodtb binary and u-boot.dtb
+ deployed_spl_dtb_binary='${DEPLOY_DIR_IMAGE}/${SPL_DTB_IMAGE}'
+ if [ -e "${DEPLOYDIR}/${SPL_NODTB_IMAGE}" -a -e "$deployed_spl_dtb_binary" ] ; then
+ cd ${DEPLOYDIR}
+ cat ${SPL_NODTB_IMAGE} $deployed_spl_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${SPL_BINARY} > ${SPL_IMAGE}
+ else
+ bbwarn "Failure while adding public key to spl binary. Verified U-Boot boot won't be available."
+ fi
+}
+
+
+concat_dtb() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ mkdir -p ${DEPLOYDIR}
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
+ concat_dtb_helper
+ done
+ else
+ CONFIG_B_PATH=""
+ cd ${B}
+ concat_dtb_helper
+ fi
+ fi
+}
+
+concat_spl_dtb() {
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${SPL_DTB_BINARY}" ]; then
+ mkdir -p ${DEPLOYDIR}
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ CONFIG_B_PATH="$config"
+ cd ${B}/$config
+ concat_spl_dtb_helper
+ done
+ else
+ CONFIG_B_PATH=""
+ cd ${B}
+ concat_spl_dtb_helper
+ fi
+ fi
+}
+
+
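The CONFIG_B_PATH/UBOOT_MACHINE iteration in concat_dtb and concat_spl_dtb above assumes index-aligned multi-config lists. A minimal sketch of such a setup (board and defconfig names are made up; deriving UBOOT_MACHINE from these entries is done by the u-boot config handling, which is not part of this diff):

    # Hypothetical multi-config setup the per-config loops walk over
    UBOOT_CONFIG = "sd emmc"
    UBOOT_CONFIG[sd] = "board_sd_defconfig"
    UBOOT_CONFIG[emmc] = "board_emmc_defconfig"
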
# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
-do_install_append() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" ]; then
- if [ -f ${B}/${UBOOT_DTB_BINARY} ]; then
- install -d ${D}${datadir}
- # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
- # need both of them.
- install ${B}/${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
- ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
- elif [ -n "${UBOOT_DTB_BINARY}" ]; then
- bbwarn "${B}/${UBOOT_DTB_BINARY} not found"
+install_helper() {
+ if [ -f "${UBOOT_DTB_BINARY}" ]; then
+ # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
+ # need both of them.
+ install -Dm 0644 ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
+ ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
+ else
+ bbwarn "${UBOOT_DTB_BINARY} not found"
+ fi
+}
+
+# Install the SPL dtb and u-boot nodtb binaries to datadir.
+install_spl_helper() {
+ if [ -f "${SPL_DIR}/${SPL_DTB_BINARY}" ]; then
+ install -Dm 0644 ${SPL_DIR}/${SPL_DTB_BINARY} ${D}${datadir}/${SPL_DTB_IMAGE}
+ ln -sf ${SPL_DTB_IMAGE} ${D}${datadir}/${SPL_DTB_BINARY}
+ else
+ bbwarn "${SPL_DTB_BINARY} not found"
+ fi
+ if [ -f "${UBOOT_NODTB_BINARY}" ] ; then
+ install -Dm 0644 ${UBOOT_NODTB_BINARY} ${D}${datadir}/${UBOOT_NODTB_IMAGE}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${D}${datadir}/${UBOOT_NODTB_BINARY}
+ else
+ bbwarn "${UBOOT_NODTB_BINARY} not found"
+ fi
+
+	# We need to install 'stub' u-boot-fitimage and ITS files to datadir,
+	# so that the KERNEL_PN can use the correct filenames when
+	# assembling and deploying them
+ touch ${D}/${datadir}/${UBOOT_FITIMAGE_IMAGE}
+ touch ${D}/${datadir}/${UBOOT_ITS_IMAGE}
+}
+
+do_install:append() {
+ if [ "${PN}" = "${UBOOT_PN}" ]; then
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ cd ${B}/$config
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ]; then
+ install_helper
+ fi
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ install_spl_helper
+ fi
+ done
+ else
+ cd ${B}
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -o "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${UBOOT_DTB_BINARY}" ]; then
+ install_helper
+ fi
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ]; then
+ install_spl_helper
+ fi
fi
fi
}
+do_uboot_generate_rsa_keys() {
+ if [ "${SPL_SIGN_ENABLE}" = "0" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
+		bbwarn "UBOOT_FIT_GENERATE_KEYS is set to 1 even though SPL_SIGN_ENABLE is set to 0. The keys will not be generated as they won't be used."
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] && [ "${UBOOT_FIT_GENERATE_KEYS}" = "1" ]; then
+
+ # Generate keys only if they don't already exist
+ if [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key ] || \
+ [ ! -f "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt ]; then
+
+ # make directory if it does not already exist
+ mkdir -p "${SPL_SIGN_KEYDIR}"
+
+ echo "Generating RSA private key for signing U-Boot fitImage"
+ openssl genrsa ${UBOOT_FIT_KEY_GENRSA_ARGS} -out \
+ "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
+ "${UBOOT_FIT_SIGN_NUMBITS}"
+
+ echo "Generating certificate for signing U-Boot fitImage"
+ openssl req ${FIT_KEY_REQ_ARGS} "${UBOOT_FIT_KEY_SIGN_PKCS}" \
+ -key "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".key \
+ -out "${SPL_SIGN_KEYDIR}/${SPL_SIGN_KEYNAME}".crt
+ fi
+ fi
+
+}
+
+addtask uboot_generate_rsa_keys before do_uboot_assemble_fitimage after do_compile
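
With the default argument variables above, the two openssl invocations in do_uboot_generate_rsa_keys reduce to roughly the following (the key name reuses the hypothetical sketch earlier; the modulus size comes from UBOOT_FIT_SIGN_NUMBITS):

    # Illustrative expansion of do_uboot_generate_rsa_keys
    openssl genrsa -F4 -out "${SPL_SIGN_KEYDIR}/dev-spl-key.key" "${UBOOT_FIT_SIGN_NUMBITS}"
    openssl req -batch -new -x509 \
            -key "${SPL_SIGN_KEYDIR}/dev-spl-key.key" \
            -out "${SPL_SIGN_KEYDIR}/dev-spl-key.crt"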
+
+# Create an ITS file for the U-Boot FIT, for use when
+# we want to sign it so that the SPL can verify it
+uboot_fitimage_assemble() {
+ uboot_its="$1"
+ uboot_nodtb_bin="$2"
+ uboot_dtb="$3"
+ uboot_bin="$4"
+ spl_dtb="$5"
+ uboot_csum="${UBOOT_FIT_HASH_ALG}"
+ uboot_sign_algo="${UBOOT_FIT_SIGN_ALG}"
+ uboot_sign_keyname="${SPL_SIGN_KEYNAME}"
+
+ rm -f $uboot_its $uboot_bin
+
+	# First we create the ITS file
+ cat << EOF >> $uboot_its
+/dts-v1/;
+
+/ {
+ description = "${UBOOT_FIT_DESC}";
+ #address-cells = <1>;
+
+ images {
+ uboot {
+ description = "U-Boot image";
+ data = /incbin/("$uboot_nodtb_bin");
+ type = "standalone";
+ os = "u-boot";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ load = <${UBOOT_LOADADDRESS}>;
+ entry = <${UBOOT_ENTRYPOINT}>;
+EOF
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ cat << EOF >> $uboot_its
+ signature {
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
+ };
+EOF
+ fi
+
+ cat << EOF >> $uboot_its
+ };
+ fdt {
+ description = "U-Boot FDT";
+ data = /incbin/("$uboot_dtb");
+ type = "flat_dt";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+EOF
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ cat << EOF >> $uboot_its
+ signature {
+ algo = "$uboot_csum,$uboot_sign_algo";
+ key-name-hint = "$uboot_sign_keyname";
+ };
+EOF
+ fi
+
+ cat << EOF >> $uboot_its
+ };
+ };
+
+ configurations {
+ default = "conf";
+ conf {
+ description = "Boot with signed U-Boot FIT";
+ loadables = "uboot";
+ fdt = "fdt";
+ };
+ };
+};
+EOF
+
+ #
+	# Assemble the U-Boot FIT image
+ #
+ ${UBOOT_MKIMAGE} \
+ ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
+ -f $uboot_its \
+ $uboot_bin
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" ] ; then
+ #
+		# Sign the U-Boot FIT image and add the public key to the SPL dtb
+ #
+ ${UBOOT_MKIMAGE_SIGN} \
+ ${@'-D "${SPL_MKIMAGE_DTCOPTS}"' if len('${SPL_MKIMAGE_DTCOPTS}') else ''} \
+ -F -k "${SPL_SIGN_KEYDIR}" \
+ -K "$spl_dtb" \
+ -r $uboot_bin \
+ ${SPL_MKIMAGE_SIGN_ARGS}
+ fi
+
+}
+
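For a signed build, the assemble and sign steps above boil down to something like this (the its/fitImage filenames are the globbed staging names and purely illustrative):

    # Illustrative expansion of the mkimage calls with signing enabled
    ${UBOOT_MKIMAGE} -f u-boot-its-${MACHINE} u-boot-fitImage-${MACHINE}
    ${UBOOT_MKIMAGE_SIGN} -F -k "${SPL_SIGN_KEYDIR}" \
            -K u-boot-spl.dtb -r u-boot-fitImage-${MACHINE}
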
+do_uboot_assemble_fitimage() {
+	# This function runs in the KERNEL_PN context because we need to support the
+	# scenario where UBOOT_SIGN_ENABLE places the kernel fitImage's pubkey in the
+	# u-boot.dtb file, so that we can use it when building the U-Boot fitImage
+	# itself.
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] && \
+ [ -n "${SPL_DTB_BINARY}" -a "${PN}" = "${KERNEL_PN}" ] ; then
+ if [ "${UBOOT_SIGN_ENABLE}" != "1" ]; then
+ # If we're not signing the Kernel fitImage, that means
+ # we need to copy the u-boot.dtb from staging ourselves
+ cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
+ fi
+		# As we are in the kernel context, we need to copy u-boot-spl.dtb from staging first.
+		# Unfortunately, we need to glob on top of ${SPL_DTB_BINARY} since _IMAGE and _SYMLINK
+		# will contain U-Boot's PV.
+		# Similarly, we need to get the filenames of the 'stub' u-boot-fitimage and ITS file
+		# in staging so that we can create the image with the correct filename in the
+		# KERNEL_PN context.
+		# As for the u-boot.dtb (with the fitImage's pubkey), it should come from the
+		# dependent do_assemble_fitimage task.
+ cp -P ${STAGING_DATADIR}/u-boot-spl*.dtb ${B}
+ cp -P ${STAGING_DATADIR}/u-boot-nodtb*.bin ${B}
+ rm -rf ${B}/u-boot-fitImage-* ${B}/u-boot-its-*
+ kernel_uboot_fitimage_name=`basename ${STAGING_DATADIR}/u-boot-fitImage-*`
+ kernel_uboot_its_name=`basename ${STAGING_DATADIR}/u-boot-its-*`
+ cd ${B}
+ uboot_fitimage_assemble $kernel_uboot_its_name ${UBOOT_NODTB_BINARY} \
+ ${UBOOT_DTB_BINARY} $kernel_uboot_fitimage_name \
+ ${SPL_DTB_BINARY}
+ fi
+}
+
+addtask uboot_assemble_fitimage before do_deploy after do_compile
+
+do_deploy:prepend:pn-${UBOOT_PN}() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
+ concat_dtb
+ fi
+
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
+		# Deploy the u-boot-spl-nodtb binary and symlinks...
+		if [ -f "${SPL_DIR}/${SPL_NODTB_BINARY}" ] ; then
+			echo "Copying u-boot-spl-nodtb binary..."
+ install -m 0644 ${SPL_DIR}/${SPL_NODTB_BINARY} ${DEPLOYDIR}/${SPL_NODTB_IMAGE}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_SYMLINK}
+ ln -sf ${SPL_NODTB_IMAGE} ${DEPLOYDIR}/${SPL_NODTB_BINARY}
+ fi
+
+		# We only deploy the symlinks to the uboot-fitImage and uboot-its
+		# images, as the KERNEL_PN will take care of deploying the real files
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_BINARY}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_FITIMAGE_SYMLINK}
+ ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS}
+ ln -sf ${UBOOT_ITS_IMAGE} ${DEPLOYDIR}/${UBOOT_ITS_SYMLINK}
+ fi
+
+ if [ "${SPL_SIGN_ENABLE}" = "1" -a -n "${SPL_DTB_BINARY}" ] ; then
+ concat_spl_dtb
+ fi
+}
+
+do_deploy:append:pn-${UBOOT_PN}() {
+	# If we're creating a U-Boot fitImage, point the u-boot.bin
+	# symlink at it, since it might get used by image recipes
+ if [ "${UBOOT_FITIMAGE_ENABLE}" = "1" ] ; then
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_BINARY}
+ ln -sf ${UBOOT_FITIMAGE_IMAGE} ${DEPLOYDIR}/${UBOOT_SYMLINK}
+ fi
+}
+
python () {
- if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN'):
- kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
+ if ( (d.getVar('UBOOT_SIGN_ENABLE') == '1'
+ or d.getVar('UBOOT_FITIMAGE_ENABLE') == '1')
+ and d.getVar('PN') == d.getVar('UBOOT_PN')
+ and d.getVar('UBOOT_DTB_BINARY')):
# Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb
- d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
+ # and/or the U-Boot fitImage
+ d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % d.getVar('KERNEL_PN'))
+
+ if d.getVar('UBOOT_FITIMAGE_ENABLE') == '1' and d.getVar('PN') == d.getVar('KERNEL_PN'):
+ # As the U-Boot fitImage is created by the KERNEL_PN, we need
+ # to make sure that the u-boot-spl.dtb and u-boot-spl-nodtb.bin
+        # files are in the staging dir for its use
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % d.getVar('UBOOT_PN'))
+
+ # If the Kernel fitImage is being signed, we need to
+ # create the U-Boot fitImage after it
+ if d.getVar('UBOOT_SIGN_ENABLE') == '1':
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage' % d.getVar('KERNEL_PN'))
+ d.appendVarFlag('do_uboot_assemble_fitimage', 'depends', ' %s:do_assemble_fitimage_initramfs' % d.getVar('KERNEL_PN'))
- # kernerl's do_deploy is a litle special, so we can't use
- # do_deploy_append, otherwise it would override
- # kernel_do_deploy.
- d.appendVarFlag('do_deploy', 'prefuncs', ' concat_dtb')
}
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index ba99fb6e8f..6a9e862bcd 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -1,16 +1,16 @@
-UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}"
+UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
# Example checksums
#UNINATIVE_CHECKSUM[aarch64] = "dead"
#UNINATIVE_CHECKSUM[i686] = "dead"
#UNINATIVE_CHECKSUM[x86_64] = "dead"
UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
-# Enabling uninative will change the following variables so they need to go the parsing white list to prevent multiple recipe parsing
-BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
+# Enabling uninative will change the following variables, so they need to go on the parsing ignored-variables list to prevent multiple recipe parsing
+BB_HASHCONFIG_IGNORE_VARS += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
addhandler uninative_event_fetchloader
uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
@@ -45,7 +45,7 @@ python uninative_event_fetchloader() {
tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
tarballpath = os.path.join(tarballdir, tarball)
- if not os.path.exists(tarballpath):
+ if not os.path.exists(tarballpath + ".done"):
bb.utils.mkdirhier(tarballdir)
if d.getVar("UNINATIVE_URL") == "unset":
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
@@ -56,12 +56,17 @@ python uninative_event_fetchloader() {
# Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
# and we can't easily put 'chksum' into the url path from a url parameter with
# the current fetcher url handling
- ownmirror = d.getVar('SOURCE_MIRROR_URL')
- if ownmirror:
- localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
+ premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
+ for line in premirrors:
+ try:
+ (find, replace) = line
+ except ValueError:
+ continue
+ if find.startswith("http"):
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
- bb.note("Fetching uninative binary shim from %s" % srcuri)
+ bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
fetcher.download()
@@ -84,18 +89,18 @@ python uninative_event_fetchloader() {
# ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract last option from first line
glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
- raise RuntimeError("Your host glibc verson (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
+ raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
cmd = d.expand("\
mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
cd ${UNINATIVE_STAGING_DIR}-uninative; \
-tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
+tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index 537e85d9a3..fc1ffd828c 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -6,9 +6,9 @@
# To use this class a number of variables should be defined:
#
# List all of the alternatives needed by a package:
-# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
+# ALTERNATIVE:<pkg> = "name1 name2 name3 ..."
#
-# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
+# i.e. ALTERNATIVE:busybox = "sh sed test bracket"
#
# The pathname of the link
# ALTERNATIVE_LINK_NAME[name] = "target"
@@ -123,7 +123,7 @@ def gen_updatealternativesvars(d):
for p in pkgs:
for v in vars:
- ret.append(v + "_" + p)
+ ret.append(v + ":" + p)
ret.append(v + "_VARDEPS_" + p)
return " ".join(ret)
@@ -141,10 +141,10 @@ python apply_update_alternative_renames () {
import re
def update_files(alt_target, alt_target_rename, pkg, d):
- f = d.getVar('FILES_' + pkg)
+ f = d.getVar('FILES:' + pkg)
if f:
f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
- d.setVar('FILES_' + pkg, f)
+ d.setVar('FILES:' + pkg, f)
# Check for deprecated usage...
pn = d.getVar('BPN')
@@ -156,7 +156,7 @@ python apply_update_alternative_renames () {
for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
link_rename = []
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
@@ -184,7 +184,7 @@ python apply_update_alternative_renames () {
link_rename.append((alt_target, alt_target_rename))
else:
bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
update_files(alt_target, alt_target_rename, pkg, d)
else:
bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
@@ -201,7 +201,7 @@ python apply_update_alternative_renames () {
if os.path.lexists(link_target):
# Ok, the link_target exists, we can rename
bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
else:
# Try to resolve the broken link to link.${BPN}
link_maybe = '%s.%s' % (os.readlink(src), pn)
@@ -216,42 +216,62 @@ python apply_update_alternative_renames () {
update_files(alt_target, alt_target_rename, pkg, d)
}
-PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
+def update_alternatives_alt_targets(d, pkg):
+ """
+ Returns the update-alternatives metadata for a package.
+
+    The returned value is a list of tuples, where each tuple contains:
+ alt_name: The binary name
+ alt_link: The path for the binary (Shared by different packages)
+ alt_target: The path for the renamed binary (Unique per package)
+ alt_priority: The priority of the alt_target
+
+ All the alt_targets will be installed into the sysroot. The alt_link is
+ a symlink pointing to the alt_target with the highest priority.
+ """
+
+ pn = d.getVar('BPN')
+ pkgdest = d.getVar('PKGD')
+ updates = list()
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_TARGET') or \
+ alt_link
+ alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
+ d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_PRIORITY')
+
+ # This shouldn't trigger, as it should have been resolved earlier!
+ if alt_link == alt_target:
+ bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
+ alt_target = '%s.%s' % (alt_target, pn)
+
+ if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
+ bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
+ continue
+
+ alt_target = os.path.normpath(alt_target)
+ updates.append( (alt_name, alt_link, alt_target, alt_priority) )
+
+ return updates
+
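As a concrete illustration of the lookup order documented above, hypothetical recipe metadata such as:

    # Hypothetical recipe metadata (busybox-style, values made up)
    ALTERNATIVE:busybox = "sh"
    ALTERNATIVE_LINK_NAME[sh] = "/bin/sh"
    ALTERNATIVE_TARGET[sh] = "/bin/busybox.nosuid"
    ALTERNATIVE_PRIORITY = "50"

would make update_alternatives_alt_targets(d, "busybox") return [("sh", "/bin/sh", "/bin/busybox.nosuid", "50")], provided the target exists under PKGD.
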
+PACKAGESPLITFUNCS:prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
if not update_alternatives_enabled(d):
return
- pn = d.getVar('BPN')
-
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD')
for pkg in (d.getVar('PACKAGES') or "").split():
# Create post install/removal scripts
alt_setup_links = ""
alt_remove_links = ""
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
- # Sometimes alt_target is specified as relative to the link name.
- alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
-
- alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
- alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or d.getVar('ALTERNATIVE_PRIORITY')
-
- # This shouldn't trigger, as it should have been resolved earlier!
- if alt_link == alt_target:
- bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
- alt_target = '%s.%s' % (alt_target, pn)
-
- if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
- bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
- continue
-
- # Default to generate shell script.. eventually we may want to change this...
- alt_target = os.path.normpath(alt_target)
-
+ updates = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, alt_priority in updates:
alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
@@ -260,21 +280,24 @@ python populate_packages_updatealternatives () {
provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
if provider:
#bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
- d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
+ d.appendVar('RDEPENDS:%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or '#!/bin/sh\n'
- postinst += alt_setup_links
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ if postinst:
+ postinst = alt_setup_links + postinst
+ else:
+ postinst = '#!/bin/sh\n' + alt_setup_links
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
+ prerm = d.getVar('pkg_prerm:%s' % pkg) or '#!/bin/sh\n'
prerm += alt_remove_links
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
}
-python package_do_filedeps_append () {
+python package_do_filedeps:append () {
if update_alternatives_enabled(d):
apply_update_alternative_provides(d)
}
@@ -284,7 +307,7 @@ def apply_update_alternative_provides(d):
pkgdest = d.getVar('PKGDEST')
for pkg in d.getVar('PACKAGES').split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
@@ -298,7 +321,7 @@ def apply_update_alternative_provides(d):
# Add file provide
trans_target = oe.package.file_translate(alt_target)
- d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
- d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
+ d.appendVar('FILERPROVIDES:%s:%s' % (trans_target, pkg), " " + alt_link)
+ if not trans_target in (d.getVar('FILERPROVIDESFLIST:%s' % pkg) or ""):
+ d.appendVar('FILERPROVIDESFLIST:%s' % pkg, " " + trans_target)
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 265c4be9d1..0a3a608662 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,11 +1,11 @@
UPDATERCPN ?= "${PN}"
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
+DEPENDS:append:class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
UPDATERCD = "update-rc.d"
-UPDATERCD_class-cross = ""
-UPDATERCD_class-native = ""
-UPDATERCD_class-nativesdk = ""
+UPDATERCD:class-cross = ""
+UPDATERCD:class-native = ""
+UPDATERCD:class-nativesdk = ""
INITSCRIPT_PARAMS ?= "defaults"
@@ -20,28 +20,14 @@ def use_updatercd(d):
return '[ -n "$D" -o ! -d /run/systemd/system ]'
return 'true'
-updatercd_preinst() {
-if ${@use_updatercd(d)} && [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
-fi
-if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
- if [ -n "$D" ]; then
- OPT="-f -r $D"
- else
- OPT="-f"
- fi
- update-rc.d $OPT ${INITSCRIPT_NAME} remove
-fi
-}
-
PACKAGE_WRITE_DEPS += "update-rc.d-native"
updatercd_postinst() {
if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
- OPT="-f -r $D"
+ OPT="-r $D"
else
- OPT="-f -s"
+ OPT="-s"
fi
update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
fi
@@ -76,10 +62,10 @@ python __anonymous() {
update_rc_after_parse(d)
}
-PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
-PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
+PACKAGESPLITFUNCS:prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
+PACKAGESPLITFUNCS:remove:class-nativesdk = "populate_packages_updatercd "
-populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
+populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
python populate_packages_updatercd () {
@@ -92,10 +78,10 @@ python populate_packages_updatercd () {
statement = "grep -q -w '/etc/init.d/functions' %s" % path
if subprocess.call(statement, shell=True) == 0:
mlprefix = d.getVar('MLPREFIX') or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
+ d.appendVar('RDEPENDS:' + pkg, ' %sinitd-functions' % (mlprefix))
def update_rcd_package(pkg):
- bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
+ bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
localdata = bb.data.createCopy(d)
overrides = localdata.getVar("OVERRIDES")
@@ -103,31 +89,25 @@ python populate_packages_updatercd () {
update_rcd_auto_depend(pkg)
- preinst = d.getVar('pkg_preinst_%s' % pkg)
- if not preinst:
- preinst = '#!/bin/sh\n'
- preinst += localdata.getVar('updatercd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += localdata.getVar('updatercd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
+ prerm = d.getVar('pkg_prerm:%s' % pkg)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += localdata.getVar('updatercd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
+ d.setVar('pkg_prerm:%s' % pkg, prerm)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
+ postrm = d.getVar('pkg_postrm:%s' % pkg)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += localdata.getVar('updatercd_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ d.setVar('pkg_postrm:%s' % pkg, postrm)
- d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
+ d.appendVar('RRECOMMENDS:' + pkg, " ${MLPREFIX}${UPDATERCD}")
# Check that this class isn't being inhibited (generally, by
# systemd.bbclass) before doing any work.
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 70d59e5573..3acf59cd46 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -76,8 +76,8 @@ def update_useradd_static_config(d):
for param in oe.useradd.split_commands(params):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
- except:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -140,13 +140,13 @@ def update_useradd_static_config(d):
uaargs.gid = uaargs.groupid
uaargs.user_group = None
if newgroup and is_pkg:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
+ groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
if groupadd:
# Only add the group if not already specified
if not uaargs.groupname in groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
- d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+ d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
uaargs.home_dir = field[5] or uaargs.home_dir
@@ -174,8 +174,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][uaargs.non_unique]
if uaargs.password != None:
newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
- elif uaargs.clear_password:
- newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -197,8 +195,8 @@ def update_useradd_static_config(d):
try:
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
- except:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -236,8 +234,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][gaargs.non_unique]
if gaargs.password != None:
newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
- elif gaargs.clear_password:
- newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -265,17 +261,17 @@ def update_useradd_static_config(d):
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
+ useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
if useradd_param:
- #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
+ d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
+ groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
if groupadd_param:
- #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
+ d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
# Load and process extra users and groups, rewriting only adduser/addgroup params
pkg = d.getVar('PN')
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index 124becd082..20771a0ce5 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -3,7 +3,7 @@ inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
@@ -100,6 +100,8 @@ useradd_sysroot () {
# Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
+	# PSEUDO_SYSROOT can contain references to the build architecture and COMPONENTS_DIR,
+	# so it needs the STAGING_FIXME below
export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
@@ -134,10 +136,10 @@ useradd_sysroot () {
}
# The export of PSEUDO in useradd_sysroot() above contains references to
-# ${COMPONENTS_DIR} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
+# ${PSEUDO_SYSROOT} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
# shell functions use ${LOGFIFO}. These need to be handled when restoring
# postinst-useradd-${PN} from the sstate cache.
-EXTRA_STAGING_FIXMES += "COMPONENTS_DIR PSEUDO_LOCALSTATEDIR LOGFIFO"
+EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
python useradd_sysroot_sstate () {
scriptfile = None
@@ -162,16 +164,16 @@ python useradd_sysroot_sstate () {
}
do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
+SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
-SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
+SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
-USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
@@ -182,7 +184,8 @@ def update_useradd_after_parse(d):
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
+ d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
+ if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -196,7 +199,7 @@ python __anonymous() {
def get_all_cmd_params(d, cmd_type):
import string
- param_type = cmd_type.upper() + "_PARAM_%s"
+ param_type = cmd_type.upper() + "_PARAM:%s"
params = []
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
@@ -208,7 +211,7 @@ def get_all_cmd_params(d, cmd_type):
return "; ".join(params)
# Adds the preinst script into generated packages
-fakeroot python populate_packages_prepend () {
+fakeroot python populate_packages:prepend () {
def update_useradd_package(pkg):
bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
@@ -217,7 +220,7 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
+ preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
@@ -227,15 +230,19 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + ":" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
+ d.setVar('pkg_preinst:%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
+ rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
- d.setVar("RDEPENDS_%s" % pkg, rdepends)
+ d.setVar("RDEPENDS:%s" % pkg, rdepends)
# Add the user/group preinstall scripts and RDEPENDS requirements
# to packages specified by USERADD_PACKAGES
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index 0d0bdb80f5..7f5b9b7219 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -145,3 +145,21 @@ perform_usermod () {
fi
set -e
}
+
+perform_passwd_expire () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing equivalent of passwd --expire with [$opts]"
+ # Directly set sp_lstchg to 0 without using the passwd command: Only root can do that
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
+ local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
+ if test "x$passwd_lastchanged" != "x0"; then
+ bbfatal "${PN}: passwd --expire operation did not succeed."
+ fi
+ else
+ bbnote "${PN}: user $username doesn't exist, not expiring its password"
+ fi
+}
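
A sketch of how an image recipe might exercise this helper, assuming the extrausers class routes a passwd-expire command through to perform_passwd_expire (that wiring is outside this diff):

    # Hypothetical image recipe fragment
    inherit extrausers
    EXTRA_USERS_PARAMS = "\
        useradd tester; \
        passwd-expire tester; \
        "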
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
index b1f27d3658..0466325c13 100644
--- a/meta/classes/utility-tasks.bbclass
+++ b/meta/classes/utility-tasks.bbclass
@@ -19,7 +19,7 @@ python do_listtasks() {
CLEANFUNCS ?= ""
-T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
+T:task-clean = "${LOG_DIR}/cleanlogs/${PN}"
addtask clean
do_clean[nostamp] = "1"
python do_clean() {
@@ -38,6 +38,7 @@ python do_clean() {
addtask checkuri
do_checkuri[nostamp] = "1"
+do_checkuri[network] = "1"
python do_checkuri() {
src_uri = (d.getVar('SRC_URI') or "").split()
if len(src_uri) == 0:
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 0016e5c4e6..b4eb3d38ab 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -1,22 +1,3 @@
-def machine_paths(d):
- """List any existing machine specific filespath directories"""
- machine = d.getVar("MACHINE")
- filespathpkg = d.getVar("FILESPATHPKG").split(":")
- for basepath in d.getVar("FILESPATHBASE").split(":"):
- for pkgpath in filespathpkg:
- machinepath = os.path.join(basepath, pkgpath, machine)
- if os.path.isdir(machinepath):
- yield machinepath
-
-def is_machine_specific(d):
- """Determine whether the current recipe is machine specific"""
- machinepaths = set(machine_paths(d))
- srcuri = d.getVar("SRC_URI").split()
- for url in srcuri:
- fetcher = bb.fetch2.Fetch([srcuri], d)
- if url.startswith("file://"):
- if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
- return True
oe_soinstall() {
# Purpose: Install shared library file and
@@ -49,7 +30,6 @@ oe_libinstall() {
silent=""
require_static=""
require_shared=""
- staging_install=""
while [ "$#" -gt 0 ]; do
case "$1" in
-C)
@@ -81,10 +61,6 @@ oe_libinstall() {
if [ -z "$destpath" ]; then
bbfatal "oe_libinstall: no destination path specified"
fi
- if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
- then
- staging_install=1
- fi
__runcmd () {
if [ -z "$silent" ]; then
@@ -178,36 +154,6 @@ oe_libinstall() {
__runcmd cd "$olddir"
}
-oe_machinstall() {
- # Purpose: Install machine dependent files, if available
- # If not available, check if there is a default
- # If no default, just touch the destination
- # Example:
- # $1 $2 $3 $4
- # oe_machinstall -m 0644 fstab ${D}/etc/fstab
- #
- # TODO: Check argument number?
- #
- filename=`basename $3`
- dirname=`dirname $3`
-
- for o in `echo ${OVERRIDES} | tr ':' ' '`; do
- if [ -e $dirname/$o/$filename ]; then
- bbnote $dirname/$o/$filename present, installing to $4
- install $1 $2 $dirname/$o/$filename $4
- return
- fi
- done
-# bbnote overrides specific file NOT present, trying default=$3...
- if [ -e $3 ]; then
- bbnote $3 present, installing to $4
- install $1 $2 $3 $4
- else
- bbnote $3 NOT present, touching empty $4
- touch $4
- fi
-}
-
create_cmdline_wrapper () {
# Create a wrapper script where commandline options are needed
#
@@ -233,7 +179,7 @@ create_cmdline_wrapper () {
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
+exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
@@ -264,7 +210,7 @@ create_wrapper () {
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
export $exportstring
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
+exec -a "\$0" \$realdir/$cmdname.real "\$@"
END
chmod +x $cmd
}
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
index 615eb379ad..bfcceff7cf 100644
--- a/meta/classes/vala.bbclass
+++ b/meta/classes/vala.bbclass
@@ -2,16 +2,16 @@
# because that is where target builds look for .vapi files.
#
VALADEPENDS = ""
-VALADEPENDS_class-target = "vala"
-DEPENDS_append = " vala-native ${VALADEPENDS}"
+VALADEPENDS:class-target = "vala"
+DEPENDS:append = " vala-native ${VALADEPENDS}"
# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
export STAGING_DATADIR
# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
-export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
# Package additional files
-FILES_${PN}-dev += "\
+FILES:${PN}-dev += "\
${datadir}/vala/vapi/*.vapi \
${datadir}/vala/vapi/*.deps \
${datadir}/gir-1.0 \
@@ -19,6 +19,6 @@ FILES_${PN}-dev += "\
# Remove vapigen.m4 that is bundled with tarballs
# because it does not yet have our cross-compile fixes
-do_configure_prepend() {
+do_configure:prepend() {
rm -f ${S}/m4/vapigen.m4
}
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
index 8e6d754c29..464564afa1 100644
--- a/meta/classes/waf.bbclass
+++ b/meta/classes/waf.bbclass
@@ -1,19 +1,55 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
+# Which Python interpreter to use. Defaults to Python 3 but can be
+# overridden if required.
+WAF_PYTHON ?= "python3"
+
B = "${WORKDIR}/build"
+do_configure[cleandirs] += "${B}"
+
+EXTRA_OECONF:append = " ${PACKAGECONFIG_CONFARGS}"
+
+EXTRA_OEWAF_BUILD ??= ""
+# In most cases, you want to pass the same arguments to `waf build` and `waf
+# install`, but you can override it if necessary
+EXTRA_OEWAF_INSTALL ??= "${EXTRA_OEWAF_BUILD}"
+
+def waflock_hash(d):
+ # Calculates the hash used for the waf lock file. This should include
+ # all of the user controllable inputs passed to waf configure. Note
+ # that the full paths for ${B} and ${S} are used; this is OK and desired
+ # because a change to either of these should create a unique lock file
+ # to prevent collisions.
+ import hashlib
+ h = hashlib.sha512()
+ def update(name):
+ val = d.getVar(name)
+ if val is not None:
+ h.update(val.encode('utf-8'))
+ update('S')
+ update('B')
+ update('prefix')
+ update('EXTRA_OECONF')
+ return h.hexdigest()
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+# Use WAFLOCK to specify a separate lock file. The build is already
+# sufficiently isolated by setting the output directory; this ensures that
+# bitbake won't step on the toes of any other configured context in the source
+# directory (e.g. if the source is coming from externalsrc and was previously
+# configured elsewhere).
+export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
+BB_BASEHASH_IGNORE_VARS += "WAFLOCK"
python waf_preconfigure() {
import subprocess
- from distutils.version import StrictVersion
subsrcdir = d.getVar('S')
+ python = d.getVar('WAF_PYTHON')
wafbin = os.path.join(subsrcdir, 'waf')
try:
- result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
+ result = subprocess.check_output([python, wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
version = result.decode('utf-8').split()[1]
- if StrictVersion(version) >= StrictVersion("1.8.7"):
+ if bb.utils.vercmp_string_op(version, "1.8.7", ">="):
d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
except subprocess.CalledProcessError as e:
bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
@@ -24,16 +60,16 @@ python waf_preconfigure() {
do_configure[prefuncs] += "waf_preconfigure"
waf_do_configure() {
- (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
+ (cd ${S} && ${WAF_PYTHON} ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
}
do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
+ (cd ${S} && ${WAF_PYTHON} ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)} ${EXTRA_OEWAF_BUILD})
}
waf_do_install() {
- (cd ${S} && ./waf install --destdir=${D})
+ (cd ${S} && ${WAF_PYTHON} ./waf install --destdir=${D} ${EXTRA_OEWAF_INSTALL})
}
EXPORT_FUNCTIONS do_configure do_compile do_install
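
A minimal sketch of a recipe picking up the new knobs (contents are hypothetical; -v is assumed to be a valid waf verbosity flag):

    # Hypothetical waf-based recipe fragment
    inherit waf
    WAF_PYTHON = "python3"
    EXTRA_OEWAF_BUILD = "-v"
    # EXTRA_OEWAF_INSTALL defaults to ${EXTRA_OEWAF_BUILD}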
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
new file mode 100644
index 0000000000..be155b7bc2
--- /dev/null
+++ b/meta/classes/xmlcatalog.bbclass
@@ -0,0 +1,26 @@
+DEPENDS = "libxml2-native"
+
+# A whitespace-separated list of XML catalogs to be registered, for example
+# "${sysconfdir}/xml/docbook-xml.xml".
+XMLCATALOGS ?= ""
+
+SYSROOT_PREPROCESS_FUNCS:append = " xmlcatalog_sstate_postinst"
+
+xmlcatalog_complete() {
+ ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
+ if [ ! -f $ROOTCATALOG ]; then
+ mkdir --parents $(dirname $ROOTCATALOG)
+ xmlcatalog --noout --create $ROOTCATALOG
+ fi
+ for CATALOG in ${XMLCATALOGS}; do
+ xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
+ done
+}
+
+xmlcatalog_sstate_postinst() {
+ mkdir -p ${SYSROOT_DESTDIR}${bindir}
+ dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
+ echo '#!/bin/sh' > $dest
+ echo '${xmlcatalog_complete}' >> $dest
+ chmod 0755 $dest
+}
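
A sketch of a recipe registering a catalog via the new class, reusing the example path from the comment above:

    # Hypothetical recipe fragment
    inherit xmlcatalog
    XMLCATALOGS = "${sysconfdir}/xml/docbook-xml.xml"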
diff --git a/meta/classes/yocto-check-layer.bbclass b/meta/classes/yocto-check-layer.bbclass
new file mode 100644
index 0000000000..329d3f8edb
--- /dev/null
+++ b/meta/classes/yocto-check-layer.bbclass
@@ -0,0 +1,16 @@
+#
+# This class is used by the yocto-check-layer script for additional per-recipe tests.
+# The first test ensures that the layer has no recipes skipping the 'installed-vs-shipped' QA check.
+#
+
+WARN_QA:remove = "installed-vs-shipped"
+ERROR_QA:append = " installed-vs-shipped"
+
+python () {
+ packages = set((d.getVar('PACKAGES') or '').split())
+ for package in packages:
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
+ if 'installed-vs-shipped' in skip:
+ oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
+}
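
For reference, this is the kind of recipe metadata the anonymous function above flags:

    # Hypothetical recipe line that would trigger the QA error
    INSANE_SKIP:${PN} = "installed-vs-shipped"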