path: root/meta/classes
Diffstat (limited to 'meta/classes')
-rw-r--r-- meta/classes/allarch.bbclass | 63
-rw-r--r-- meta/classes/archiver.bbclass | 111
-rw-r--r-- meta/classes/autotools-brokensep.bbclass | 5
-rw-r--r-- meta/classes/autotools.bbclass | 258
-rw-r--r-- meta/classes/base.bbclass | 729
-rw-r--r-- meta/classes/bash-completion.bbclass | 7
-rw-r--r-- meta/classes/bin_package.bbclass | 39
-rw-r--r-- meta/classes/binconfig-disabled.bbclass | 30
-rw-r--r-- meta/classes/binconfig.bbclass | 54
-rw-r--r-- meta/classes/blacklist.bbclass | 20
-rw-r--r-- meta/classes/buildhistory.bbclass | 227
-rw-r--r-- meta/classes/buildstats-summary.bbclass | 6
-rw-r--r-- meta/classes/buildstats.bbclass | 217
-rw-r--r-- meta/classes/ccache.bbclass | 21
-rw-r--r-- meta/classes/ccmake.bbclass | 6
-rw-r--r-- meta/classes/chrpath.bbclass | 8
-rw-r--r-- meta/classes/clutter.bbclass | 18
-rw-r--r-- meta/classes/cmake.bbclass | 211
-rw-r--r-- meta/classes/cml1.bbclass | 79
-rw-r--r-- meta/classes/compress_doc.bbclass | 263
-rw-r--r-- meta/classes/copyleft_compliance.bbclass | 6
-rw-r--r-- meta/classes/copyleft_filter.bbclass | 8
-rw-r--r-- meta/classes/core-image.bbclass | 75
-rw-r--r-- meta/classes/cpan-base.bbclass | 18
-rw-r--r-- meta/classes/cpan.bbclass | 65
-rw-r--r-- meta/classes/cpan_build.bbclass | 41
-rw-r--r-- meta/classes/create-spdx-2.2.bbclass | 1158
-rw-r--r-- meta/classes/create-spdx.bbclass | 8
-rw-r--r-- meta/classes/cross-canadian.bbclass | 194
-rw-r--r-- meta/classes/cross.bbclass | 99
-rw-r--r-- meta/classes/crosssdk.bbclass | 51
-rw-r--r-- meta/classes/cve-check.bbclass | 600
-rw-r--r-- meta/classes/debian.bbclass | 146
-rw-r--r-- meta/classes/deploy.bbclass | 12
-rw-r--r-- meta/classes/devicetree.bbclass | 148
-rw-r--r-- meta/classes/devshell.bbclass | 155
-rw-r--r-- meta/classes/devtool-source.bbclass | 13
-rw-r--r-- meta/classes/devupstream.bbclass | 48
-rw-r--r-- meta/classes/distro_features_check.bbclass | 7
-rw-r--r-- meta/classes/distrooverrides.bbclass | 12
-rw-r--r-- meta/classes/distutils-common-base.bbclass | 25
-rw-r--r-- meta/classes/distutils3-base.bbclass | 5
-rw-r--r-- meta/classes/distutils3.bbclass | 65
-rw-r--r-- meta/classes/dos2unix.bbclass | 14
-rw-r--r-- meta/classes/externalsrc.bbclass | 86
-rw-r--r-- meta/classes/extrausers.bbclass | 13
-rw-r--r-- meta/classes/features_check.bbclass | 57
-rw-r--r-- meta/classes/fontcache.bbclass | 57
-rw-r--r-- meta/classes/fs-uuid.bbclass | 24
-rw-r--r-- meta/classes/gconf.bbclass | 71
-rw-r--r-- meta/classes/gettext.bbclass | 22
-rw-r--r-- meta/classes/gio-module-cache.bbclass | 38
-rw-r--r-- meta/classes/glide.bbclass | 9
-rw-r--r-- meta/classes/gnomebase.bbclass | 30
-rw-r--r-- meta/classes/go-mod.bbclass | 20
-rw-r--r-- meta/classes/go-ptest.bbclass | 54
-rw-r--r-- meta/classes/go-vendor.bbclass | 211
-rw-r--r-- meta/classes/go.bbclass | 155
-rw-r--r-- meta/classes/goarch.bbclass | 119
-rw-r--r-- meta/classes/gobject-introspection-data.bbclass | 7
-rw-r--r-- meta/classes/gobject-introspection.bbclass | 53
-rw-r--r-- meta/classes/godep.bbclass | 8
-rw-r--r-- meta/classes/grub-efi-cfg.bbclass | 122
-rw-r--r-- meta/classes/grub-efi.bbclass | 8
-rw-r--r-- meta/classes/gsettings.bbclass | 42
-rw-r--r-- meta/classes/gtk-doc.bbclass | 82
-rw-r--r-- meta/classes/gtk-icon-cache.bbclass | 79
-rw-r--r-- meta/classes/gtk-immodules-cache.bbclass | 75
-rw-r--r-- meta/classes/icecc.bbclass | 140
-rw-r--r-- meta/classes/image-buildinfo.bbclass | 54
-rw-r--r-- meta/classes/image-combined-dbg.bbclass | 9
-rw-r--r-- meta/classes/image-container.bbclass | 21
-rw-r--r-- meta/classes/image-live.bbclass | 264
-rw-r--r-- meta/classes/image-mklibs.bbclass | 56
-rw-r--r-- meta/classes/image-postinst-intercepts.bbclass | 23
-rw-r--r-- meta/classes/image-prelink.bbclass | 81
-rw-r--r-- meta/classes/image.bbclass | 672
-rw-r--r-- meta/classes/image_types.bbclass | 335
-rw-r--r-- meta/classes/image_types_wic.bbclass | 148
-rw-r--r-- meta/classes/insane.bbclass | 1357
-rw-r--r-- meta/classes/kernel-arch.bbclass | 68
-rw-r--r-- meta/classes/kernel-artifact-names.bbclass | 18
-rw-r--r-- meta/classes/kernel-devicetree.bbclass | 95
-rw-r--r-- meta/classes/kernel-fitimage.bbclass | 529
-rw-r--r-- meta/classes/kernel-grub.bbclass | 105
-rw-r--r-- meta/classes/kernel-module-split.bbclass | 175
-rw-r--r-- meta/classes/kernel-uboot.bbclass | 26
-rw-r--r-- meta/classes/kernel-uimage.bbclass | 35
-rw-r--r-- meta/classes/kernel-yocto.bbclass | 512
-rw-r--r-- meta/classes/kernel.bbclass | 740
-rw-r--r-- meta/classes/kernelsrc.bbclass | 10
-rw-r--r-- meta/classes/lib_package.bbclass | 7
-rw-r--r-- meta/classes/libc-package.bbclass | 384
-rw-r--r-- meta/classes/license.bbclass | 436
-rw-r--r-- meta/classes/license_image.bbclass | 256
-rw-r--r-- meta/classes/linux-kernel-base.bbclass | 41
-rw-r--r-- meta/classes/linuxloader.bbclass | 70
-rw-r--r-- meta/classes/live-vm-common.bbclass | 94
-rw-r--r-- meta/classes/logging.bbclass | 101
-rw-r--r-- meta/classes/manpages.bbclass | 44
-rw-r--r-- meta/classes/mcextend.bbclass | 6
-rw-r--r-- meta/classes/meson.bbclass | 186
-rw-r--r-- meta/classes/meta.bbclass | 4
-rw-r--r-- meta/classes/metadata_scm.bbclass | 52
-rw-r--r-- meta/classes/migrate_localcount.bbclass | 6
-rw-r--r-- meta/classes/mime-xdg.bbclass | 74
-rw-r--r-- meta/classes/mime.bbclass | 70
-rw-r--r-- meta/classes/mirrors.bbclass | 76
-rw-r--r-- meta/classes/module-base.bbclass | 21
-rw-r--r-- meta/classes/module.bbclass | 74
-rw-r--r-- meta/classes/multilib.bbclass | 48
-rw-r--r-- meta/classes/multilib_global.bbclass | 93
-rw-r--r-- meta/classes/multilib_header.bbclass | 52
-rw-r--r-- meta/classes/multilib_script.bbclass | 34
-rw-r--r-- meta/classes/native.bbclass | 198
-rw-r--r-- meta/classes/nativesdk.bbclass | 114
-rw-r--r-- meta/classes/nopackages.bbclass | 12
-rw-r--r-- meta/classes/npm.bbclass | 307
-rw-r--r-- meta/classes/oelint.bbclass | 6
-rw-r--r-- meta/classes/own-mirrors.bbclass | 33
-rw-r--r-- meta/classes/package.bbclass | 2439
-rw-r--r-- meta/classes/package_deb.bbclass | 325
-rw-r--r-- meta/classes/package_ipk.bbclass | 282
-rw-r--r-- meta/classes/package_pkgdata.bbclass | 167
-rw-r--r-- meta/classes/package_rpm.bbclass | 755
-rw-r--r-- meta/classes/package_tar.bbclass | 73
-rw-r--r-- meta/classes/packagedata.bbclass | 34
-rw-r--r-- meta/classes/packagefeed-stability.bbclass | 252
-rw-r--r-- meta/classes/packagegroup.bbclass | 61
-rw-r--r-- meta/classes/patch.bbclass | 167
-rw-r--r-- meta/classes/perl-version.bbclass | 66
-rw-r--r-- meta/classes/perlnative.bbclass | 3
-rw-r--r-- meta/classes/pixbufcache.bbclass | 63
-rw-r--r-- meta/classes/pkgconfig.bbclass | 2
-rw-r--r-- meta/classes/populate_sdk.bbclass | 7
-rw-r--r-- meta/classes/populate_sdk_base.bbclass | 338
-rw-r--r-- meta/classes/populate_sdk_ext.bbclass | 782
-rw-r--r-- meta/classes/prexport.bbclass | 6
-rw-r--r-- meta/classes/primport.bbclass | 6
-rw-r--r-- meta/classes/ptest-gnome.bbclass | 8
-rw-r--r-- meta/classes/ptest-perl.bbclass | 30
-rw-r--r-- meta/classes/ptest.bbclass | 119
-rw-r--r-- meta/classes/pypi.bbclass | 26
-rw-r--r-- meta/classes/python3-dir.bbclass | 5
-rw-r--r-- meta/classes/python3native.bbclass | 26
-rw-r--r-- meta/classes/qemu.bbclass | 67
-rw-r--r-- meta/classes/qemuboot.bbclass | 142
-rw-r--r-- meta/classes/recipe_sanity.bbclass | 8
-rw-r--r-- meta/classes/relative_symlinks.bbclass | 6
-rw-r--r-- meta/classes/relocatable.bbclass | 6
-rw-r--r-- meta/classes/remove-libtool.bbclass | 6
-rw-r--r-- meta/classes/report-error.bbclass | 58
-rw-r--r-- meta/classes/reproducible_build.bbclass | 202
-rw-r--r-- meta/classes/reproducible_build_simple.bbclass | 9
-rw-r--r-- meta/classes/rm_work.bbclass | 126
-rw-r--r-- meta/classes/rm_work_and_downloads.bbclass | 5
-rw-r--r-- meta/classes/rootfs-postcommands.bbclass | 373
-rw-r--r-- meta/classes/rootfs_deb.bbclass | 35
-rw-r--r-- meta/classes/rootfs_ipk.bbclass | 38
-rw-r--r-- meta/classes/rootfs_rpm.bbclass | 39
-rw-r--r-- meta/classes/rootfsdebugfiles.bbclass | 41
-rw-r--r-- meta/classes/sanity.bbclass | 1029
-rw-r--r-- meta/classes/scons.bbclass | 31
-rw-r--r-- meta/classes/setuptools3.bbclass | 4
-rw-r--r-- meta/classes/sign_ipk.bbclass | 6
-rw-r--r-- meta/classes/sign_package_feed.bbclass | 9
-rw-r--r-- meta/classes/sign_rpm.bbclass | 6
-rw-r--r-- meta/classes/siteconfig.bbclass | 6
-rw-r--r-- meta/classes/siteinfo.bbclass | 199
-rw-r--r-- meta/classes/spdx.bbclass | 360
-rw-r--r-- meta/classes/sstate.bbclass | 1217
-rw-r--r-- meta/classes/staging.bbclass | 621
-rw-r--r-- meta/classes/syslinux.bbclass | 194
-rw-r--r-- meta/classes/systemd-boot-cfg.bbclass | 71
-rw-r--r-- meta/classes/systemd-boot.bbclass | 35
-rw-r--r-- meta/classes/systemd.bbclass | 232
-rw-r--r-- meta/classes/terminal.bbclass | 11
-rw-r--r-- meta/classes/testexport.bbclass | 182
-rw-r--r-- meta/classes/testimage.bbclass | 485
-rw-r--r-- meta/classes/testsdk.bbclass | 50
-rw-r--r-- meta/classes/texinfo.bbclass | 18
-rw-r--r-- meta/classes/toaster.bbclass | 10
-rw-r--r-- meta/classes/toolchain-scripts-base.bbclass | 11
-rw-r--r-- meta/classes/toolchain-scripts.bbclass | 203
-rw-r--r-- meta/classes/typecheck.bbclass | 6
-rw-r--r-- meta/classes/uboot-config.bbclass | 57
-rw-r--r-- meta/classes/uboot-extlinux-config.bbclass | 157
-rw-r--r-- meta/classes/uboot-sign.bbclass | 132
-rw-r--r-- meta/classes/uninative.bbclass | 166
-rw-r--r-- meta/classes/update-alternatives.bbclass | 327
-rw-r--r-- meta/classes/update-rc.d.bbclass | 123
-rw-r--r-- meta/classes/upstream-version-is-even.bbclass | 5
-rw-r--r-- meta/classes/useradd-staticids.bbclass | 38
-rw-r--r-- meta/classes/useradd.bbclass | 105
-rw-r--r-- meta/classes/useradd_base.bbclass | 24
-rw-r--r-- meta/classes/utility-tasks.bbclass | 53
-rw-r--r-- meta/classes/utils.bbclass | 362
-rw-r--r-- meta/classes/vala.bbclass | 24
-rw-r--r-- meta/classes/waf.bbclass | 65
-rw-r--r-- meta/classes/xmlcatalog.bbclass | 26
-rw-r--r-- meta/classes/yocto-check-layer.bbclass | 22
201 files changed, 2738 insertions, 26306 deletions
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
deleted file mode 100644
index 5bd5c44a27..0000000000
--- a/meta/classes/allarch.bbclass
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# This class is used for architecture independent recipes/data files (usually scripts)
-#
-
-python allarch_package_arch_handler () {
- if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
- or bb.data.inherits_class("crosssdk", d):
- return
-
- variants = d.getVar("MULTILIB_VARIANTS")
- if not variants:
- d.setVar("PACKAGE_ARCH", "all" )
-}
-
-addhandler allarch_package_arch_handler
-allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
-
-python () {
- # Allow this class to be included but overridden - only set
- # the values if we're still "all" package arch.
- if d.getVar("PACKAGE_ARCH") == "all":
- # No need for virtual/libc or a cross compiler
- d.setVar("INHIBIT_DEFAULT_DEPS","1")
-
- # Set these to a common set of values, we shouldn't be using them other that for WORKDIR directory
- # naming anyway
- d.setVar("baselib", "lib")
- d.setVar("TARGET_ARCH", "allarch")
- d.setVar("TARGET_OS", "linux")
- d.setVar("TARGET_CC_ARCH", "none")
- d.setVar("TARGET_LD_ARCH", "none")
- d.setVar("TARGET_AS_ARCH", "none")
- d.setVar("TARGET_FPU", "")
- d.setVar("TARGET_PREFIX", "")
- # Expand PACKAGE_EXTRA_ARCHS since the staging code needs this
- # (this removes any dependencies from the hash perspective)
- d.setVar("PACKAGE_EXTRA_ARCHS", d.getVar("PACKAGE_EXTRA_ARCHS"))
- d.setVar("SDK_ARCH", "none")
- d.setVar("SDK_CC_ARCH", "none")
- d.setVar("TARGET_CPPFLAGS", "none")
- d.setVar("TARGET_CFLAGS", "none")
- d.setVar("TARGET_CXXFLAGS", "none")
- d.setVar("TARGET_LDFLAGS", "none")
- d.setVar("POPULATESYSROOTDEPS", "")
-
- # Avoid this being unnecessarily different due to nuances of
- # the target machine that aren't important for "all" arch
- # packages.
- d.setVar("LDFLAGS", "")
-
- # No need to do shared library processing or debug symbol handling
- d.setVar("EXCLUDE_FROM_SHLIBS", "1")
- d.setVar("INHIBIT_PACKAGE_DEBUG_SPLIT", "1")
- d.setVar("INHIBIT_PACKAGE_STRIP", "1")
-
- # These multilib values shouldn't change allarch packages so exclude them
- d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
- d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
- d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
- elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
- bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
-}
-
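As the header of the deleted allarch.bbclass notes, the class is meant for architecture-independent recipes such as pure script or data packages. A minimal sketch of a consumer recipe, assuming illustrative file names and paths not taken from this diff:

    SUMMARY = "Architecture-independent configuration data (illustrative)"
    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"
    SRC_URI = "file://example.conf"

    inherit allarch

    do_install() {
        install -d ${D}${datadir}/example
        install -m 0644 ${WORKDIR}/example.conf ${D}${datadir}/example/
    }

With allarch inherited, the handler above forces PACKAGE_ARCH to "all" (unless MULTILIB_VARIANTS is set, or the recipe is native, nativesdk or crosssdk), so the resulting package is shared across machines.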
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index aff1f9dbb0..2d0bbfbd42 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -1,11 +1,15 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
#
# This bbclass is used for creating archive for:
# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
# 3) configured source: ARCHIVER_MODE[src] = "configured"
-# 4) source mirror: ARCHIVE_MODE[src] = "mirror"
+# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
# 5) The patches between do_unpack and do_patch:
# ARCHIVER_MODE[diff] = "1"
# And you can set the one that you'd like to exclude from the diff:
@@ -51,55 +55,66 @@ ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
ARCHIVER_MODE[mirror] ?= "split"
+ARCHIVER_MODE[compression] ?= "xz"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
-ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
# where multiple recipes use the same SRC_URI.
ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}/mirror"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}/mirror"
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
-do_deploy_archives[dirs] = "${WORKDIR}"
# This is a convenience for the shell script to use it
-
-python () {
- pn = d.getVar('PN')
- assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
- if pn in assume_provided:
- for p in d.getVar("PROVIDES").split():
- if p != pn:
- pn = p
- break
+def include_package(d, pn):
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'archiver: %s is excluded: %s' % (pn, reason))
- return
+ return False
+
else:
bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
-
# glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
# so avoid archiving source here.
if pn.startswith('glibc-locale'):
- return
+ return False
# We just archive gcc-source for all the gcc related recipes
if d.getVar('BPN') in ['gcc', 'libgcc'] \
and not pn.startswith('gcc-source'):
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
+ return False
+
+ return True
+
+python () {
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ if not include_package(d, pn):
return
+ # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+ if pn.startswith('gcc-source'):
+ d.setVar('ARCHIVER_ARCH', "allarch")
+
def hasTask(task):
return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
@@ -118,7 +133,7 @@ python () {
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
# We can't use "addtask do_ar_configured after do_configure" since it
- # will cause the deptask of do_populate_sysroot to run not matter what
+ # will cause the deptask of do_populate_sysroot to run no matter what
# archives we need, so we add the depends here.
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
@@ -163,7 +178,7 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
}
-# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
+# Take all the sources for a recipe and put them in WORKDIR/archiver-work/.
# Files in SRC_URI are copied directly, anything that's a directory
# (e.g. git repositories) is "unpacked" and then put into a tarball.
python do_ar_original() {
@@ -281,7 +296,10 @@ python do_ar_configured() {
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
# do_configure, we archive the already configured ${S} to
# instead of.
- elif pn != 'libtool-native':
+ # The kernel class functions require it to be on work-shared, we
+ # don't unpack, patch, configure again, just archive the already
+ # configured ${S}
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -383,19 +401,11 @@ python do_ar_mirror() {
subprocess.check_call(cmd, shell=True)
}
-def exclude_useless_paths(tarinfo):
- if tarinfo.isdir():
- if tarinfo.name.endswith('/temp') or tarinfo.name.endswith('/patches') or tarinfo.name.endswith('/.pc'):
- return None
- elif tarinfo.name == 'temp' or tarinfo.name == 'patches' or tarinfo.name == '.pc':
- return None
- return tarinfo
-
def create_tarball(d, srcdir, suffix, ar_outdir):
"""
create the tarball from srcdir
"""
- import tarfile
+ import subprocess
# Make sure we are only creating a single tarball for gcc sources
if (d.getVar('SRC_URI') == ""):
@@ -406,17 +416,30 @@ def create_tarball(d, srcdir, suffix, ar_outdir):
# that we archive the actual directory and not just the link.
srcdir = os.path.realpath(srcdir)
+ compression_method = d.getVarFlag('ARCHIVER_MODE', 'compression')
+ if compression_method == "xz":
+ compression_cmd = "xz %s" % d.getVar('XZ_DEFAULTS')
+ # To keep compatibility with ARCHIVER_MODE[compression]
+ elif compression_method == "gz":
+ compression_cmd = "gzip"
+ elif compression_method == "bz2":
+ compression_cmd = "bzip2"
+ else:
+ bb.fatal("Unsupported compression_method: %s" % compression_method)
+
bb.utils.mkdirhier(ar_outdir)
if suffix:
- filename = '%s-%s.tar.gz' % (d.getVar('PF'), suffix)
+ filename = '%s-%s.tar.%s' % (d.getVar('PF'), suffix, compression_method)
else:
- filename = '%s.tar.gz' % d.getVar('PF')
+ filename = '%s.tar.%s' % (d.getVar('PF'), compression_method)
tarname = os.path.join(ar_outdir, filename)
bb.note('Creating %s' % tarname)
- tar = tarfile.open(tarname, 'w:gz')
- tar.add(srcdir, arcname=os.path.basename(srcdir), filter=exclude_useless_paths)
- tar.close()
+ dirname = os.path.dirname(srcdir)
+ basename = os.path.basename(srcdir)
+ exclude = "--exclude=temp --exclude=patches --exclude='.pc'"
+ tar_cmd = "tar %s -cf - %s | %s > %s" % (exclude, basename, compression_cmd, tarname)
+ subprocess.check_call(tar_cmd, cwd=dirname, shell=True)
# creating .diff.gz between source.orig and source
def create_diff_gz(d, src_orig, src, ar_outdir):
@@ -449,8 +472,8 @@ def create_diff_gz(d, src_orig, src, ar_outdir):
os.chdir(cwd)
def is_work_shared(d):
- pn = d.getVar('PN')
- return bb.data.inherits_class('kernel', d) or pn.startswith('gcc-source')
+ sharedworkdir = os.path.join(d.getVar('TMPDIR'), 'work-shared')
+ return d.getVar('S').startswith(sharedworkdir)
# Run do_unpack and do_patch
python do_unpack_and_patch() {
@@ -463,7 +486,7 @@ python do_unpack_and_patch() {
ar_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
pn = d.getVar('PN')
- # The kernel class functions require it to be on work-shared, so we dont change WORKDIR
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
if not is_work_shared(d):
# Change the WORKDIR to make do_unpack do_patch run in another dir.
d.setVar('WORKDIR', ar_workdir)
@@ -483,6 +506,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
@@ -505,7 +531,7 @@ python do_unpack_and_patch() {
# of the output file ensures that we create it each time the recipe
# gets rebuilt, at least as long as a PR server is used. We also rely
# on that mechanism to catch changes in the file content, because the
-# file content is not part of of the task signature either.
+# file content is not part of the task signature either.
do_ar_recipe[vardepsexclude] += "BBINCLUDED"
python do_ar_recipe () {
"""
@@ -571,7 +597,7 @@ python do_dumpdata () {
SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
- echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+ bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
sstate_setscene(d)
@@ -590,6 +616,7 @@ addtask do_dumpdata
addtask do_ar_recipe
addtask do_deploy_archives
do_build[recrdeptask] += "do_deploy_archives"
+do_rootfs[recrdeptask] += "do_deploy_archives"
do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
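The ARCHIVER_MODE flags touched in this hunk are normally set from local.conf or a distro conf. A hedged sketch of enabling the class with the modes documented in its header; the values shown are just one plausible combination:

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "original"
    ARCHIVER_MODE[diff] = "1"
    ARCHIVER_MODE[recipe] = "1"
    # New in this version of the class: select the tarball compression
    ARCHIVER_MODE[compression] = "xz"

The archives still land under ${DEPLOY_DIR}/sources (DEPLOY_DIR_SRC); the change above mainly renames ARCHIVER_TOPDIR, routes compression through an external tar pipeline instead of Python's tarfile module, and keys the output directory on ARCHIVER_ARCH so gcc-source works in multiconfig builds.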
diff --git a/meta/classes/autotools-brokensep.bbclass b/meta/classes/autotools-brokensep.bbclass
deleted file mode 100644
index 71cf97a391..0000000000
--- a/meta/classes/autotools-brokensep.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-# Autotools class for recipes where separate build dir doesn't work
-# Ideally we should fix software so it does work. Standard autotools supports
-# this.
-inherit autotools
-B = "${S}"
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
deleted file mode 100644
index 6c2a33ac72..0000000000
--- a/meta/classes/autotools.bbclass
+++ /dev/null
@@ -1,258 +0,0 @@
-def autotools_dep_prepend(d):
- if d.getVar('INHIBIT_AUTOTOOLS_DEPS'):
- return ''
-
- pn = d.getVar('PN')
- deps = ''
-
- if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
- return deps
- deps += 'autoconf-native automake-native '
-
- if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
- deps += 'libtool-native '
- if not bb.data.inherits_class('native', d) \
- and not bb.data.inherits_class('nativesdk', d) \
- and not bb.data.inherits_class('cross', d) \
- and not d.getVar('INHIBIT_DEFAULT_DEPS'):
- deps += 'libtool-cross '
-
- return deps + 'gnu-config-native '
-
-DEPENDS_prepend = "${@autotools_dep_prepend(d)} "
-
-inherit siteinfo
-
-# Space separated list of shell scripts with variables defined to supply test
-# results for autoconf tests we cannot run at build time.
-# The value of this variable is filled in in a prefunc because it depends on
-# the contents of the sysroot.
-export CONFIG_SITE
-
-acpaths ?= "default"
-EXTRA_AUTORECONF = "--exclude=autopoint"
-
-export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
-
-# When building tools for use at build-time it's recommended for the build
-# system to use these variables when cross-compiling.
-# (http://sources.redhat.com/autobook/autobook/autobook_270.html)
-export CPP_FOR_BUILD = "${BUILD_CPP}"
-export CPPFLAGS_FOR_BUILD = "${BUILD_CPPFLAGS}"
-
-export CC_FOR_BUILD = "${BUILD_CC}"
-export CFLAGS_FOR_BUILD = "${BUILD_CFLAGS}"
-
-export CXX_FOR_BUILD = "${BUILD_CXX}"
-export CXXFLAGS_FOR_BUILD="${BUILD_CXXFLAGS}"
-
-export LD_FOR_BUILD = "${BUILD_LD}"
-export LDFLAGS_FOR_BUILD = "${BUILD_LDFLAGS}"
-
-def append_libtool_sysroot(d):
- # Only supply libtool sysroot option for non-native packages
- if not bb.data.inherits_class('native', d):
- return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
- return ""
-
-CONFIGUREOPTS = " --build=${BUILD_SYS} \
- --host=${HOST_SYS} \
- --target=${TARGET_SYS} \
- --prefix=${prefix} \
- --exec_prefix=${exec_prefix} \
- --bindir=${bindir} \
- --sbindir=${sbindir} \
- --libexecdir=${libexecdir} \
- --datadir=${datadir} \
- --sysconfdir=${sysconfdir} \
- --sharedstatedir=${sharedstatedir} \
- --localstatedir=${localstatedir} \
- --libdir=${libdir} \
- --includedir=${includedir} \
- --oldincludedir=${oldincludedir} \
- --infodir=${infodir} \
- --mandir=${mandir} \
- --disable-silent-rules \
- ${CONFIGUREOPT_DEPTRACK} \
- ${@append_libtool_sysroot(d)}"
-CONFIGUREOPT_DEPTRACK ?= "--disable-dependency-tracking"
-
-CACHED_CONFIGUREVARS ?= ""
-
-AUTOTOOLS_SCRIPT_PATH ?= "${S}"
-CONFIGURE_SCRIPT ?= "${AUTOTOOLS_SCRIPT_PATH}/configure"
-
-AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
-
-oe_runconf () {
- # Use relative path to avoid buildpaths in files
- cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
- cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
- if [ -x "$cfgscript" ] ; then
- bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
- if ! ${CACHED_CONFIGUREVARS} CONFIG_SHELL=/bin/bash $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
- bbnote "The following config.log files may provide further information."
- bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
- bbfatal_log "configure failed"
- fi
- else
- bbfatal "no configure script found at $cfgscript"
- fi
-}
-
-CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
-
-autotools_preconfigure() {
- if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
- if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
- if [ "${S}" != "${B}" ]; then
- echo "Previously configured separate build directory detected, cleaning ${B}"
- rm -rf ${B}
- mkdir -p ${B}
- else
- # At least remove the .la files since automake won't automatically
- # regenerate them even if CFLAGS/LDFLAGS are different
- cd ${S}
- if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
- oe_runmake clean
- fi
- find ${S} -ignore_readdir_race -name \*.la -delete
- fi
- fi
- fi
-}
-
-autotools_postconfigure(){
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
- mkdir -p `dirname ${CONFIGURESTAMPFILE}`
- echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
- fi
-}
-
-EXTRACONFFUNCS ??= ""
-
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
-
-do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
-do_compile[prefuncs] += "autotools_aclocals"
-do_install[prefuncs] += "autotools_aclocals"
-do_configure[postfuncs] += "autotools_postconfigure"
-
-ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
-ACLOCALEXTRAPATH = ""
-ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
-ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
-
-python autotools_aclocals () {
- d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
-}
-
-CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
-
-autotools_do_configure() {
- # WARNING: gross hack follows:
- # An autotools built package generally needs these scripts, however only
- # automake or libtoolize actually install the current versions of them.
- # This is a problem in builds that do not use libtool or automake, in the case
- # where we -need- the latest version of these scripts. e.g. running a build
- # for a package whose autotools are old, on an x86_64 machine, which the old
- # config.sub does not support. Work around this by installing them manually
- # regardless.
-
- PRUNE_M4=""
-
- for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
- rm -f `dirname $ac`/configure
- done
- if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
- olddir=`pwd`
- cd ${AUTOTOOLS_SCRIPT_PATH}
- mkdir -p ${ACLOCALDIR}
- ACLOCAL="aclocal --system-acdir=${ACLOCALDIR}/"
- if [ x"${acpaths}" = xdefault ]; then
- acpaths=
- for i in `find ${AUTOTOOLS_SCRIPT_PATH} -ignore_readdir_race -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
- grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
- acpaths="$acpaths -I $i"
- done
- else
- acpaths="${acpaths}"
- fi
- acpaths="$acpaths ${ACLOCALEXTRAPATH}"
- AUTOV=`automake --version | sed -e '1{s/.* //;s/\.[0-9]\+$//};q'`
- automake --version
- echo "AUTOV is $AUTOV"
- if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
- ACLOCAL="$ACLOCAL --automake-acdir=${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
- fi
- # autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
- # like it was auto-generated. Work around this by blowing it away
- # by hand, unless the package specifically asked not to run aclocal.
- if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
- rm -f aclocal.m4
- fi
- if [ -e configure.in ]; then
- CONFIGURE_AC=configure.in
- else
- CONFIGURE_AC=configure.ac
- fi
- if grep -q "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC; then
- if grep -q "sed.*POTFILES" $CONFIGURE_AC; then
- : do nothing -- we still have an old unmodified configure.ac
- else
- bbnote Executing glib-gettextize --force --copy
- echo "no" | glib-gettextize --force --copy
- fi
- elif [ "${BPN}" != "gettext" ] && grep -q "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC; then
- # We'd call gettextize here if it wasn't so broken...
- cp ${STAGING_DATADIR_NATIVE}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
- if [ -d ${S}/po/ ]; then
- cp -f ${STAGING_DATADIR_NATIVE}/gettext/po/Makefile.in.in ${S}/po/
- if [ ! -e ${S}/po/remove-potcdate.sin ]; then
- cp ${STAGING_DATADIR_NATIVE}/gettext/po/remove-potcdate.sin ${S}/po/
- fi
- fi
- PRUNE_M4="$PRUNE_M4 gettext.m4 iconv.m4 lib-ld.m4 lib-link.m4 lib-prefix.m4 nls.m4 po.m4 progtest.m4"
- fi
- mkdir -p m4
- if grep -q "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC; then
- if ! echo "${DEPENDS}" | grep -q intltool-native; then
- bbwarn "Missing DEPENDS on intltool-native"
- fi
- PRUNE_M4="$PRUNE_M4 intltool.m4"
- bbnote Executing intltoolize --copy --force --automake
- intltoolize --copy --force --automake
- fi
-
- for i in $PRUNE_M4; do
- find ${S} -ignore_readdir_race -name $i -delete
- done
-
- bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
- ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
- cd $olddir
- fi
- if [ -e ${CONFIGURE_SCRIPT} ]; then
- oe_runconf
- else
- bbnote "nothing to configure"
- fi
-}
-
-autotools_do_compile() {
- oe_runmake
-}
-
-autotools_do_install() {
- oe_runmake 'DESTDIR=${D}' install
- # Info dir listing isn't interesting at this point so remove it if it exists.
- if [ -e "${D}${infodir}/dir" ]; then
- rm -f ${D}${infodir}/dir
- fi
-}
-
-inherit siteconfig
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-B = "${WORKDIR}/build"
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
deleted file mode 100644
index 4c681cc870..0000000000
--- a/meta/classes/base.bbclass
+++ /dev/null
@@ -1,729 +0,0 @@
-BB_DEFAULT_TASK ?= "build"
-CLASSOVERRIDE ?= "class-target"
-
-inherit patch
-inherit staging
-
-inherit mirrors
-inherit utils
-inherit utility-tasks
-inherit metadata_scm
-inherit logging
-
-OE_EXTRA_IMPORTS ?= ""
-
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
-OE_IMPORTS[type] = "list"
-
-PACKAGECONFIG_CONFARGS ??= ""
-
-def oe_import(d):
- import sys
-
- bbpath = d.getVar("BBPATH").split(":")
- sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
-
- def inject(name, value):
- """Make a python object accessible from the metadata"""
- if hasattr(bb.utils, "_context"):
- bb.utils._context[name] = value
- else:
- __builtins__[name] = value
-
- import oe.data
- for toimport in oe.data.typed_value("OE_IMPORTS", d):
- try:
- imported = __import__(toimport)
- inject(toimport.split(".", 1)[0], imported)
- except AttributeError as e:
- bb.error("Error importing OE modules: %s" % str(e))
- return ""
-
-# We need the oe module name space early (before INHERITs get added)
-OE_IMPORTED := "${@oe_import(d)}"
-
-def lsb_distro_identifier(d):
- adjust = d.getVar('LSB_DISTRO_ADJUST')
- adjust_func = None
- if adjust:
- try:
- adjust_func = globals()[adjust]
- except KeyError:
- pass
- return oe.lsb.distro_identifier(adjust_func)
-
-die() {
- bbfatal_log "$*"
-}
-
-oe_runmake_call() {
- bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
- ${MAKE} ${EXTRA_OEMAKE} "$@"
-}
-
-oe_runmake() {
- oe_runmake_call "$@" || die "oe_runmake failed"
-}
-
-
-def base_dep_prepend(d):
- if d.getVar('INHIBIT_DEFAULT_DEPS', False):
- return ""
- return "${BASE_DEFAULT_DEPS}"
-
-BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
-
-BASEDEPENDS = ""
-BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
-BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"
-
-DEPENDS_prepend="${BASEDEPENDS} "
-
-FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
-# THISDIR only works properly with imediate expansion as it has to run
-# in the context of the location its used (:=)
-THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
-
-def extra_path_elements(d):
- path = ""
- elements = (d.getVar('EXTRANATIVEPATH') or "").split()
- for e in elements:
- path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
- return path
-
-PATH_prepend = "${@extra_path_elements(d)}"
-
-def get_lic_checksum_file_list(d):
- filelist = []
- lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
- tmpdir = d.getVar("TMPDIR")
- s = d.getVar("S")
- b = d.getVar("B")
- workdir = d.getVar("WORKDIR")
-
- urls = lic_files.split()
- for url in urls:
- # We only care about items that are absolute paths since
- # any others should be covered by SRC_URI.
- try:
- (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
- if method != "file" or not path:
- raise bb.fetch.MalformedUrl(url)
-
- if path[0] == '/':
- if path.startswith((tmpdir, s, b, workdir)):
- continue
- filelist.append(path + ":" + str(os.path.exists(path)))
- except bb.fetch.MalformedUrl:
- bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
- return " ".join(filelist)
-
-def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
- tools = d.getVar(toolsvar).split()
- origbbenv = d.getVar("BB_ORIGENV", False)
- path = origbbenv.getVar("PATH")
- bb.utils.mkdirhier(dest)
- notfound = []
- for tool in tools:
- desttool = os.path.join(dest, tool)
- if not os.path.exists(desttool):
- # clean up dead symlink
- if os.path.islink(desttool):
- os.unlink(desttool)
- srctool = bb.utils.which(path, tool, executable=True)
- # gcc/g++ may link to ccache on some hosts, e.g.,
- # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
- # would return /usr/local/bin/ccache/gcc, but what we need is
- # /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
- srctool = bb.utils.which(path, tool, executable=True, direction=1)
- if srctool:
- os.symlink(srctool, desttool)
- else:
- notfound.append(tool)
-
- if notfound and fatal:
- bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
-
-addtask fetch
-do_fetch[dirs] = "${DL_DIR}"
-do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
-do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
-do_fetch[vardeps] += "SRCREV"
-python base_do_fetch() {
-
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.download()
- except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
-}
-
-addtask unpack after do_fetch
-do_unpack[dirs] = "${WORKDIR}"
-
-do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
-
-python base_do_unpack() {
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.unpack(d.getVar('WORKDIR'))
- except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
-}
-
-def get_layers_branch_rev(d):
- layers = (d.getVar("BBLAYERS") or "").split()
- layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
- base_get_metadata_git_branch(i, None).strip(), \
- base_get_metadata_git_revision(i, None)) \
- for i in layers]
- i = len(layers_branch_rev)-1
- p1 = layers_branch_rev[i].find("=")
- s1 = layers_branch_rev[i][p1:]
- while i > 0:
- p2 = layers_branch_rev[i-1].find("=")
- s2= layers_branch_rev[i-1][p2:]
- if s1 == s2:
- layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
- i -= 1
- else:
- i -= 1
- p1 = layers_branch_rev[i].find("=")
- s1= layers_branch_rev[i][p1:]
- return layers_branch_rev
-
-
-BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
-BUILDCFG_FUNCS[type] = "list"
-
-def buildcfg_vars(d):
- statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
- for var in statusvars:
- value = d.getVar(var)
- if value is not None:
- yield '%-20s = "%s"' % (var, value)
-
-def buildcfg_neededvars(d):
- needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
- pesteruser = []
- for v in needed_vars:
- val = d.getVar(v)
- if not val or val == 'INVALID':
- pesteruser.append(v)
-
- if pesteruser:
- bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
-
-addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
-python base_eventhandler() {
- import bb.runqueue
-
- if isinstance(e, bb.event.ConfigParsed):
- if not d.getVar("NATIVELSBSTRING", False):
- d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
- d.setVar('BB_VERSION', bb.__version__)
-
- # There might be no bb.event.ConfigParsed event if bitbake server is
- # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
- # exists.
- if isinstance(e, bb.event.ConfigParsed) or \
- (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
- # Works with the line in layer.conf which changes PATH to point here
- setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
- setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
-
- if isinstance(e, bb.event.MultiConfigParsed):
- # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores
- # own contexts so the variables get expanded correctly for that arch, then inject back into
- # the main data store.
- deps = []
- for config in e.mcdata:
- deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
- deps = " ".join(deps)
- e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)
-
- if isinstance(e, bb.event.BuildStarted):
- localdata = bb.data.createCopy(d)
- statuslines = []
- for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
- g = globals()
- if func not in g:
- bb.warn("Build configuration function '%s' does not exist" % func)
- else:
- flines = g[func](localdata)
- if flines:
- statuslines.extend(flines)
-
- statusheader = d.getVar('BUILDCFG_HEADER')
- if statusheader:
- bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
-
- # This code is to silence warnings where the SDK variables overwrite the
- # target ones and we'd see dulpicate key names overwriting each other
- # for various PREFERRED_PROVIDERS
- if isinstance(e, bb.event.RecipePreFinalise):
- if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
- d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
- d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
- d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
- d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
-
- if isinstance(e, bb.event.RecipeParsed):
- #
- # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
- # skip parsing for all the other providers which will mean they get uninstalled from the
- # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
- # particular.
- #
- pn = d.getVar('PN')
- source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
- if not source_mirror_fetch:
- provs = (d.getVar("PROVIDES") or "").split()
- multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
- for p in provs:
- if p.startswith("virtual/") and p not in multiwhitelist:
- profprov = d.getVar("PREFERRED_PROVIDER_" + p)
- if profprov and pn != profprov:
- raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
-}
-
-CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
-CLEANBROKEN = "0"
-
-addtask configure after do_patch
-do_configure[dirs] = "${B}"
-base_do_configure() {
- if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
- if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
- cd ${B}
- if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
- oe_runmake clean
- fi
- # -ignore_readdir_race does not work correctly with -delete;
- # use xargs to avoid spurious build failures
- find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
- fi
- fi
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
- mkdir -p `dirname ${CONFIGURESTAMPFILE}`
- echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
- fi
-}
-
-addtask compile after do_configure
-do_compile[dirs] = "${B}"
-base_do_compile() {
- if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
- oe_runmake || die "make failed"
- else
- bbnote "nothing to compile"
- fi
-}
-
-addtask install after do_compile
-do_install[dirs] = "${B}"
-# Remove and re-create ${D} so that is it guaranteed to be empty
-do_install[cleandirs] = "${D}"
-
-base_do_install() {
- :
-}
-
-base_do_package() {
- :
-}
-
-addtask build after do_populate_sysroot
-do_build[noexec] = "1"
-do_build[recrdeptask] += "do_deploy"
-do_build () {
- :
-}
-
-def set_packagetriplet(d):
- archs = []
- tos = []
- tvs = []
-
- archs.append(d.getVar("PACKAGE_ARCHS").split())
- tos.append(d.getVar("TARGET_OS"))
- tvs.append(d.getVar("TARGET_VENDOR"))
-
- def settriplet(d, varname, archs, tos, tvs):
- triplets = []
- for i in range(len(archs)):
- for arch in archs[i]:
- triplets.append(arch + tvs[i] + "-" + tos[i])
- triplets.reverse()
- d.setVar(varname, " ".join(triplets))
-
- settriplet(d, "PKGTRIPLETS", archs, tos, tvs)
-
- variants = d.getVar("MULTILIB_VARIANTS") or ""
- for item in variants.split():
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
- localdata.setVar("OVERRIDES", overrides)
-
- archs.append(localdata.getVar("PACKAGE_ARCHS").split())
- tos.append(localdata.getVar("TARGET_OS"))
- tvs.append(localdata.getVar("TARGET_VENDOR"))
-
- settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)
-
-python () {
- import string, re
-
- # Handle backfilling
- oe.utils.features_backfill("DISTRO_FEATURES", d)
- oe.utils.features_backfill("MACHINE_FEATURES", d)
-
- # Handle PACKAGECONFIG
- #
- # These take the form:
- #
- # PACKAGECONFIG ??= "<default options>"
- # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
- pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
- if pkgconfigflags:
- pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
- pn = d.getVar("PN")
-
- mlprefix = d.getVar("MLPREFIX")
-
- def expandFilter(appends, extension, prefix):
- appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
- newappends = []
- for a in appends:
- if a.endswith("-native") or ("-cross-" in a):
- newappends.append(a)
- elif a.startswith("virtual/"):
- subs = a.split("/", 1)[1]
- if subs.startswith(prefix):
- newappends.append(a + extension)
- else:
- newappends.append("virtual/" + prefix + subs + extension)
- else:
- if a.startswith(prefix):
- newappends.append(a + extension)
- else:
- newappends.append(prefix + a + extension)
- return newappends
-
- def appendVar(varname, appends):
- if not appends:
- return
- if varname.find("DEPENDS") != -1:
- if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
- appends = expandFilter(appends, "", "nativesdk-")
- elif bb.data.inherits_class('native', d):
- appends = expandFilter(appends, "-native", "")
- elif mlprefix:
- appends = expandFilter(appends, "", mlprefix)
- varname = d.expand(varname)
- d.appendVar(varname, " " + " ".join(appends))
-
- extradeps = []
- extrardeps = []
- extrarrecs = []
- extraconf = []
- for flag, flagval in sorted(pkgconfigflags.items()):
- items = flagval.split(",")
- num = len(items)
- if num > 6:
- bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
- % (d.getVar('PN'), flag))
-
- if flag in pkgconfig:
- if num >= 3 and items[2]:
- extradeps.append(items[2])
- if num >= 4 and items[3]:
- extrardeps.append(items[3])
- if num >= 5 and items[4]:
- extrarrecs.append(items[4])
- if num >= 1 and items[0]:
- extraconf.append(items[0])
- elif num >= 2 and items[1]:
- extraconf.append(items[1])
-
- if num >= 6 and items[5]:
- conflicts = set(items[5].split())
- invalid = conflicts.difference(set(pkgconfigflags.keys()))
- if invalid:
- bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
- % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
-
- if flag in pkgconfig:
- intersec = conflicts.intersection(set(pkgconfig))
- if intersec:
- bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
- % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
-
- appendVar('DEPENDS', extradeps)
- appendVar('RDEPENDS_${PN}', extrardeps)
- appendVar('RRECOMMENDS_${PN}', extrarrecs)
- appendVar('PACKAGECONFIG_CONFARGS', extraconf)
-
- pn = d.getVar('PN')
- license = d.getVar('LICENSE')
- if license == "INVALID" and pn != "defaultpkgname":
- bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
-
- if bb.data.inherits_class('license', d):
- check_license_format(d)
- unmatched_license_flags = check_license_flags(d)
- if unmatched_license_flags:
- if len(unmatched_license_flags) == 1:
- message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
- else:
- message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
- ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
- bb.debug(1, "Skipping %s %s" % (pn, message))
- raise bb.parse.SkipRecipe(message)
-
- # If we're building a target package we need to use fakeroot (pseudo)
- # in order to capture permissions, owners, groups and special files
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_unpack', 'umask', '022')
- d.setVarFlag('do_configure', 'umask', '022')
- d.setVarFlag('do_compile', 'umask', '022')
- d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_install', 'fakeroot', '1')
- d.setVarFlag('do_install', 'umask', '022')
- d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_package', 'fakeroot', '1')
- d.setVarFlag('do_package', 'umask', '022')
- d.setVarFlag('do_package_setscene', 'fakeroot', '1')
- d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_devshell', 'fakeroot', '1')
- d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
-
- need_machine = d.getVar('COMPATIBLE_MACHINE')
- if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
- import re
- compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
- for m in compat_machines:
- if re.match(need_machine, m):
- break
- else:
- raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
-
- source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
- if not source_mirror_fetch:
- need_host = d.getVar('COMPATIBLE_HOST')
- if need_host:
- import re
- this_host = d.getVar('HOST_SYS')
- if not re.match(need_host, this_host):
- raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
-
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
-
- check_license = False if pn.startswith("nativesdk-") else True
- for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
- "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
- "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
- if pn.endswith(d.expand(t)):
- check_license = False
- if pn.startswith("gcc-source-"):
- check_license = False
-
- if check_license and bad_licenses:
- bad_licenses = expand_wildcard_licenses(d, bad_licenses)
-
- whitelist = []
- for lic in bad_licenses:
- spdx_license = return_spdx(d, lic)
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
-
- if pn in whitelist:
- '''
- We need to track what we are whitelisting and why. If pn is
- incompatible we need to be able to note that the image that
- is created may infact contain incompatible licenses despite
- INCOMPATIBLE_LICENSE being set.
- '''
- bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
- else:
- pkgs = d.getVar('PACKAGES').split()
- skipped_pkgs = {}
- unskipped_pkgs = []
- for pkg in pkgs:
- incompatible_lic = incompatible_license(d, bad_licenses, pkg)
- if incompatible_lic:
- skipped_pkgs[pkg] = incompatible_lic
- else:
- unskipped_pkgs.append(pkg)
- if unskipped_pkgs:
- for pkg in skipped_pkgs:
- bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
- d.setVar('LICENSE_EXCLUSION-' + pkg, ' '.join(skipped_pkgs[pkg]))
- for pkg in unskipped_pkgs:
- bb.debug(1, "Including the package %s" % pkg)
- else:
- incompatible_lic = incompatible_license(d, bad_licenses)
- for pkg in skipped_pkgs:
- incompatible_lic += skipped_pkgs[pkg]
- incompatible_lic = sorted(list(set(incompatible_lic)))
-
- if incompatible_lic:
- bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
- raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
-
- needsrcrev = False
- srcuri = d.getVar('SRC_URI')
- for uri in srcuri.split():
- (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
-
- # HTTP/FTP use the wget fetcher
- if scheme in ("http", "https", "ftp"):
- d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
-
- # Svn packages should DEPEND on subversion-native
- if scheme == "svn":
- needsrcrev = True
- d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
-
- # Git packages should DEPEND on git-native
- elif scheme in ("git", "gitsm"):
- needsrcrev = True
- d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
-
- # Mercurial packages should DEPEND on mercurial-native
- elif scheme == "hg":
- needsrcrev = True
- d.appendVar("EXTRANATIVEPATH", ' python3-native ')
- d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
-
- # Perforce packages support SRCREV = "${AUTOREV}"
- elif scheme == "p4":
- needsrcrev = True
-
- # OSC packages should DEPEND on osc-native
- elif scheme == "osc":
- d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
-
- elif scheme == "npm":
- d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
-
- # *.lz4 should DEPEND on lz4-native for unpacking
- if path.endswith('.lz4'):
- d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
-
- # *.lz should DEPEND on lzip-native for unpacking
- elif path.endswith('.lz'):
- d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
-
- # *.xz should DEPEND on xz-native for unpacking
- elif path.endswith('.xz') or path.endswith('.txz'):
- d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
-
- # .zip should DEPEND on unzip-native for unpacking
- elif path.endswith('.zip') or path.endswith('.jar'):
- d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
-
- # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
- elif path.endswith('.rpm'):
- d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
-
- # *.deb should DEPEND on xz-native for unpacking
- elif path.endswith('.deb'):
- d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
-
- if needsrcrev:
- d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
-
- # Gather all named SRCREVs to add to the sstate hash calculation
- # This anonymous python snippet is called multiple times so we
- # need to be careful to not double up the appends here and cause
- # the base hash to mismatch the task hash
- for uri in srcuri.split():
- parm = bb.fetch.decodeurl(uri)[5]
- uri_names = parm.get("name", "").split(",")
- for uri_name in filter(None, uri_names):
- srcrev_name = "SRCREV_{}".format(uri_name)
- if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
- d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
-
- set_packagetriplet(d)
-
- # 'multimachine' handling
- mach_arch = d.getVar('MACHINE_ARCH')
- pkg_arch = d.getVar('PACKAGE_ARCH')
-
- if (pkg_arch == mach_arch):
- # Already machine specific - nothing further to do
- return
-
- #
- # We always try to scan SRC_URI for urls with machine overrides
- # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
- #
- override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
- if override != '0':
- paths = []
- fpaths = (d.getVar('FILESPATH') or '').split(':')
- machine = d.getVar('MACHINE')
- for p in fpaths:
- if os.path.basename(p) == machine and os.path.isdir(p):
- paths.append(p)
-
- if len(paths) != 0:
- for s in srcuri.split():
- if not s.startswith("file://"):
- continue
- fetcher = bb.fetch2.Fetch([s], d)
- local = fetcher.localpath(s)
- for mp in paths:
- if local.startswith(mp):
- #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
- d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
- return
-
- packages = d.getVar('PACKAGES').split()
- for pkg in packages:
- pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)
-
- # We could look for != PACKAGE_ARCH here but how to choose
- # if multiple differences are present?
- # Look through PACKAGE_ARCHS for the priority order?
- if pkgarch and pkgarch == mach_arch:
- d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
- bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
-}
-
-addtask cleansstate after do_clean
-python do_cleansstate() {
- sstate_clean_cachefiles(d)
-}
-addtask cleanall after do_cleansstate
-do_cleansstate[nostamp] = "1"
-
-python do_cleanall() {
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.clean()
- except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
-}
-do_cleanall[nostamp] = "1"
-
-
-EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
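The PACKAGECONFIG handling deleted above expects flags in the form shown in its comment (enable option, disable option, build depends, runtime depends, runtime recommends, conflicting flags). A small sketch of a recipe using it, with illustrative flag names and dependencies:

    PACKAGECONFIG ??= "ssl"
    PACKAGECONFIG[ssl] = "--with-ssl,--without-ssl,openssl"
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"

When "ssl" is in PACKAGECONFIG, the anonymous python above appends --with-ssl to PACKAGECONFIG_CONFARGS and openssl to DEPENDS; otherwise --without-ssl is used instead.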
diff --git a/meta/classes/bash-completion.bbclass b/meta/classes/bash-completion.bbclass
deleted file mode 100644
index 80ee9b4874..0000000000
--- a/meta/classes/bash-completion.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-DEPENDS_append_class-target = " bash-completion"
-
-PACKAGES += "${PN}-bash-completion"
-
-FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
-
-RDEPENDS_${PN}-bash-completion = "bash-completion"
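Using the class above is a one-liner in any recipe that installs completion scripts into the directories listed in FILES; the class then splits them into a ${PN}-bash-completion package with the right runtime dependency:

    inherit bash-completion

No other variables are needed unless the completions are installed outside ${datadir}/bash-completion or ${sysconfdir}/bash_completion.d.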
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
deleted file mode 100644
index cbc9b1fa13..0000000000
--- a/meta/classes/bin_package.bbclass
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Common variable and task for the binary package recipe.
-# Basic principle:
-# * The files have been unpacked to ${S} by base.bbclass
-# * Skip do_configure and do_compile
-# * Use do_install to install the files to ${D}
-#
-# Note:
-# The "subdir" parameter in the SRC_URI is useful when the input package
-# is rpm, ipk, deb and so on, for example:
-#
-# SRC_URI = "http://foo.com/foo-1.0-r1.i586.rpm;subdir=foo-1.0"
-#
-# Then the files would be unpacked to ${WORKDIR}/foo-1.0, otherwise
-# they would be in ${WORKDIR}.
-#
-
-# Skip the unwanted steps
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-
-# Install the files to ${D}
-bin_package_do_install () {
- # Do it carefully
- [ -d "${S}" ] || exit 1
- if [ -z "$(ls -A ${S})" ]; then
- bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
- fi
- cd ${S}
- tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
-}
-
-FILES_${PN} = "/"
-
-EXPORT_FUNCTIONS do_install
diff --git a/meta/classes/binconfig-disabled.bbclass b/meta/classes/binconfig-disabled.bbclass
deleted file mode 100644
index 096b670e12..0000000000
--- a/meta/classes/binconfig-disabled.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Class to disable binconfig files instead of installing them
-#
-
-# The list of scripts which should be disabled.
-BINCONFIG ?= ""
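-# A minimal, hypothetical example of a recipe using this class (the script
-# name below is illustrative only):
-#   inherit binconfig-disabled
-#   BINCONFIG = "${bindir}/foo-config"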
-
-FILES_${PN}-dev += "${bindir}/*-config"
-
-do_install_append () {
- for x in ${BINCONFIG}; do
- # Make the disabled script emit invalid parameters for those configure
- # scripts which call it without checking the return code.
- echo "#!/bin/sh" > ${D}$x
- echo "echo 'ERROR: $x should not be used, use an alternative such as pkg-config' >&2" >> ${D}$x
- echo "echo '--should-not-have-used-$x'" >> ${D}$x
- echo "exit 1" >> ${D}$x
- chmod +x ${D}$x
- done
-}
-
-SYSROOT_PREPROCESS_FUNCS += "binconfig_disabled_sysroot_preprocess"
-
-binconfig_disabled_sysroot_preprocess () {
- for x in ${BINCONFIG}; do
- configname=`basename $x`
- install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
- install ${D}$x ${SYSROOT_DESTDIR}${bindir_crossscripts}
- done
-}
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
deleted file mode 100644
index 9112ed4608..0000000000
--- a/meta/classes/binconfig.bbclass
+++ /dev/null
@@ -1,54 +0,0 @@
-FILES_${PN}-dev += "${bindir}/*-config"
-
-# The namespaces can clash here, hence the two-step replace
-def get_binconfig_mangle(d):
- s = "-e ''"
- if not bb.data.inherits_class('native', d):
- optional_quote = r"\(\"\?\)"
- s += " -e 's:=%s${base_libdir}:=\\1OEBASELIBDIR:;'" % optional_quote
- s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
- s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
- s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
- s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
- s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
- s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
- s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
- s += " -e 's:-L${WORKDIR}:-LOELIBDIR:'"
- s += " -e 's:-I${WORKDIR}:-IOEINCDIR:'"
- s += " -e 's:OEBASELIBDIR:${STAGING_BASELIBDIR}:;'"
- s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
- s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
- s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
- s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
- s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
- if d.getVar("OE_BINCONFIG_EXTRA_MANGLE", False):
- s += d.getVar("OE_BINCONFIG_EXTRA_MANGLE")
-
- return s
-
-BINCONFIG_GLOB ?= "*-config"
-
-PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
-
-binconfig_package_preprocess () {
- for config in `find ${PKGD} -type f -name '${BINCONFIG_GLOB}'`; do
- sed -i \
- -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
- -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${STAGING_INCDIR}:${includedir}:g;' \
- -e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
- $config
- done
-}
-
-SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
-
-binconfig_sysroot_preprocess () {
- for config in `find ${S} -type f -name '${BINCONFIG_GLOB}'` `find ${B} -type f -name '${BINCONFIG_GLOB}'`; do
- configname=`basename $config`
- install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
- sed ${@get_binconfig_mangle(d)} $config > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
- chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
- done
-}
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
deleted file mode 100644
index dc794228ff..0000000000
--- a/meta/classes/blacklist.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# anonymous support class, originally from angstrom
-#
-# To use the blacklist, a distribution should include this
-# class in the INHERIT_DISTRO
-#
-# No longer use ANGSTROM_BLACKLIST, instead use a table of
-# recipes in PNBLACKLIST
-#
-# Features:
-#
-# * To add a package to the blacklist, set:
-# PNBLACKLIST[pn] = "message"
-#
-
-python () {
- blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN'))
-
- if blacklist:
- raise bb.parse.SkipRecipe("Recipe is blacklisted: %s" % (blacklist))
-}
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index a4288ef9e1..fd53e92402 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -6,6 +6,10 @@
# Copyright (C) 2011-2016 Intel Corporation
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
+# SPDX-License-Identifier: MIT
+#
+
+IMAGE_CLASSES += "image-artifact-names"
BUILDHISTORY_FEATURES ?= "image package sdk"
BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
@@ -29,7 +33,7 @@ BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}
# of failed builds.
#
# The expected usage is via auto.conf, but passing via the command line also works
-# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+# with: BB_ENV_PASSTHROUGH_ADDITIONS=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
BUILDHISTORY_RESET ?= ""
BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
@@ -41,15 +45,16 @@ BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
BUILDHISTORY_TAG ?= "build"
+BUILDHISTORY_PATH_PREFIX_STRIP ?= ""
-SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
+SSTATEPOSTINSTFUNCS:append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
# then the value added to SSTATEPOSTINSTFUNCS:
SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
# Similarly for our function that gets the output signatures
-SSTATEPOSTUNPACKFUNCS_append = " buildhistory_emit_outputsigs"
+SSTATEPOSTUNPACKFUNCS:append = " buildhistory_emit_outputsigs"
sstate_installpkgdir[vardepsexclude] += "buildhistory_emit_outputsigs"
SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
@@ -88,13 +93,19 @@ buildhistory_emit_sysroot() {
python buildhistory_emit_pkghistory() {
if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
bb.build.exec_func("buildhistory_emit_sysroot", d)
-
- if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
if not "package" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
return 0
+ if d.getVar('BB_CURRENTTASK') in ['package', 'package_setscene']:
+ # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
+ bb.build.exec_func("buildhistory_list_pkg_files", d)
+ return 0
+
+ if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
+ return 0
+
import re
import json
import shlex
@@ -113,7 +124,9 @@ python buildhistory_emit_pkghistory() {
self.packages = ""
self.srcrev = ""
self.layer = ""
+ self.license = ""
self.config = ""
+ self.src_uri = ""
class PackageInfo:
@@ -215,6 +228,7 @@ python buildhistory_emit_pkghistory() {
pv = d.getVar('PV')
pr = d.getVar('PR')
layer = bb.utils.get_file_layer(d.getVar('FILE'), d)
+ license = d.getVar('LICENSE')
pkgdata_dir = d.getVar('PKGDATA_DIR')
packages = ""
@@ -255,23 +269,20 @@ python buildhistory_emit_pkghistory() {
rcpinfo.depends = sortlist(oe.utils.squashspaces(d.getVar('DEPENDS') or ""))
rcpinfo.packages = packages
rcpinfo.layer = layer
+ rcpinfo.license = license
rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or ""))
+ rcpinfo.src_uri = oe.utils.squashspaces(d.getVar('SRC_URI') or "")
write_recipehistory(rcpinfo, d)
- pkgdest = d.getVar('PKGDEST')
+ bb.build.exec_func("read_subpackage_metadata", d)
+
for pkg in packagelist:
- pkgdata = {}
- with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
- for line in f.readlines():
- item = line.rstrip('\n').split(': ', 1)
- key = item[0]
- if key.endswith('_' + pkg):
- key = key[:-len(pkg)-1]
- pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
-
- pkge = pkgdata.get('PKGE', '0')
- pkgv = pkgdata['PKGV']
- pkgr = pkgdata['PKGR']
+ localdata = d.createCopy()
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
+
+ pkge = localdata.getVar("PKGE") or '0'
+ pkgv = localdata.getVar("PKGV")
+ pkgr = localdata.getVar("PKGR")
#
# Find out what the last version was
# Make sure the version did not decrease
@@ -284,40 +295,39 @@ python buildhistory_emit_pkghistory() {
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
- package_qa_handle_error("version-going-backwards", msg, d)
+ oe.qa.handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
# Apparently the version can be different on a per-package basis (see Python)
- pkginfo.pe = pkgdata.get('PE', '0')
- pkginfo.pv = pkgdata['PV']
- pkginfo.pr = pkgdata['PR']
- pkginfo.pkg = pkgdata['PKG']
+ pkginfo.pe = localdata.getVar("PE") or '0'
+ pkginfo.pv = localdata.getVar("PV")
+ pkginfo.pr = localdata.getVar("PR")
+ pkginfo.pkg = localdata.getVar("PKG")
pkginfo.pkge = pkge
pkginfo.pkgv = pkgv
pkginfo.pkgr = pkgr
- pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', "")))
- pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', "")))
- pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', "")))
- pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', "")))
- pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', "")))
- pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', "")))
- pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', ""))
+ pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or ""))
+ pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or ""))
+ pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or ""))
+ pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or ""))
+        pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or ""))
+ pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or ""))
+ pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "")
for filevar in pkginfo.filevars:
- pkginfo.filevars[filevar] = pkgdata.get(filevar, "")
+ pkginfo.filevars[filevar] = localdata.getVar(filevar) or ""
# Gather information about packaged files
- val = pkgdata.get('FILES_INFO', '')
+ val = localdata.getVar('FILES_INFO') or ''
dictval = json.loads(val)
filelist = list(dictval.keys())
filelist.sort()
pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
- pkginfo.size = int(pkgdata['PKGSIZE'])
+ pkginfo.size = int(localdata.getVar('PKGSIZE') or '0')
write_pkghistory(pkginfo, d)
- # Create files-in-<package-name>.txt files containing a list of files of each recipe's package
- bb.build.exec_func("buildhistory_list_pkg_files", d)
+ oe.qa.exit_if_errors(d)
}
python buildhistory_emit_outputsigs() {
@@ -370,7 +380,9 @@ def write_recipehistory(rcpinfo, d):
f.write(u"DEPENDS = %s\n" % rcpinfo.depends)
f.write(u"PACKAGES = %s\n" % rcpinfo.packages)
f.write(u"LAYER = %s\n" % rcpinfo.layer)
+ f.write(u"LICENSE = %s\n" % rcpinfo.license)
f.write(u"CONFIG = %s\n" % rcpinfo.config)
+ f.write(u"SRC_URI = %s\n" % rcpinfo.src_uri)
write_latest_srcrev(d, pkghistdir)
@@ -429,19 +441,24 @@ def buildhistory_list_installed(d, rootfs_type="image"):
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- process_list = [('file', 'bh_installed_pkgs.txt'),\
- ('deps', 'bh_installed_pkgs_deps.txt')]
+ process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
+ ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]
if rootfs_type == "image":
pkgs = image_list_installed_packages(d)
else:
pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+ if rootfs_type == "sdk_host":
+ pkgdata_dir = d.getVar('PKGDATA_DIR_SDK')
+ else:
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+
for output_type, output_file in process_list:
output_file_full = os.path.join(d.getVar('WORKDIR'), output_file)
with open(output_file_full, 'w') as output:
- output.write(format_pkg_list(pkgs, output_type))
+ output.write(format_pkg_list(pkgs, output_type, pkgdata_dir))
python buildhistory_list_installed_image() {
buildhistory_list_installed(d)
@@ -460,9 +477,10 @@ buildhistory_get_installed() {
# Get list of installed packages
pkgcache="$1/installed-packages.tmp"
- cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+ cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt
cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+
if [ -s $pkgcache ] ; then
cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
else
@@ -471,8 +489,8 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
- rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
+ rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
# Remove lines with rpmlib(...) and config(...) dependencies, change the
# delimiter from pipe to "->", set the style for recommend lines and
# turn versioned dependencies into edge labels.
@@ -481,6 +499,8 @@ buildhistory_get_installed() {
-e 's:|: -> :' \
-e 's:"\[REC\]":[style=dotted]:' \
-e 's:"\([<>=]\+\)" "\([^"]*\)":[label="\1 \2"]:' \
+ -e 's:"\([*]\+\)" "\([^"]*\)":[label="\2"]:' \
+ -e 's:"\[RPROVIDES\]":[style=dashed]:' \
$1/depends.tmp
# Add header, sorted and de-duped contents and footer and then delete the temp file
printf "digraph depends {\n node [shape=plaintext]\n" > $1/depends.dot
@@ -488,11 +508,22 @@ buildhistory_get_installed() {
echo "}" >> $1/depends.dot
rm $1/depends.tmp
+ # Set correct pkgdatadir
+ pkgdatadir=${PKGDATA_DIR}
+ if [ "$2" = "sdk" ] && [ "$3" = "host" ] ; then
+ pkgdatadir="${PKGDATA_DIR_SDK}"
+ fi
+
# Produce installed package sizes list
- oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+ oe-pkgdata-util -p $pkgdatadir read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB\t" $1}' | sort -n -r > $1/installed-package-sizes.txt
rm $1/installed-package-sizes.tmp
+ # Produce package info: runtime_name, buildtime_name, recipe, version, size
+ oe-pkgdata-util -p $pkgdatadir read-value "PACKAGE,PN,PV,PKGSIZE" -n -f $pkgcache > $1/installed-package-info.tmp
+ cat $1/installed-package-info.tmp | sort -n -r -k 5 > $1/installed-package-info.txt
+ rm $1/installed-package-info.tmp
+
# We're now done with the cache, delete it
rm $pkgcache
@@ -529,7 +560,7 @@ buildhistory_get_sdk_installed() {
return
fi
- buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk
+ buildhistory_get_installed ${BUILDHISTORY_DIR_SDK}/$1 sdk $1
}
buildhistory_get_sdk_installed_host() {
@@ -660,26 +691,29 @@ python buildhistory_get_extra_sdkinfo() {
# By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
# unneeded packages but before the removal of packaging files
-ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image ;"
-ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed ;"
-ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image ;| buildhistory_get_image_installed ;"
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_list_installed_image"
+ROOTFS_POSTUNINSTALL_COMMAND += "buildhistory_get_image_installed"
+ROOTFS_POSTUNINSTALL_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_image| buildhistory_get_image_installed"
ROOTFS_POSTUNINSTALL_COMMAND[vardepsexclude] += "buildhistory_list_installed_image buildhistory_get_image_installed"
-IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo ;"
-IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo ;"
+IMAGE_POSTPROCESS_COMMAND += "buildhistory_get_imageinfo"
+IMAGE_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_imageinfo"
IMAGE_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_imageinfo"
# We want these to be the last run so that we get called after complementary package installation
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
-POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_list_installed_sdk_target"
+POPULATE_SDK_POST_TARGET_COMMAND:append = " buildhistory_get_sdk_installed_target"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target| buildhistory_get_sdk_installed_target"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_target buildhistory_get_sdk_installed_target"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
-POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_list_installed_sdk_host"
+POPULATE_SDK_POST_HOST_COMMAND:append = " buildhistory_get_sdk_installed_host"
+POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host| buildhistory_get_sdk_installed_host"
+POPULATE_SDK_POST_HOST_COMMAND[vardepsexclude] += "buildhistory_list_installed_sdk_host buildhistory_get_sdk_installed_host"
-SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
-SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND:append = " buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
+SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
+SDK_POSTPROCESS_COMMAND[vardepsexclude] += "buildhistory_get_sdkinfo buildhistory_get_extra_sdkinfo"
python buildhistory_write_sigs() {
if not "task" in (d.getVar('BUILDHISTORY_FEATURES') or "").split():
@@ -689,7 +723,7 @@ python buildhistory_write_sigs() {
if hasattr(bb.parse.siggen, 'dump_siglist'):
taskoutdir = os.path.join(d.getVar('BUILDHISTORY_DIR'), 'task')
bb.utils.mkdirhier(taskoutdir)
- bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'))
+ bb.parse.siggen.dump_siglist(os.path.join(taskoutdir, 'tasksigs.txt'), d.getVar("BUILDHISTORY_PATH_PREFIX_STRIP"))
}
def buildhistory_get_build_id(d):
@@ -709,30 +743,10 @@ def buildhistory_get_build_id(d):
statusheader = d.getVar('BUILDCFG_HEADER')
return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
-def buildhistory_get_modified(path):
- # copied from get_layer_git_status() in image-buildinfo.bbclass
- import subprocess
- try:
- subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
- git diff --quiet --no-ext-diff
- git diff --quiet --no-ext-diff --cached""" % path,
- shell=True,
- stderr=subprocess.STDOUT)
- return ""
- except subprocess.CalledProcessError as ex:
- # Silently treat errors as "modified", without checking for the
- # (expected) return code 1 in a modified git repo. For example, we get
- # output and a 129 return code when a layer isn't a git repo at all.
- return " -- modified"
-
def buildhistory_get_metadata_revs(d):
- # We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
- layers = (d.getVar("BBLAYERS") or "").split()
- medadata_revs = ["%-17s = %s:%s%s" % (os.path.basename(i), \
- base_get_metadata_git_branch(i, None).strip(), \
- base_get_metadata_git_revision(i, None), \
- buildhistory_get_modified(i)) \
- for i in layers]
+ # We want an easily machine-readable format here
+ revisions = oe.buildcfg.get_layer_revisions(d)
+ medadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
return '\n'.join(medadata_revs)
def outputvars(vars, listvars, d):
@@ -757,11 +771,11 @@ def buildhistory_get_imagevars(d):
def buildhistory_get_sdkvars(d):
if d.getVar('BB_WORKERCONTEXT') != '1':
return ""
- sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
+ sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES TOOLCHAIN_HOST_TASK TOOLCHAIN_TARGET_TASK BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
if d.getVar('BB_CURRENTTASK') == 'populate_sdk_ext':
# Extensible SDK uses some additional variables
- sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
- listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
+ sdkvars += " ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS SDK_INCLUDE_PKGDATA SDK_INCLUDE_TOOLCHAIN"
+ listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE ESDK_LOCALCONF_ALLOW ESDK_LOCALCONF_REMOVE ESDK_CLASS_INHERIT_DISABLE"
return outputvars(sdkvars, listvars, d)
@@ -854,7 +868,7 @@ END
}
python buildhistory_eventhandler() {
- if e.data.getVar('BUILDHISTORY_FEATURES').strip():
+ if (e.data.getVar('BUILDHISTORY_FEATURES') or "").strip():
reset = e.data.getVar("BUILDHISTORY_RESET")
olddir = e.data.getVar("BUILDHISTORY_OLD_DIR")
if isinstance(e, bb.event.BuildStarted):
@@ -864,10 +878,11 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
- os.rename(os.path.join(rootdir, entry),
+ bb.utils.rename(os.path.join(rootdir, entry),
os.path.join(olddir, entry))
elif isinstance(e, bb.event.BuildCompleted):
if reset:
@@ -906,22 +921,12 @@ def _get_srcrev_values(d):
if urldata[u].method.supports_srcrev():
scms.append(u)
- autoinc_templ = 'AUTOINC+'
dict_srcrevs = {}
dict_tag_srcrevs = {}
for scm in scms:
ud = urldata[scm]
for name in ud.names:
- try:
- rev = ud.method.sortable_revision(ud, d, name)
- except TypeError:
- # support old bitbake versions
- rev = ud.method.sortable_revision(scm, ud, d, name)
- # Clean this up when we next bump bitbake version
- if type(rev) != str:
- autoinc, rev = rev
- elif rev.startswith(autoinc_templ):
- rev = rev[len(autoinc_templ):]
+ autoinc, rev = ud.method.sortable_revision(ud, d, name)
dict_srcrevs[name] = rev
if 'tag' in ud.parm:
tag = ud.parm['tag'];
@@ -952,23 +957,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
diff --git a/meta/classes/buildstats-summary.bbclass b/meta/classes/buildstats-summary.bbclass
index f9b241b6c5..12e8f17836 100644
--- a/meta/classes/buildstats-summary.bbclass
+++ b/meta/classes/buildstats-summary.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Summarize sstate usage at the end of the build
python buildstats_summary () {
import collections
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
deleted file mode 100644
index 6f87187233..0000000000
--- a/meta/classes/buildstats.bbclass
+++ /dev/null
@@ -1,217 +0,0 @@
-BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
-
-################################################################################
-# Build statistics gathering.
-#
-# The CPU and Time gathering/tracking functions and bbevent inspiration
-# were written by Christopher Larson.
-#
-################################################################################
-
-def get_buildprocess_cputime(pid):
- with open("/proc/%d/stat" % pid, "r") as f:
- fields = f.readline().rstrip().split()
- # 13: utime, 14: stime, 15: cutime, 16: cstime
- return sum(int(field) for field in fields[13:16])
-
-def get_process_cputime(pid):
- import resource
- with open("/proc/%d/stat" % pid, "r") as f:
- fields = f.readline().rstrip().split()
- stats = {
- 'utime' : fields[13],
- 'stime' : fields[14],
- 'cutime' : fields[15],
- 'cstime' : fields[16],
- }
- iostats = {}
- if os.path.isfile("/proc/%d/io" % pid):
- with open("/proc/%d/io" % pid, "r") as f:
- while True:
- i = f.readline().strip()
- if not i:
- break
- if not ":" in i:
- # one more extra line is appended (empty or containing "0")
- # most probably due to race condition in kernel while
- # updating IO stats
- break
- i = i.split(": ")
- iostats[i[0]] = i[1]
- resources = resource.getrusage(resource.RUSAGE_SELF)
- childres = resource.getrusage(resource.RUSAGE_CHILDREN)
- return stats, iostats, resources, childres
-
-def get_cputime():
- with open("/proc/stat", "r") as f:
- fields = f.readline().rstrip().split()[1:]
- return sum(int(field) for field in fields)
-
-def set_timedata(var, d, server_time):
- d.setVar(var, server_time)
-
-def get_timedata(var, d, end_time):
- oldtime = d.getVar(var, False)
- if oldtime is None:
- return
- return end_time - oldtime
-
-def set_buildtimedata(var, d):
- import time
- time = time.time()
- cputime = get_cputime()
- proctime = get_buildprocess_cputime(os.getpid())
- d.setVar(var, (time, cputime, proctime))
-
-def get_buildtimedata(var, d):
- import time
- timedata = d.getVar(var, False)
- if timedata is None:
- return
- oldtime, oldcpu, oldproc = timedata
- procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
- cpudiff = get_cputime() - oldcpu
- end_time = time.time()
- timediff = end_time - oldtime
- if cpudiff > 0:
- cpuperc = float(procdiff) * 100 / cpudiff
- else:
- cpuperc = None
- return timediff, cpuperc
-
-def write_task_data(status, logfile, e, d):
- with open(os.path.join(logfile), "a") as f:
- elapsedtime = get_timedata("__timedata_task", d, e.time)
- if elapsedtime:
- f.write(d.expand("${PF}: %s\n" % e.task))
- f.write(d.expand("Elapsed time: %0.2f seconds\n" % elapsedtime))
- cpu, iostats, resources, childres = get_process_cputime(os.getpid())
- if cpu:
- f.write("utime: %s\n" % cpu['utime'])
- f.write("stime: %s\n" % cpu['stime'])
- f.write("cutime: %s\n" % cpu['cutime'])
- f.write("cstime: %s\n" % cpu['cstime'])
- for i in iostats:
- f.write("IO %s: %s\n" % (i, iostats[i]))
- rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
- for i in rusages:
- f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
- for i in rusages:
- f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
- if status == "passed":
- f.write("Status: PASSED \n")
- else:
- f.write("Status: FAILED \n")
- f.write("Ended: %0.2f \n" % e.time)
-
-python run_buildstats () {
- import bb.build
- import bb.event
- import time, subprocess, platform
-
- bn = d.getVar('BUILDNAME')
- bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
- taskdir = os.path.join(bsdir, d.getVar('PF'))
-
- if isinstance(e, bb.event.BuildStarted):
- ########################################################################
- # If the kernel was not configured to provide I/O statistics, issue
-        # a one-time warning.
- ########################################################################
- if not os.path.isfile("/proc/%d/io" % os.getpid()):
- bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
-
- ########################################################################
- # at first pass make the buildstats hierarchy and then
- # set the buildname
- ########################################################################
- bb.utils.mkdirhier(bsdir)
- set_buildtimedata("__timedata_build", d)
- build_time = os.path.join(bsdir, "build_stats")
- # write start of build into build_time
- with open(build_time, "a") as f:
- host_info = platform.uname()
- f.write("Host Info: ")
- for x in host_info:
- if x:
- f.write(x + " ")
- f.write("\n")
- f.write("Build Started: %0.2f \n" % d.getVar('__timedata_build', False)[0])
-
- elif isinstance(e, bb.event.BuildCompleted):
- build_time = os.path.join(bsdir, "build_stats")
- with open(build_time, "a") as f:
- ########################################################################
- # Write build statistics for the build
- ########################################################################
- timedata = get_buildtimedata("__timedata_build", d)
- if timedata:
- time, cpu = timedata
- # write end of build and cpu used into build_time
- f.write("Elapsed time: %0.2f seconds \n" % (time))
- if cpu:
- f.write("CPU usage: %0.1f%% \n" % cpu)
-
- if isinstance(e, bb.build.TaskStarted):
- set_timedata("__timedata_task", d, e.time)
- bb.utils.mkdirhier(taskdir)
- # write into the task event file the name and start time
- with open(os.path.join(taskdir, e.task), "a") as f:
- f.write("Event: %s \n" % bb.event.getName(e))
- f.write("Started: %0.2f \n" % e.time)
-
- elif isinstance(e, bb.build.TaskSucceeded):
- write_task_data("passed", os.path.join(taskdir, e.task), e, d)
- if e.task == "do_rootfs":
- bs = os.path.join(bsdir, "build_stats")
- with open(bs, "a") as f:
- rootfs = d.getVar('IMAGE_ROOTFS')
- if os.path.isdir(rootfs):
- try:
- rootfs_size = subprocess.check_output(["du", "-sh", rootfs],
- stderr=subprocess.STDOUT).decode('utf-8')
- f.write("Uncompressed Rootfs size: %s" % rootfs_size)
- except subprocess.CalledProcessError as err:
- bb.warn("Failed to get rootfs size: %s" % err.output.decode('utf-8'))
-
- elif isinstance(e, bb.build.TaskFailed):
- # Can have a failure before TaskStarted so need to mkdir here too
- bb.utils.mkdirhier(taskdir)
- write_task_data("failed", os.path.join(taskdir, e.task), e, d)
- ########################################################################
-        # Let's make things easier and tell people where the build failed in
- # build_status. We do this here because BuildCompleted triggers no
- # matter what the status of the build actually is
- ########################################################################
- build_status = os.path.join(bsdir, "build_stats")
- with open(build_status, "a") as f:
- f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
-}
-
-addhandler run_buildstats
-run_buildstats[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskStarted bb.build.TaskSucceeded bb.build.TaskFailed"
-
-python runqueue_stats () {
- import buildstats
- from bb import event, runqueue
- # We should not record any samples before the first task has started,
- # because that's the first activity shown in the process chart.
- # Besides, at that point we are sure that the build variables
- # are available that we need to find the output directory.
- # The persistent SystemStats is stored in the datastore and
- # closed when the build is done.
- system_stats = d.getVar('_buildstats_system_stats', False)
- if not system_stats and isinstance(e, (bb.runqueue.sceneQueueTaskStarted, bb.runqueue.runQueueTaskStarted)):
- system_stats = buildstats.SystemStats(d)
- d.setVar('_buildstats_system_stats', system_stats)
- if system_stats:
- # Ensure that we sample at important events.
- done = isinstance(e, bb.event.BuildCompleted)
- system_stats.sample(e, force=done)
- if done:
- system_stats.close()
- d.delVar('_buildstats_system_stats')
-}
-
-addhandler runqueue_stats
-runqueue_stats[eventmask] = "bb.runqueue.sceneQueueTaskStarted bb.runqueue.runQueueTaskStarted bb.event.HeartbeatEvent bb.event.BuildCompleted bb.event.MonitorDiskEvent"
diff --git a/meta/classes/ccache.bbclass b/meta/classes/ccache.bbclass
index b5457359ca..262db6672c 100644
--- a/meta/classes/ccache.bbclass
+++ b/meta/classes/ccache.bbclass
@@ -1,4 +1,10 @@
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
# Usage:
# - Enable ccache
# Add the following line to a conffile such as conf/local.conf:
@@ -22,6 +28,11 @@
# be shared between different builds.
CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
+# ccache-native and cmake-native have a circular dependency
+# that affects other native recipes, but not all.
+# Allows ccache to be used in the specified native recipes.
+CCACHE_NATIVE_RECIPES_ALLOWED ?= ""
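+# A hypothetical example (e.g. in local.conf); the recipe names below are
+# placeholders, not real recipes:
+#   CCACHE_NATIVE_RECIPES_ALLOWED = "foo-native bar-native"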
+
 # ccache removes CCACHE_BASEDIR from file paths, so that hashes will be the same
# in different builds.
export CCACHE_BASEDIR ?= "${TMPDIR}"
@@ -33,6 +44,10 @@ export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
+# Avoids errors such as:
+# ccache: error: Failed to create directory /run/user/0/ccache-tmp: Permission denied
+export CCACHE_TEMPDIR ?= "${CCACHE_DIR}/tmp"
+
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
# its hash. Without this the cache would be invalidated every time
@@ -44,9 +59,9 @@ python() {
Enable ccache for the recipe
"""
pn = d.getVar('PN')
- # quilt-native doesn't need ccache since no c files
- if not (pn in ('ccache-native', 'quilt-native') or
- bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
+ if (pn in d.getVar('CCACHE_NATIVE_RECIPES_ALLOWED') or
+ not (bb.data.inherits_class("native", d) or
+ bb.utils.to_boolean(d.getVar('CCACHE_DISABLE')))):
d.appendVar('DEPENDS', ' ccache-native')
d.setVar('CCACHE', 'ccache ')
}
diff --git a/meta/classes/ccmake.bbclass b/meta/classes/ccmake.bbclass
index df5134a108..c5b4bf6260 100644
--- a/meta/classes/ccmake.bbclass
+++ b/meta/classes/ccmake.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
inherit terminal
python do_ccmake() {
diff --git a/meta/classes/chrpath.bbclass b/meta/classes/chrpath.bbclass
index 26b984c4db..16729dcf61 100644
--- a/meta/classes/chrpath.bbclass
+++ b/meta/classes/chrpath.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
@@ -56,7 +62,7 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlin
def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
- p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
+ p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE, text=True)
out, err = p.communicate()
# If returned successfully, process stdout for results
if p.returncode != 0:
diff --git a/meta/classes/clutter.bbclass b/meta/classes/clutter.bbclass
deleted file mode 100644
index 24b53a13e4..0000000000
--- a/meta/classes/clutter.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
-def get_minor_dir(v):
- import re
- m = re.match(r"^([0-9]+)\.([0-9]+)", v)
- return "%s.%s" % (m.group(1), m.group(2))
-
-def get_real_name(n):
- import re
- m = re.match(r"^([a-z]+(-[a-z]+)?)(-[0-9]+\.[0-9]+)?", n)
- return "%s" % (m.group(1))
-
-VERMINOR = "${@get_minor_dir("${PV}")}"
-REALNAME = "${@get_real_name("${BPN}")}"
-
-SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
-S = "${WORKDIR}/${REALNAME}-${PV}"
-
-CLUTTERBASEBUILDCLASS ??= "autotools"
-inherit ${CLUTTERBASEBUILDCLASS} pkgconfig gtk-doc gettext
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
deleted file mode 100644
index 94ed8061bb..0000000000
--- a/meta/classes/cmake.bbclass
+++ /dev/null
@@ -1,211 +0,0 @@
-# Path to the CMake file to process.
-OECMAKE_SOURCEPATH ??= "${S}"
-
-DEPENDS_prepend = "cmake-native "
-B = "${WORKDIR}/build"
-
-# What CMake generator to use.
-# The supported options are "Unix Makefiles" or "Ninja".
-OECMAKE_GENERATOR ?= "Ninja"
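-# For example, a recipe whose build does not work with Ninja could set
-# (illustrative only):
-#   OECMAKE_GENERATOR = "Unix Makefiles"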
-
-python() {
- generator = d.getVar("OECMAKE_GENERATOR")
- if "Unix Makefiles" in generator:
- args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
- d.setVar("OECMAKE_GENERATOR_ARGS", args)
- d.setVarFlag("do_compile", "progress", "percent")
- elif "Ninja" in generator:
- args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
- d.appendVar("DEPENDS", " ninja-native")
- d.setVar("OECMAKE_GENERATOR_ARGS", args)
- d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
- else:
- bb.fatal("Unknown CMake Generator %s" % generator)
-
- # C/C++ Compiler (without cpu arch/tune arguments)
- if not d.getVar('OECMAKE_C_COMPILER'):
- cc_list = d.getVar('CC').split()
- if cc_list[0] == 'ccache':
- d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0])
- d.setVar('OECMAKE_C_COMPILER', cc_list[1])
- else:
- d.setVar('OECMAKE_C_COMPILER', cc_list[0])
-
- if not d.getVar('OECMAKE_CXX_COMPILER'):
- cxx_list = d.getVar('CXX').split()
- if cxx_list[0] == 'ccache':
- d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0])
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1])
- else:
- d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
-}
-OECMAKE_AR ?= "${AR}"
-
-# Compiler flags
-OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
-OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
-OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
-OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
-OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
-OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
-CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
-
-OECMAKE_C_COMPILER_LAUNCHER ?= ""
-OECMAKE_CXX_COMPILER_LAUNCHER ?= ""
-
-OECMAKE_RPATH ?= ""
-OECMAKE_PERLNATIVE_DIR ??= ""
-OECMAKE_EXTRA_ROOT_PATH ?= ""
-
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
-OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
-
-EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
-
-export CMAKE_BUILD_PARALLEL_LEVEL
-CMAKE_BUILD_PARALLEL_LEVEL_task-compile = "${@oe.utils.parallel_make(d, False)}"
-CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
-
-OECMAKE_TARGET_COMPILE ?= "all"
-OECMAKE_TARGET_INSTALL ?= "install"
-
-# CMake expects target architectures in the format of uname(2),
-# which do not always match TARGET_ARCH, so all the necessary
-# conversions should happen here.
-def map_target_arch_to_uname_arch(target_arch):
- if target_arch == "powerpc":
- return "ppc"
- if target_arch == "powerpc64":
- return "ppc64"
- return target_arch
-
-cmake_do_generate_toolchain_file() {
- if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
- cmake_crosscompiling="set( CMAKE_CROSSCOMPILING FALSE )"
- fi
- cat > ${WORKDIR}/toolchain.cmake <<EOF
-# CMake system name must be something like "Linux".
-# This is important for cross-compiling.
-$cmake_crosscompiling
-set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
-set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
-set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
-set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
-set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
-set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
-set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
-set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
-set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
-set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
-set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
-set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
-set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
-set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
-
-# only search in the paths provided so cmake doesn't pick
-# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
-set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
-set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
-set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
-set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
-set( CMAKE_PROGRAM_PATH "/" )
-
-# Use qt.conf settings
-set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
-
-# We need to set the rpath to the correct directory as cmake does not provide any
-# directory as rpath by default
-set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
-
-# Use RPATHs relative to build directory for reproducibility
-set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
-
-# Use our cmake modules
-list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
-
-# add for non /usr/lib libdir, e.g. /usr/lib64
-set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
-
-# add include dir to implicit includes in case it differs from /usr/include
-list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
-list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
-
-EOF
-}
-
-addtask generate_toolchain_file after do_patch before do_configure
-
-CONFIGURE_FILES = "CMakeLists.txt"
-
-cmake_do_configure() {
- if [ "${OECMAKE_BUILDPATH}" ]; then
- bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
- fi
-
- if [ "${S}" != "${B}" ]; then
- rm -rf ${B}
- mkdir -p ${B}
- cd ${B}
- else
- find ${B} -name CMakeFiles -or -name Makefile -or -name cmake_install.cmake -or -name CMakeCache.txt -delete
- fi
-
-	# Just like autotools, cmake can use a site file to cache results that need generated binaries to run
- if [ -e ${WORKDIR}/site-file.cmake ] ; then
- oecmake_sitefile="-C ${WORKDIR}/site-file.cmake"
- else
- oecmake_sitefile=
- fi
-
- cmake \
- ${OECMAKE_GENERATOR_ARGS} \
- $oecmake_sitefile \
- ${OECMAKE_SOURCEPATH} \
- -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
- -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
- -DPython_EXECUTABLE:PATH=${PYTHON} \
- -DPython3_EXECUTABLE:PATH=${PYTHON} \
- -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
- -DCMAKE_INSTALL_SO_NO_EXE=0 \
- -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
- -DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
- ${EXTRA_OECMAKE} \
- -Wno-dev
-}
-
-# To disable verbose cmake logs for a given recipe, or globally via config metadata
-# such as local.conf, add the following:
-#
-# CMAKE_VERBOSE = ""
-#
-
-CMAKE_VERBOSE ??= "VERBOSE=1"
-
-# Then run do_compile again
-cmake_runcmake_build() {
- bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
- eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
-}
-
-cmake_do_compile() {
- cmake_runcmake_build --target ${OECMAKE_TARGET_COMPILE}
-}
-
-cmake_do_install() {
- DESTDIR='${D}' cmake_runcmake_build --target ${OECMAKE_TARGET_INSTALL}
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
deleted file mode 100644
index c7f6723cb3..0000000000
--- a/meta/classes/cml1.bbclass
+++ /dev/null
@@ -1,79 +0,0 @@
-cml1_do_configure() {
- set -e
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- yes '' | oe_runmake oldconfig
-}
-
-EXPORT_FUNCTIONS do_configure
-addtask configure after do_unpack do_patch before do_compile
-
-inherit terminal
-
-OE_TERMINAL_EXPORTS += "HOST_EXTRACFLAGS HOSTLDFLAGS TERMINFO CROSS_CURSES_LIB CROSS_CURSES_INC"
-HOST_EXTRACFLAGS = "${BUILD_CFLAGS} ${BUILD_LDFLAGS}"
-HOSTLDFLAGS = "${BUILD_LDFLAGS}"
-CROSS_CURSES_LIB = "-lncurses -ltinfo"
-CROSS_CURSES_INC = '-DCURSES_LOC="<curses.h>"'
-TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo"
-
-KCONFIG_CONFIG_COMMAND ??= "menuconfig"
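-# The command above is run as "make ${KCONFIG_CONFIG_COMMAND}" in the terminal
-# spawned below; a recipe could, for instance, point it at a different kconfig
-# target (illustrative only):
-#   KCONFIG_CONFIG_COMMAND = "nconfig"
-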
-python do_menuconfig() {
- import shutil
-
- try:
- mtime = os.path.getmtime(".config")
- shutil.copy(".config", ".config.orig")
- except OSError:
- mtime = 0
-
- oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
- d.getVar('PN') + ' Configuration', d)
-
- # FIXME this check can be removed when the minimum bitbake version has been bumped
- if hasattr(bb.build, 'write_taint'):
- try:
- newmtime = os.path.getmtime(".config")
- except OSError:
- newmtime = 0
-
- if newmtime > mtime:
- bb.note("Configuration changed, recompile will be forced")
- bb.build.write_taint('do_compile', d)
-}
-do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
-do_menuconfig[nostamp] = "1"
-do_menuconfig[dirs] = "${B}"
-addtask menuconfig after do_configure
-
-python do_diffconfig() {
- import shutil
- import subprocess
-
- workdir = d.getVar('WORKDIR')
- fragment = workdir + '/fragment.cfg'
- configorig = '.config.orig'
- config = '.config'
-
- try:
- md5newconfig = bb.utils.md5_file(configorig)
- md5config = bb.utils.md5_file(config)
- isdiff = md5newconfig != md5config
- except IOError as e:
- bb.fatal("No config files found. Did you do menuconfig ?\n%s" % e)
-
- if isdiff:
- statement = 'diff --unchanged-line-format= --old-line-format= --new-line-format="%L" ' + configorig + ' ' + config + '>' + fragment
- subprocess.call(statement, shell=True)
- # No need to check the exit code as we know it's going to be
- # non-zero, but that's what we expect.
- shutil.copy(configorig, config)
-
- bb.plain("Config fragment has been dumped into:\n %s" % fragment)
- else:
- if os.path.exists(fragment):
- os.unlink(fragment)
-}
-
-do_diffconfig[nostamp] = "1"
-do_diffconfig[dirs] = "${B}"
-addtask diffconfig
diff --git a/meta/classes/compress_doc.bbclass b/meta/classes/compress_doc.bbclass
deleted file mode 100644
index d6d11fad26..0000000000
--- a/meta/classes/compress_doc.bbclass
+++ /dev/null
@@ -1,263 +0,0 @@
-# Compress man pages in ${mandir} and info pages in ${infodir}
-#
-# 1. The doc will be compressed to gz format by default.
-#
-# 2. Docs that are compressed in a format listed in ${DOC_COMPRESS_LIST} but
-#    different from ${DOC_COMPRESS} will automatically be re-compressed to
-#    the ${DOC_COMPRESS} format
-#
-# 3. It is easy to add a new compression type by editing
-# local.conf, such as:
-# DOC_COMPRESS_LIST_append = ' abc'
-# DOC_COMPRESS = 'abc'
-# DOC_COMPRESS_CMD[abc] = 'abc compress cmd ***'
-# DOC_DECOMPRESS_CMD[abc] = 'abc decompress cmd ***'
-
-# All supported compression policy
-DOC_COMPRESS_LIST ?= "gz xz bz2"
-
-# Compression policy, must be one of ${DOC_COMPRESS_LIST}
-DOC_COMPRESS ?= "gz"
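-# For example, to compress docs with xz instead of gz, one could set
-# (illustrative, e.g. in local.conf):
-#   DOC_COMPRESS = "xz"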
-
-# Compression shell command
-DOC_COMPRESS_CMD[gz] ?= 'gzip -v -9 -n'
-DOC_COMPRESS_CMD[bz2] ?= "bzip2 -v -9"
-DOC_COMPRESS_CMD[xz] ?= "xz -v"
-
-# Decompression shell command
-DOC_DECOMPRESS_CMD[gz] ?= 'gunzip -v'
-DOC_DECOMPRESS_CMD[bz2] ?= "bunzip2 -v"
-DOC_DECOMPRESS_CMD[xz] ?= "unxz -v"
-
-PACKAGE_PREPROCESS_FUNCS += "package_do_compress_doc compress_doc_updatealternatives"
-python package_do_compress_doc() {
- compress_mode = d.getVar('DOC_COMPRESS')
- compress_list = (d.getVar('DOC_COMPRESS_LIST') or '').split()
- if compress_mode not in compress_list:
- bb.fatal('Compression policy %s not supported (not listed in %s)\n' % (compress_mode, compress_list))
-
- dvar = d.getVar('PKGD')
- compress_cmds = {}
- decompress_cmds = {}
- for mode in compress_list:
- compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
- decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
-
- mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir"))
- if os.path.exists(mandir):
-        # Decompress doc files whose format is not compress_mode
- decompress_doc(mandir, compress_mode, decompress_cmds)
- compress_doc(mandir, compress_mode, compress_cmds)
-
- infodir = os.path.abspath(dvar + os.sep + d.getVar("infodir"))
- if os.path.exists(infodir):
-        # Decompress doc files whose format is not compress_mode
- decompress_doc(infodir, compress_mode, decompress_cmds)
- compress_doc(infodir, compress_mode, compress_cmds)
-}
-
-def _get_compress_format(file, compress_format_list):
- for compress_format in compress_format_list:
- compress_suffix = '.' + compress_format
- if file.endswith(compress_suffix):
- return compress_format
-
- return ''
-
-# Collect hardlinks into a dict; each element in the dict lists hardlinks
-# which point to the same doc file.
-# {hardlink10: [hardlink11, hardlink12],,,}
-# The hardlink10, hardlink11 and hardlink12 are the same file.
-def _collect_hardlink(hardlink_dict, file):
- for hardlink in hardlink_dict:
-        # Add to the existing hardlink
- if os.path.samefile(hardlink, file):
- hardlink_dict[hardlink].append(file)
- return hardlink_dict
-
- hardlink_dict[file] = []
- return hardlink_dict
-
-def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
- import subprocess
- for target in hardlink_dict:
- if decompress:
- compress_format = _get_compress_format(target, shell_cmds.keys())
- cmd = "%s -f %s" % (shell_cmds[compress_format], target)
- bb.note('decompress hardlink %s' % target)
- else:
- cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
- bb.note('compress hardlink %s' % target)
- (retval, output) = subprocess.getstatusoutput(cmd)
- if retval:
- bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
- return
-
- for hardlink_dup in hardlink_dict[target]:
- if decompress:
- # Remove compress suffix
- compress_suffix = '.' + compress_format
- new_hardlink = hardlink_dup[:-len(compress_suffix)]
- new_target = target[:-len(compress_suffix)]
- else:
- # Append compress suffix
- compress_suffix = '.' + compress_mode
- new_hardlink = hardlink_dup + compress_suffix
- new_target = target + compress_suffix
-
- bb.note('hardlink %s-->%s' % (new_hardlink, new_target))
- if not os.path.exists(new_hardlink):
- os.link(new_target, new_hardlink)
- if os.path.exists(hardlink_dup):
- os.unlink(hardlink_dup)
-
-def _process_symlink(file, compress_format, decompress=False):
- compress_suffix = '.' + compress_format
- if decompress:
- # Remove compress suffix
- new_linkname = file[:-len(compress_suffix)]
- new_source = os.readlink(file)[:-len(compress_suffix)]
- else:
- # Append compress suffix
- new_linkname = file + compress_suffix
- new_source = os.readlink(file) + compress_suffix
-
- bb.note('symlink %s-->%s' % (new_linkname, new_source))
- if not os.path.exists(new_linkname):
- os.symlink(new_source, new_linkname)
-
- os.unlink(file)
-
-def _is_info(file):
- flags = '.info .info-'.split()
- for flag in flags:
- if flag in os.path.basename(file):
- return True
-
- return False
-
-def _is_man(file):
- import re
-
-    # It refers to the MANSECT variable in man(1.6g)'s man.config
- # ".1:.1p:.8:.2:.3:.3p:.4:.5:.6:.7:.9:.0p:.tcl:.n:.l:.p:.o"
-    # Does not start with '.', and contains one of the above colon-separated elements
- p = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')
- if p.search(file):
- return True
-
- return False
-
-def _is_compress_doc(file, compress_format_list):
- compress_format = _get_compress_format(file, compress_format_list)
- compress_suffix = '.' + compress_format
- if file.endswith(compress_suffix):
- # Remove the compress suffix
- uncompress_file = file[:-len(compress_suffix)]
- if _is_info(uncompress_file) or _is_man(uncompress_file):
- return True, compress_format
-
- return False, ''
-
-def compress_doc(topdir, compress_mode, compress_cmds):
- import subprocess
- hardlink_dict = {}
- for root, dirs, files in os.walk(topdir):
- for f in files:
- file = os.path.join(root, f)
- if os.path.isdir(file):
- continue
-
- if _is_info(file) or _is_man(file):
- # Symlink
- if os.path.islink(file):
- _process_symlink(file, compress_mode)
- # Hardlink
- elif os.lstat(file).st_nlink > 1:
- _collect_hardlink(hardlink_dict, file)
- # Normal file
- elif os.path.isfile(file):
- cmd = "%s %s" % (compress_cmds[compress_mode], file)
- (retval, output) = subprocess.getstatusoutput(cmd)
- if retval:
- bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
- continue
- bb.note('compress file %s' % file)
-
- _process_hardlink(hardlink_dict, compress_mode, compress_cmds)
-
-# Decompress doc files whose format is not compress_mode
-def decompress_doc(topdir, compress_mode, decompress_cmds):
- import subprocess
- hardlink_dict = {}
- decompress = True
- for root, dirs, files in os.walk(topdir):
- for f in files:
- file = os.path.join(root, f)
- if os.path.isdir(file):
- continue
-
- res, compress_format = _is_compress_doc(file, decompress_cmds.keys())
-            # Decompress files whose format is not compress_mode
- if res and compress_mode!=compress_format:
- # Symlink
- if os.path.islink(file):
- _process_symlink(file, compress_format, decompress)
- # Hardlink
- elif os.lstat(file).st_nlink > 1:
- _collect_hardlink(hardlink_dict, file)
- # Normal file
- elif os.path.isfile(file):
- cmd = "%s %s" % (decompress_cmds[compress_format], file)
- (retval, output) = subprocess.getstatusoutput(cmd)
- if retval:
- bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
- continue
- bb.note('decompress file %s' % file)
-
- _process_hardlink(hardlink_dict, compress_mode, decompress_cmds, decompress)
-
-python compress_doc_updatealternatives () {
- if not bb.data.inherits_class('update-alternatives', d):
- return
-
- mandir = d.getVar("mandir")
- infodir = d.getVar("infodir")
- compress_mode = d.getVar('DOC_COMPRESS')
- for pkg in (d.getVar('PACKAGES') or "").split():
- old_names = (d.getVar('ALTERNATIVE_%s' % pkg) or "").split()
- new_names = []
- for old_name in old_names:
- old_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', old_name)
- old_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name) or \
- d.getVarFlag('ALTERNATIVE_TARGET', old_name) or \
- d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
- d.getVar('ALTERNATIVE_TARGET') or \
- old_link
- # Sometimes old_target is specified as relative to the link name.
- old_target = os.path.join(os.path.dirname(old_link), old_target)
-
-            # The update-alternatives entries used for compressed docs
- if mandir in old_target or infodir in old_target:
- new_name = old_name + '.' + compress_mode
- new_link = old_link + '.' + compress_mode
- new_target = old_target + '.' + compress_mode
- d.delVarFlag('ALTERNATIVE_LINK_NAME', old_name)
- d.setVarFlag('ALTERNATIVE_LINK_NAME', new_name, new_link)
- if d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name):
- d.delVarFlag('ALTERNATIVE_TARGET_%s' % pkg, old_name)
- d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, new_name, new_target)
- elif d.getVarFlag('ALTERNATIVE_TARGET', old_name):
- d.delVarFlag('ALTERNATIVE_TARGET', old_name)
- d.setVarFlag('ALTERNATIVE_TARGET', new_name, new_target)
- elif d.getVar('ALTERNATIVE_TARGET_%s' % pkg):
- d.setVar('ALTERNATIVE_TARGET_%s' % pkg, new_target)
- elif d.getVar('ALTERNATIVE_TARGET'):
- d.setVar('ALTERNATIVE_TARGET', new_target)
-
- new_names.append(new_name)
-
- if new_names:
- d.setVar('ALTERNATIVE_%s' % pkg, ' '.join(new_names))
-}
-
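
The compress_doc helpers removed above only touch files they classify as man or info pages before (de)compressing them. As a rough standalone illustration (not part of the patch; the helper name below is made up for the example), the classification amounts to a suffix check for info files and a section-suffix regex for man pages:

    import os
    import re

    # Mirrors the removed _is_man()/_is_info() checks, for illustration only.
    MAN_SECTION_RE = re.compile(r'[^\.]+\.([1-9lnop]|0p|tcl)')

    def looks_like_doc(path):
        name = os.path.basename(path)
        return '.info' in name or bool(MAN_SECTION_RE.search(path))

    print(looks_like_doc('/usr/share/man/man1/ls.1'))   # True
    print(looks_like_doc('/usr/share/info/grep.info'))  # True
    print(looks_like_doc('/usr/bin/ls'))                # False
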
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
index eabf12ce7a..9ff9956fe9 100644
--- a/meta/classes/copyleft_compliance.bbclass
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Deploy sources for recipes for compliance with copyleft-style licenses
# Defaults to using symlinks, as it's a quick operation, and one can easily
# follow the links when making use of the files (e.g. tar with the -h arg).
diff --git a/meta/classes/copyleft_filter.bbclass b/meta/classes/copyleft_filter.bbclass
index c36bce431a..83cd90060d 100644
--- a/meta/classes/copyleft_filter.bbclass
+++ b/meta/classes/copyleft_filter.bbclass
@@ -1,10 +1,14 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Filter the license, the copyleft_should_include returns True for the
# COPYLEFT_LICENSE_INCLUDE recipe, and False for the
# COPYLEFT_LICENSE_EXCLUDE.
#
# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
-#
-# vi:sts=4:sw=4:et
COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL* AGPL*'
COPYLEFT_LICENSE_INCLUDE[type] = 'list'
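
The filter works by glob-matching each license named in a recipe's LICENSE expression against the include and exclude lists above. A minimal standalone sketch of that idea, with hard-coded defaults (the real logic lives in copyleft_should_include(), which also applies additional recipe-level checks):

    import fnmatch

    include = 'GPL* LGPL* AGPL*'.split()
    exclude = 'CLOSED Proprietary'.split()

    def roughly_should_include(license_string):
        # Split the LICENSE expression into individual license names.
        licenses = license_string.replace('&', ' ').replace('|', ' ').replace('(', ' ').replace(')', ' ').split()
        if any(fnmatch.fnmatch(lic, pat) for lic in licenses for pat in exclude):
            return False
        return any(fnmatch.fnmatch(lic, pat) for lic in licenses for pat in include)

    print(roughly_should_include('GPL-2.0-only & MIT'))  # True
    print(roughly_should_include('CLOSED'))              # False
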
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
deleted file mode 100644
index 88ca272145..0000000000
--- a/meta/classes/core-image.bbclass
+++ /dev/null
@@ -1,75 +0,0 @@
-# Common code for generating core reference images
-#
-# Copyright (C) 2007-2011 Linux Foundation
-
-# IMAGE_FEATURES controls the content of the core reference images
-#
-# By default we install the packagegroup-core-boot and packagegroup-base-extended packages;
-# this gives us a working (console-only) rootfs.
-#
-# Available IMAGE_FEATURES:
-#
-# - x11 - X server
-# - x11-base - X server with minimal environment
-# - x11-sato - OpenedHand Sato environment
-# - tools-debug - debugging tools
-# - eclipse-debug - Eclipse remote debugging support
-# - tools-profile - profiling tools
-# - tools-testapps - tools usable to make some device tests
-# - tools-sdk - SDK (C/C++ compiler, autotools, etc.)
-# - nfs-server - NFS server
-# - nfs-client - NFS client
-# - ssh-server-dropbear - SSH server (dropbear)
-# - ssh-server-openssh - SSH server (openssh)
-# - hwcodecs - Install hardware acceleration codecs
-# - package-management - installs package management tools and preserves the package manager database
-# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
-# - empty-root-password
-# - allow-empty-password
-# - allow-root-login
-# - post-install-logging
-# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
-# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
-# - doc-pkgs - documentation packages for all installed packages in the rootfs
-# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
-# - ptest-pkgs - ptest packages for all ptest-enabled recipes
-# - read-only-rootfs - tweaks an image to support read-only rootfs
-# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
-# - splash - bootup splash screen
-#
-FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
-FEATURE_PACKAGES_x11-base = "packagegroup-core-x11-base"
-FEATURE_PACKAGES_x11-sato = "packagegroup-core-x11-sato"
-FEATURE_PACKAGES_tools-debug = "packagegroup-core-tools-debug"
-FEATURE_PACKAGES_eclipse-debug = "packagegroup-core-eclipse-debug"
-FEATURE_PACKAGES_tools-profile = "packagegroup-core-tools-profile"
-FEATURE_PACKAGES_tools-testapps = "packagegroup-core-tools-testapps"
-FEATURE_PACKAGES_tools-sdk = "packagegroup-core-sdk packagegroup-core-standalone-sdk-target"
-FEATURE_PACKAGES_nfs-server = "packagegroup-core-nfs-server"
-FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
-FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
-FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
-FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
-
-
-# IMAGE_FEATURES_REPLACES_foo = 'bar1 bar2'
-# Including image feature foo would replace the image features bar1 and bar2
-IMAGE_FEATURES_REPLACES_ssh-server-openssh = "ssh-server-dropbear"
-
-# IMAGE_FEATURES_CONFLICTS_foo = 'bar1 bar2'
-# An error exception would be raised if both image features foo and bar1 (or bar2) are included
-
-MACHINE_HWCODECS ??= ""
-
-CORE_IMAGE_BASE_INSTALL = '\
- packagegroup-core-boot \
- packagegroup-base-extended \
- \
- ${CORE_IMAGE_EXTRA_INSTALL} \
- '
-
-CORE_IMAGE_EXTRA_INSTALL ?= ""
-
-IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
-
-inherit image
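
Each IMAGE_FEATURES entry listed above selects packages through the matching FEATURE_PACKAGES_<feature> variable; the actual expansion is handled by the inherited image class. Conceptually, with a plain dictionary standing in for the BitBake datastore and the values abbreviated:

    # Illustrative sketch only; not how the class itself is implemented.
    feature_packages = {
        'x11': 'packagegroup-core-x11',
        'ssh-server-dropbear': 'packagegroup-core-ssh-dropbear',
        'tools-sdk': 'packagegroup-core-sdk packagegroup-core-standalone-sdk-target',
    }

    def expand_image_features(image_features):
        packages = []
        for feature in image_features.split():
            packages.extend(feature_packages.get(feature, '').split())
        return packages

    print(expand_image_features('x11 ssh-server-dropbear'))
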
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
deleted file mode 100644
index 867edf8707..0000000000
--- a/meta/classes/cpan-base.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# cpan-base provides various Perl-related information needed for building
-# cpan modules
-#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
-
-DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
-RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
-
-inherit perl-version
-
-def is_target(d):
- if not bb.data.inherits_class('native', d):
- return "yes"
- return "no"
-
-PERLLIBDIRS = "${libdir}/perl5"
-PERLLIBDIRS_class-native = "${libdir}/perl5"
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
deleted file mode 100644
index e9908ae4b8..0000000000
--- a/meta/classes/cpan.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-# This is for perl modules that use the old Makefile.PL build system
-#
-inherit cpan-base perlnative
-
-EXTRA_CPANFLAGS ?= ""
-EXTRA_PERLFLAGS ?= ""
-
-# Env var which tells perl if it should use host (no) or target (yes) settings
-export PERLCONFIGTARGET = "${@is_target(d)}"
-
-# Env var which tells perl where the perl include files are
-export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
-export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
-export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
-
-cpan_do_configure () {
- yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
-
- # Makefile.PLs can exit with success without generating a
- # Makefile, e.g. in cases of missing configure time
- # dependencies. This is considered a best practice by
- # cpantesters.org. See:
- # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
- # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
- [ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
-
- if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
- . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
- # Use find since there can be a Makefile generated for each Makefile.PL
- for f in `find -name Makefile.PL`; do
- f2=`echo $f | sed -e 's/.PL//'`
- test -f $f2 || continue
- sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
- -e 's/perl.real/perl/' \
- -e "s|^\(CCFLAGS =.*\)|\1 ${CFLAGS}|" \
- $f2
- done
- fi
-}
-
-do_configure_append_class-target() {
- find . -name Makefile | xargs sed -E -i \
- -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
-}
-
-do_configure_append_class-nativesdk() {
- find . -name Makefile | xargs sed -E -i \
- -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
-}
-
-cpan_do_compile () {
- oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
-}
-
-cpan_do_install () {
- oe_runmake DESTDIR="${D}" install_vendor
- for PERLSCRIPT in `grep -rIEl '#! *${bindir}/perl-native.*/perl' ${D}`; do
- sed -i -e 's|${bindir}/perl-native.*/perl|/usr/bin/env nativeperl|' $PERLSCRIPT
- done
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
deleted file mode 100644
index f3fb4666ef..0000000000
--- a/meta/classes/cpan_build.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# This is for perl modules that use the new Build.PL build system
-#
-inherit cpan-base perlnative
-
-EXTRA_CPAN_BUILD_FLAGS ?= ""
-
-# Env var which tells perl if it should use host (no) or target (yes) settings
-export PERLCONFIGTARGET = "${@is_target(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
-export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
-export LD = "${CCLD}"
-
-cpan_build_do_configure () {
- if [ "${@is_target(d)}" = "yes" ]; then
- # build for target
- . ${STAGING_LIBDIR}/perl5/config.sh
- fi
-
- perl Build.PL --installdirs vendor --destdir ${D} \
- ${EXTRA_CPAN_BUILD_FLAGS}
-
- # Build.PLs can exit with success without generating a
- # Build, e.g. in cases of missing configure time
- # dependencies. This is considered a best practice by
- # cpantesters.org. See:
- # * http://wiki.cpantesters.org/wiki/CPANAuthorNotes
- # * http://www.nntp.perl.org/group/perl.qa/2008/08/msg11236.html
- [ -e Build ] || bbfatal "No Build was generated by Build.PL"
-}
-
-cpan_build_do_compile () {
- perl Build --perl "${bindir}/perl" verbose=1
-}
-
-cpan_build_do_install () {
- perl Build install --destdir ${D}
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..486efadba9
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1158 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+SPDXIMAGEWORK = "${SPDXDIR}/image-work"
+SPDXSDKWORK = "${SPDXDIR}/sdk-work"
+SPDXDEPS = "${SPDXDIR}/deps.json"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_CUSTOM_ANNOTATION_VARS ??= ""
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+    this recipe. For SPDX documents created using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ else:
+            # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.fatal("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").replace("|", " | ").replace("&", " & ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
+
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+        bb.debug(1, 'spdx: There is a bug in the scan of %s, so do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if not filepath.is_symlink() and filepath.is_file():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s in %s; SPDX found: %s" % (str(file_path), package,
+ " ".join(p.fileName for p in package_files)))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+add_package_sources_from_debug[vardepsexclude] += "STAGING_KERNEL_DIR"
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+ import json
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deps_file = Path(d.getVar("SPDXDEPS"))
+ package_archs = d.getVar("SSTATE_ARCHS").split()
+ package_archs.reverse()
+
+ dep_recipes = []
+
+ with spdx_deps_file.open("r") as f:
+ deps = json.load(f)
+
+ for dep_pn, dep_hashfn in deps:
+ dep_recipe_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "recipe-" + dep_pn, dep_hashfn)
+ if not dep_recipe_path:
+ bb.fatal("Cannot find any SPDX file for recipe %s, %s" % (dep_pn, dep_hashfn))
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] = "SSTATE_ARCHS"
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+def add_download_packages(d, doc, recipe):
+ import os.path
+ from bb.fetch2 import decodeurl, CHECKSUM_LIST
+ import bb.process
+ import oe.spdx
+ import oe.sbom
+
+ for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
+ f = bb.fetch2.FetchData(src_uri, d)
+
+ for name in f.names:
+ package = oe.spdx.SPDXPackage()
+ package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
+ package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
+
+ if f.type == "file":
+ continue
+
+ uri = f.type
+ proto = getattr(f, "proto", None)
+ if proto is not None:
+ uri = uri + "+" + proto
+ uri = uri + "://" + f.host + f.path
+
+ if f.method.supports_srcrev():
+ uri = uri + "@" + f.revisions[name]
+
+ if f.method.supports_checksum(f):
+ for checksum_id in CHECKSUM_LIST:
+ if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
+ continue
+
+ expected_checksum = getattr(f, "%s_expected" % checksum_id)
+ if expected_checksum is None:
+ continue
+
+ c = oe.spdx.SPDXChecksum()
+ c.algorithm = checksum_id.upper()
+ c.checksumValue = expected_checksum
+ package.checksums.append(c)
+
+ package.downloadLocation = uri
+ doc.packages.append(package)
+ doc.add_relationship(doc, "DESCRIBES", package)
+ # In the future, we might be able to do more fancy dependencies,
+ # but this should be sufficient for now
+ doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
+
+def collect_direct_deps(d, dep_task):
+ current_task = "do_" + d.getVar("BB_CURRENTTASK")
+ pn = d.getVar("PN")
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+
+ for this_dep in taskdepdata.values():
+ if this_dep[0] == pn and this_dep[1] == current_task:
+ break
+ else:
+ bb.fatal(f"Unable to find this {pn}:{current_task} in taskdepdata")
+
+ deps = set()
+ for dep_name in this_dep[3]:
+ dep_data = taskdepdata[dep_name]
+ if dep_data[1] == dep_task and dep_data[0] != pn:
+ deps.add((dep_data[0], dep_data[7]))
+
+ return sorted(deps)
+
+collect_direct_deps[vardepsexclude] += "BB_TASKDEPDATA"
+collect_direct_deps[vardeps] += "DEPENDS"
+
+python do_collect_spdx_deps() {
+    # This task calculates the build-time dependencies of the recipe, and is
+    # required because while a task can deptask on itself, those dependencies
+    # do not show up in BB_TASKDEPDATA. To work around that, this task does the
+    # deptask on do_create_spdx and writes out the dependencies it finds, then
+    # do_create_spdx reads in the found dependencies when writing the actual
+    # SPDX document.
+ import json
+ from pathlib import Path
+
+ spdx_deps_file = Path(d.getVar("SPDXDEPS"))
+
+ deps = collect_direct_deps(d, "do_create_spdx")
+
+ with spdx_deps_file.open("w") as f:
+ json.dump(deps, f)
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
+addtask do_collect_spdx_deps after do_unpack
+do_collect_spdx_deps[depends] += "${PATCHDEPENDENCY}"
+do_collect_spdx_deps[deptask] = "do_create_spdx"
+do_collect_spdx_deps[dirs] = "${SPDXDIR}"
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import bb.compress.zstd
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with bb.compress.zstd.open(name, mode=mode + "b", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+ pkg_arch = d.getVar("SSTATE_PKGARCH")
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
+ for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
+ recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+    # save the CVEs fixed by patches to the source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ add_download_packages(d, doc, recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.zst")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, pkg_arch, "recipes", indent=get_json_indent(d))
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.zst")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, pkg_arch, "packages", indent=get_json_indent(d))
+}
+do_create_spdx[vardepsexclude] += "BB_NUMBER_THREADS"
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies for archiving the source
+addtask do_create_spdx after do_package do_packagedata do_unpack do_collect_spdx_deps before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ deps = collect_direct_deps(d, "do_create_spdx")
+ deps.append((d.getVar("PN"), d.getVar("BB_HASHFILENAME")))
+
+ for dep_pn, dep_hashfn in deps:
+ localdata = d
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, localdata)
+ if not recipe_data:
+ localdata = bb.data.createCopy(d)
+ localdata.setVar("PKGDATA_DIR", "${PKGDATA_DIR_SDK}")
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, localdata)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, localdata)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ if "PKG" in pkg_data:
+ pkg = pkg_data["PKG"]
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = (pkg, dep_hashfn)
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+ pkg_arch = d.getVar("SSTATE_PKGARCH")
+ package_archs = d.getVar("SSTATE_ARCHS").split()
+ package_archs.reverse()
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = oe.sbom.doc_path(deploy_dir_spdx, pkg_name, pkg_arch, "packages")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ (dep, dep_hashfn) = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, dep_pkg, dep_hashfn)
+ if not dep_path:
+ bb.fatal("No SPDX file found for package %s, %s" % (dep_pkg, dep_hashfn))
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, pkg_arch, "runtime", spdx_deploy, indent=get_json_indent(d))
+}
+
+do_create_runtime_spdx[vardepsexclude] += "OVERRIDES SSTATE_ARCHS"
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+    Save the patched source of the recipe in SPDXWORK.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+        # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR to make do_unpack do_patch run in another dir.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+            # Changing 'WORKDIR' also changes 'B', so create the 'B' directory in case
+            # any of the following tasks need it (for example, some recipes' do_patch
+            # requires 'B' to exist).
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+                shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland has no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+spdx_get_src[vardepsexclude] += "STAGING_KERNEL_DIR"
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx"
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx"
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx"
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ if link != target_path:
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.zst")
+ make_image_link(spdx_tar_path, ".spdx.tar.zst")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("TOOLCHAIN_OUTPUTNAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import bb.compress.zstd
+
+ providers = collect_package_providers(d)
+ package_archs = d.getVar("SSTATE_ARCHS").split()
+ package_archs.reverse()
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.supplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ if name not in providers:
+ bb.fatal("Unable to find SPDX provider for '%s'" % name)
+
+ pkg_name, pkg_hashfn = providers[name]
+
+ pkg_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, pkg_name, pkg_hashfn)
+ if not pkg_spdx_path:
+ bb.fatal("No SPDX file found for package %s, %s" % (pkg_name, pkg_hashfn))
+
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = oe.sbom.doc_find_by_hashfn(deploy_dir_spdx, package_archs, "runtime-" + name, pkg_hashfn)
+ if not runtime_spdx_path:
+ bb.fatal("No runtime SPDX document found for %s, %s" % (name, pkg_hashfn))
+
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+ bb.utils.mkdirhier(spdx_workdir)
+ image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.zst")
+ with bb.compress.zstd.open(spdx_tar_path, "w", num_threads=num_threads) as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = oe.sbom.doc_find_by_namespace(deploy_dir_spdx, package_archs, ref.spdxDocument)
+ if not ref_path:
+ bb.fatal("Cannot find any SPDX file for document %s" % ref.spdxDocument)
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
+
+combine_spdx[vardepsexclude] += "BB_NUMBER_THREADS SSTATE_ARCHS"
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..19c6c0ff0b
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Include this class when you don't care what version of SPDX you get; it will
+# be updated to the latest stable version that is supported
+inherit create-spdx-2.2
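
One of the more opaque steps in the new create-spdx-2.2 class above is convert_license_to_spdx(), which rewrites an OE LICENSE expression into an SPDX license expression. The operator rewriting it applies can be illustrated standalone (ignoring the SPDXLICENSEMAP lookups and LicenseRef- extraction that the real function also performs):

    # Rough sketch of the operator rewriting done by convert_license_to_spdx().
    def to_spdx_expression(lic):
        tokens = lic.replace('(', ' ( ').replace(')', ' ) ').replace('|', ' | ').replace('&', ' & ').split()
        mapping = {'&': 'AND', '|': 'OR', 'CLOSED': 'NONE'}
        return ' '.join(mapping.get(t, t) for t in tokens)

    print(to_spdx_expression('GPL-2.0-only & (MIT | BSD-3-Clause)'))
    # prints: GPL-2.0-only AND ( MIT OR BSD-3-Clause )
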
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
deleted file mode 100644
index f5c9f61595..0000000000
--- a/meta/classes/cross-canadian.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
-#
-# NOTE - When using this class the user is responsible for ensuring that
-# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
-# is changed, another nativesdk xxx-canadian-cross can be installed
-#
-
-
-# SDK packages are built either explicitly by the user,
-# or indirectly via dependency. No need to be in 'world'.
-EXCLUDE_FROM_WORLD = "1"
-NATIVESDKLIBC ?= "libc-glibc"
-LIBCOVERRIDE = ":${NATIVESDKLIBC}"
-CLASSOVERRIDE = "class-cross-canadian"
-STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-
-#
-# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
-#
-PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
-BASECANADIANEXTRAOS ?= "linux-musl"
-CANADIANEXTRAOS = "${BASECANADIANEXTRAOS}"
-CANADIANEXTRAVENDOR = ""
-MODIFYTOS ??= "1"
-python () {
- archs = d.getVar('PACKAGE_ARCHS').split()
- sdkarchs = []
- for arch in archs:
- sdkarchs.append(arch + '-${SDKPKGSUFFIX}')
- d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
-
- # Allow the following code segment to be disabled, e.g. meta-environment
- if d.getVar("MODIFYTOS") != "1":
- return
-
- if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
- return
-
- tos = d.getVar("TARGET_OS")
- whitelist = []
- extralibcs = [""]
- if "musl" in d.getVar("BASECANADIANEXTRAOS"):
- extralibcs.append("musl")
- for variant in ["", "spe", "x32", "eabi", "n32", "_ilp32"]:
- for libc in extralibcs:
- entry = "linux"
- if variant and libc:
- entry = entry + "-" + libc + variant
- elif variant:
- entry = entry + "-gnu" + variant
- elif libc:
- entry = entry + "-" + libc
- whitelist.append(entry)
- if tos not in whitelist:
-        bb.fatal("Building cross-canadian for an unknown TARGET_SYS (%s), please update cross-canadian.bbclass" % d.getVar("TARGET_SYS"))
-
- for n in ["PROVIDES", "DEPENDS"]:
- d.setVar(n, d.getVar(n))
- d.setVar("STAGING_BINDIR_TOOLCHAIN", d.getVar("STAGING_BINDIR_TOOLCHAIN"))
- for prefix in ["AR", "AS", "DLLTOOL", "CC", "CXX", "GCC", "LD", "LIPO", "NM", "OBJDUMP", "RANLIB", "STRIP", "WINDRES"]:
- n = prefix + "_FOR_TARGET"
- d.setVar(n, d.getVar(n))
- # This is a bit ugly. We need to zero LIBC/ABI extension which will change TARGET_OS
- # however we need the old value in some variables. We expand those here first.
- tarch = d.getVar("TARGET_ARCH")
- if tarch == "x86_64":
- d.setVar("LIBCEXTENSION", "")
- d.setVar("ABIEXTENSION", "")
- d.appendVar("CANADIANEXTRAOS", " linux-gnux32")
- for extraos in d.getVar("BASECANADIANEXTRAOS").split():
- d.appendVar("CANADIANEXTRAOS", " " + extraos + "x32")
- elif tarch == "powerpc":
- # PowerPC can build "linux" and "linux-gnuspe"
- d.setVar("LIBCEXTENSION", "")
- d.setVar("ABIEXTENSION", "")
- d.appendVar("CANADIANEXTRAOS", " linux-gnuspe")
- for extraos in d.getVar("BASECANADIANEXTRAOS").split():
- d.appendVar("CANADIANEXTRAOS", " " + extraos + "spe")
- elif tarch == "mips64":
- d.appendVar("CANADIANEXTRAOS", " linux-gnun32")
- for extraos in d.getVar("BASECANADIANEXTRAOS").split():
- d.appendVar("CANADIANEXTRAOS", " " + extraos + "n32")
- if tarch == "arm" or tarch == "armeb":
- d.appendVar("CANADIANEXTRAOS", " linux-gnueabi linux-musleabi")
- d.setVar("TARGET_OS", "linux-gnueabi")
- else:
- d.setVar("TARGET_OS", "linux")
-
- # Also need to handle multilib target vendors
- vendors = d.getVar("CANADIANEXTRAVENDOR")
- if not vendors:
- vendors = all_multilib_tune_values(d, 'TARGET_VENDOR')
- origvendor = d.getVar("TARGET_VENDOR_MULTILIB_ORIGINAL")
- if origvendor:
- d.setVar("TARGET_VENDOR", origvendor)
- if origvendor not in vendors.split():
- vendors = origvendor + " " + vendors
- d.setVar("CANADIANEXTRAVENDOR", vendors)
-}
-MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
-
-INHIBIT_DEFAULT_DEPS = "1"
-
-STAGING_DIR_HOST = "${RECIPE_SYSROOT}"
-
-TOOLCHAIN_OPTIONS = " --sysroot=${RECIPE_SYSROOT}"
-
-PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
-PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
-
-HOST_ARCH = "${SDK_ARCH}"
-HOST_VENDOR = "${SDK_VENDOR}"
-HOST_OS = "${SDK_OS}"
-HOST_PREFIX = "${SDK_PREFIX}"
-HOST_CC_ARCH = "${SDK_CC_ARCH}"
-HOST_LD_ARCH = "${SDK_LD_ARCH}"
-HOST_AS_ARCH = "${SDK_AS_ARCH}"
-
-#assign DPKG_ARCH
-DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH'), '')}"
-
-CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
-CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
-LDFLAGS = "${BUILDSDK_LDFLAGS} \
- -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
- -Wl,-rpath,${libdir}/.. "
-
-#
-# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
-# binaries
-#
-DEPENDS_append = " chrpath-replacement-native"
-EXTRANATIVEPATH += "chrpath-native"
-
-# Path mangling needed by the cross packaging
-# Note that we use := here to ensure that libdir and includedir are
-# target paths.
-target_base_prefix := "${base_prefix}"
-target_prefix := "${prefix}"
-target_exec_prefix := "${exec_prefix}"
-target_base_libdir = "${target_base_prefix}/${baselib}"
-target_libdir = "${target_exec_prefix}/${baselib}"
-target_includedir := "${includedir}"
-
-# Change to place files in SDKPATH
-base_prefix = "${SDKPATHNATIVE}"
-prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-bindir = "${exec_prefix}/bin/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-sbindir = "${bindir}"
-base_bindir = "${bindir}"
-base_sbindir = "${bindir}"
-libdir = "${exec_prefix}/lib/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-
-FILES_${PN} = "${prefix}"
-
-export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
-export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
-
-do_populate_sysroot[stamp-extra-info] = ""
-do_packagedata[stamp-extra-info] = ""
-
-USE_NLS = "${SDKUSE_NLS}"
-
-# We have to use TARGET_ARCH but we care about the absolute value
-# and not any particular tune that is enabled.
-TARGET_ARCH[vardepsexclude] = "TUNE_ARCH"
-
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
-# If MLPREFIX is set by multilib code, shlibs
-# points to the wrong place so force it
-SHLIBSDIRS = "${PKGDATA_DIR}/nativesdk-shlibs2"
-SHLIBSWORKDIR = "${PKGDATA_DIR}/nativesdk-shlibs2"
-
-cross_canadian_bindirlinks () {
- for i in linux ${CANADIANEXTRAOS}
- do
- for v in ${CANADIANEXTRAVENDOR}
- do
- d=${D}${bindir}/../${TARGET_ARCH}$v-$i
- if [ -d $d ];
- then
- continue
- fi
- install -d $d
- for j in `ls ${D}${bindir}`
- do
- p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,`
- ln -s ../${TARGET_SYS}/$j $d/$p
- done
- done
- done
-}
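
The python () block in the removed cross-canadian.bbclass builds an allow-list of acceptable TARGET_OS values from the libc and ABI variants. Pulled out as a standalone sketch (assuming BASECANADIANEXTRAOS contains a musl entry, as with the default "linux-musl" above):

    extralibcs = ['', 'musl']
    allowed = []
    for variant in ['', 'spe', 'x32', 'eabi', 'n32', '_ilp32']:
        for libc in extralibcs:
            entry = 'linux'
            if variant and libc:
                entry = entry + '-' + libc + variant
            elif variant:
                entry = entry + '-gnu' + variant
            elif libc:
                entry = entry + '-' + libc
            allowed.append(entry)

    print(sorted(allowed))
    # includes 'linux', 'linux-gnueabi', 'linux-gnux32', 'linux-musl', 'linux-musleabi', ...
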
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
deleted file mode 100644
index bfec91d043..0000000000
--- a/meta/classes/cross.bbclass
+++ /dev/null
@@ -1,99 +0,0 @@
-inherit relocatable
-
-# Cross packages are built indirectly via dependency,
-# no need for them to be a direct target of 'world'
-EXCLUDE_FROM_WORLD = "1"
-
-CLASSOVERRIDE = "class-cross"
-PACKAGES = ""
-PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
-
-HOST_ARCH = "${BUILD_ARCH}"
-HOST_VENDOR = "${BUILD_VENDOR}"
-HOST_OS = "${BUILD_OS}"
-HOST_PREFIX = "${BUILD_PREFIX}"
-HOST_CC_ARCH = "${BUILD_CC_ARCH}"
-HOST_LD_ARCH = "${BUILD_LD_ARCH}"
-HOST_AS_ARCH = "${BUILD_AS_ARCH}"
-
-# No strip sysroot when DEBUG_BUILD is enabled
-INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
-
-export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
-
-STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
-
-PACKAGE_ARCH = "${BUILD_ARCH}"
-
-MULTIMACH_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
-
-export PKG_CONFIG_DIR = "${exec_prefix}/lib/pkgconfig"
-export PKG_CONFIG_SYSROOT_DIR = ""
-
-TARGET_CPPFLAGS = ""
-TARGET_CFLAGS = ""
-TARGET_CXXFLAGS = ""
-TARGET_LDFLAGS = ""
-
-CPPFLAGS = "${BUILD_CPPFLAGS}"
-CFLAGS = "${BUILD_CFLAGS}"
-CXXFLAGS = "${BUILD_CFLAGS}"
-LDFLAGS = "${BUILD_LDFLAGS}"
-
-TOOLCHAIN_OPTIONS = ""
-
-# This class encodes staging paths into its scripts data so can only be
-# reused if we manipulate the paths.
-SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
-
-# Path mangling needed by the cross packaging
-# Note that we use := here to ensure that libdir and includedir are
-# target paths.
-target_base_prefix := "${root_prefix}"
-target_prefix := "${prefix}"
-target_exec_prefix := "${exec_prefix}"
-target_base_libdir = "${target_base_prefix}/${baselib}"
-target_libdir = "${target_exec_prefix}/${baselib}"
-target_includedir := "${includedir}"
-
-# Overrides for paths
-CROSS_TARGET_SYS_DIR = "${TARGET_SYS}"
-prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-base_prefix = "${STAGING_DIR_NATIVE}"
-exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
-sbindir = "${bindir}"
-base_bindir = "${bindir}"
-base_sbindir = "${bindir}"
-libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
-libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
-
-do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_packagedata[stamp-extra-info] = ""
-
-do_install () {
- oe_runmake 'DESTDIR=${D}' install
-}
-
-USE_NLS = "no"
-
-export CC = "${BUILD_CC}"
-export CXX = "${BUILD_CXX}"
-export FC = "${BUILD_FC}"
-export CPP = "${BUILD_CPP}"
-export LD = "${BUILD_LD}"
-export CCLD = "${BUILD_CCLD}"
-export AR = "${BUILD_AR}"
-export AS = "${BUILD_AS}"
-export RANLIB = "${BUILD_RANLIB}"
-export STRIP = "${BUILD_STRIP}"
-export NM = "${BUILD_NM}"
-
-inherit nopackages
-
-python do_addto_recipe_sysroot () {
- bb.build.exec_func("extend_recipe_sysroot", d)
-}
-addtask addto_recipe_sysroot after do_populate_sysroot
-do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
deleted file mode 100644
index 04aecb694e..0000000000
--- a/meta/classes/crosssdk.bbclass
+++ /dev/null
@@ -1,51 +0,0 @@
-inherit cross
-
-CLASSOVERRIDE = "class-crosssdk"
-NATIVESDKLIBC ?= "libc-glibc"
-LIBCOVERRIDE = ":${NATIVESDKLIBC}"
-MACHINEOVERRIDES = ""
-PACKAGE_ARCH = "${SDK_ARCH}"
-
-python () {
- # set TUNE_PKGARCH to SDK_ARCH
- d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
- # Set features here to prevent appends and distro features backfill
- # from modifying nativesdk distro features
- features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
- filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
- d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
-}
-
-STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-
-# This class encodes staging paths into its scripts data so can only be
-# reused if we manipulate the paths.
-SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
-
-TARGET_ARCH = "${SDK_ARCH}"
-TARGET_VENDOR = "${SDK_VENDOR}"
-TARGET_OS = "${SDK_OS}"
-TARGET_PREFIX = "${SDK_PREFIX}"
-TARGET_CC_ARCH = "${SDK_CC_ARCH}"
-TARGET_LD_ARCH = "${SDK_LD_ARCH}"
-TARGET_AS_ARCH = "${SDK_AS_ARCH}"
-TARGET_CPPFLAGS = ""
-TARGET_CFLAGS = ""
-TARGET_CXXFLAGS = ""
-TARGET_LDFLAGS = ""
-TARGET_FPU = ""
-
-
-target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
-target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
-target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
-target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-baselib = "lib"
-
-do_packagedata[stamp-extra-info] = ""
-
-# Need to force this to ensure consistency across architectures
-EXTRA_OECONF_GCC_FLOAT = ""
-
-USE_NLS = "no"
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 514897e8b8..56ba8bceef 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# This class is used to check recipes against public CVEs.
#
# In order to use this class just inherit the class in the
@@ -20,53 +26,178 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses defaults to BPN, but may need to
# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2-1.db"
+CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
+CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
+CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
+CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
+CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
+CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
+
+CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
+CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
-# Whitelist for packages (PN)
-CVE_CHECK_PN_WHITELIST ?= ""
+# Report Patched or Ignored CVEs
+CVE_CHECK_REPORT_PATCHED ??= "1"
+
+CVE_CHECK_SHOW_WARNINGS ??= "1"
+
+# Provide text output
+CVE_CHECK_FORMAT_TEXT ??= "1"
+
+# Provide JSON output
+CVE_CHECK_FORMAT_JSON ??= "1"
+
+# Check for packages without CVEs (no issues or missing product name)
+CVE_CHECK_COVERAGE ??= "1"
+
+# Skip CVE Check for packages (PN)
+CVE_CHECK_SKIP_RECIPE ?= ""
+
+# Replace NVD DB check status for a given CVE. Each CVE has to be mentioned
+# separately with optional detail and description for this status.
+#
+# CVE_STATUS[CVE-1234-0001] = "not-applicable-platform: Issue only applies on Windows"
+# CVE_STATUS[CVE-1234-0002] = "fixed-version: Fixed externally"
+#
+# Setting the same status and reason for multiple CVEs is possible
+# via CVE_STATUS_GROUPS variable.
+#
+# CVE_STATUS_GROUPS = "CVE_STATUS_WIN CVE_STATUS_PATCHED"
+#
+# CVE_STATUS_WIN = "CVE-1234-0001 CVE-1234-0003"
+# CVE_STATUS_WIN[status] = "not-applicable-platform: Issue only applies on Windows"
+# CVE_STATUS_PATCHED = "CVE-1234-0002 CVE-1234-0004"
+# CVE_STATUS_PATCHED[status] = "fixed-version: Fixed externally"
+#
+# All possible CVE statuses can be found in cve-check-map.conf, for example:
+# CVE_CHECK_STATUSMAP[not-applicable-platform] = "Ignored"
+# CVE_CHECK_STATUSMAP[fixed-version] = "Patched"
+#
+# CVE_CHECK_IGNORE is deprecated and CVE_STATUS has to be used instead.
+# Keep CVE_CHECK_IGNORE until other layers migrate to new variables
+CVE_CHECK_IGNORE ?= ""
+
+# Layers to be excluded
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# Set to "alphabetical" for versions that use a single alphabetical character as the increment release
+CVE_VERSION_SUFFIX ??= ""
+
+python () {
+    # Fall back all CVEs from CVE_CHECK_IGNORE to CVE_STATUS
+ cve_check_ignore = d.getVar("CVE_CHECK_IGNORE")
+ if cve_check_ignore:
+ bb.warn("CVE_CHECK_IGNORE is deprecated in favor of CVE_STATUS")
+ for cve in (d.getVar("CVE_CHECK_IGNORE") or "").split():
+ d.setVarFlag("CVE_STATUS", cve, "ignored")
+
+ # Process CVE_STATUS_GROUPS to set multiple statuses and optional detail or description at once
+ for cve_status_group in (d.getVar("CVE_STATUS_GROUPS") or "").split():
+ cve_group = d.getVar(cve_status_group)
+ if cve_group is not None:
+ for cve in cve_group.split():
+ d.setVarFlag("CVE_STATUS", cve, d.getVarFlag(cve_status_group, "status"))
+ else:
+ bb.warn("CVE_STATUS_GROUPS contains undefined variable %s" % cve_status_group)
+}
+
+def generate_json_report(d, out_path, link_path):
+ if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
+ import json
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
+
+ bb.note("Generating JSON CVE summary")
+ index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ summary = {"version":"1", "package": []}
+ with open(index_file) as f:
+ filename = f.readline()
+ while filename:
+ with open(filename.rstrip()) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(summary, data)
+ filename = f.readline()
+
+ summary["package"].sort(key=lambda d: d['name'])
+
+ with open(out_path, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ update_symlinks(out_path, link_path)
+
+python cve_save_summary_handler () {
+ import shutil
+ import datetime
+ from oe.cve_check import update_symlinks
+
+ cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
+
+ cve_summary_name = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME")
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
-# Whitelist for CVE. If a CVE is found, then it is considered patched.
-# The value is a string containing space separated CVE values:
-#
-# CVE_CHECK_WHITELIST = 'CVE-2014-2524 CVE-2018-1234'
-#
-CVE_CHECK_WHITELIST ?= ""
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp))
+
+ if os.path.exists(cve_tmp_file):
+ shutil.copyfile(cve_tmp_file, cve_summary_file)
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+ update_symlinks(cve_summary_file, cvefile_link)
+ bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
+
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
+ json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
+ generate_json_report(d, json_summary_name, json_summary_link_name)
+ bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
+}
+
+addhandler cve_save_summary_handler
+cve_save_summary_handler[eventmask] = "bb.event.BuildCompleted"
python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patches_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
- if patched or unpatched:
- cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
-addtask cve_check before do_build after do_fetch
-do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
+addtask cve_check before do_build
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -74,10 +205,11 @@ python cve_check_cleanup () {
Delete the file used to gather all the CVE information.
"""
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
+ bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
}
addhandler cve_check_cleanup
-cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
@@ -85,112 +217,113 @@ python cve_check_write_rootfs_manifest () {
"""
import shutil
+ import json
+ from oe.rootfs import image_list_installed_packages
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = os.path.join(d.getVar("CVE_CHECK_DIR"), d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
-
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
- bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
- link_name = d.getVar("IMAGE_LINK_NAME")
+ deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(deploy_file_json):
+ bb.utils.remove(deploy_file_json)
+
+    # Create a list of relevant recipes
+ recipies = set()
+ for pkg in list(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+ 'runtime-reverse', pkg)
+ pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
+ recipies.add(pkg_data["PN"])
+
+ bb.note("Writing rootfs CVE manifest")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+
+ json_data = {"version":"1", "package": []}
+ text_data = ""
+ enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
+ enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
+
+ save_pn = d.getVar("PN")
+
+ for pkg in recipies:
+ # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
+ # it with the different PN names set each time.
+ d.setVar("PN", pkg)
+ if enable_text:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as pfile:
+ text_data += pfile.read()
+
+ if enable_json:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(json_data, data)
+
+ d.setVar("PN", save_pn)
+
+ if enable_text:
+ link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
-
- shutil.copyfile(cve_tmp_file, manifest_name)
- if manifest_name and os.path.exists(manifest_name):
- manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
- # If we already have another manifest, update symlinks
- if os.path.exists(os.path.realpath(manifest_link)):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
- bb.plain("Image CVE report stored in: %s" % manifest_name)
-}
-
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+ with open(manifest_name, "w") as f:
+ f.write(text_data)
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE report stored in: %s" % manifest_name)
- import re
+ if enable_json:
+ link_path = os.path.join(deploy_dir, "%s.json" % link_name)
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
+ with open(manifest_name, "w") as f:
+ json.dump(json_data, f, indent=2)
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
+}
- return patched_cves
+ROOTFS_POSTPROCESS_COMMAND:prepend = "${@'cve_check_write_rootfs_manifest ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from distutils.version import LooseVersion
+ from oe.cve_check import Version, convert_cve_version, decode_cve_status
+
+ pn = d.getVar("PN")
+ real_pv = d.getVar("PV")
+ suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
+ cves_ignored = []
+ cves_status = []
+ cves_in_recipe = False
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [], [])
+ return ([], [], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
- # If the recipe has been whitlisted we return empty lists
- if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
- bb.note("Recipe has been whitelisted, skipping check")
- return ([], [], [])
+ # If the recipe has been skipped/ignored we return empty lists
+ if pn in d.getVar("CVE_CHECK_SKIP_RECIPE").split():
+ bb.note("Recipe has been skipped by cve-check")
+ return ([], [], [], [])
- old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
- if old_cve_whitelist:
- bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
- cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
+ # Convert CVE_STATUS into ignored CVEs and check validity
+ cve_ignore = []
+ for cve in (d.getVarFlags("CVE_STATUS") or {}):
+ decoded_status, _, _ = decode_cve_status(d, cve)
+ if decoded_status == "Ignored":
+ cve_ignore.append(cve)
import sqlite3
db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
@@ -198,36 +331,50 @@ def check_cves(d, patched_cves):
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
+ cves_in_product = False
if ":" in product:
vendor, product = product.split(":", 1)
else:
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
- if cve in cve_whitelist:
- bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
- patched_cves.add(cve)
+ if cve in cve_ignore:
+ bb.note("%s-%s ignores %s" % (product, pv, cve))
+ cves_ignored.append(cve)
continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
continue
+ # Write status once only for each product
+ if not cves_in_product:
+ cves_status.append([product, True])
+ cves_in_product = True
+ cves_in_recipe = True
vulnerable = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ ignored = False
+
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
+ if cve in cve_ignore:
+ ignored = True
+
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
if operator_start:
try:
- vulnerable_start = (operator_start == '>=' and LooseVersion(pv) >= LooseVersion(version_start))
- vulnerable_start |= (operator_start == '>' and LooseVersion(pv) > LooseVersion(version_start))
+ vulnerable_start = (operator_start == '>=' and Version(pv,suffix) >= Version(version_start,suffix))
+ vulnerable_start |= (operator_start == '>' and Version(pv,suffix) > Version(version_start,suffix))
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_start, version_start, cve))
@@ -237,8 +384,8 @@ def check_cves(d, patched_cves):
if operator_end:
try:
- vulnerable_end = (operator_end == '<=' and LooseVersion(pv) <= LooseVersion(version_end))
- vulnerable_end |= (operator_end == '<' and LooseVersion(pv) < LooseVersion(version_end))
+ vulnerable_end = (operator_end == '<=' and Version(pv,suffix) <= Version(version_end,suffix) )
+ vulnerable_end |= (operator_end == '<' and Version(pv,suffix) < Version(version_end,suffix) )
except:
bb.warn("%s: Failed to compare %s %s %s for %s" %
(product, pv, operator_end, version_end, cve))
@@ -252,18 +399,33 @@ def check_cves(d, patched_cves):
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
- bb.note("%s-%s is vulnerable to %s" % (product, pv, cve))
- cves_unpatched.append(cve)
+ if ignored:
+ bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
+ cves_ignored.append(cve)
+ else:
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
+ cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
- bb.note("%s-%s is not vulnerable to %s" % (product, pv, cve))
- # TODO: not patched but not vulnerable
+ bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
patched_cves.add(cve)
+ cve_cursor.close()
+
+ if not cves_in_product:
+ bb.note("No CVE records found for product %s, pn %s" % (product, pn))
+ cves_status.append([product, False])
conn.close()
+ diff_ignore = list(set(cve_ignore) - set(cves_ignored))
+ if diff_ignore:
+ oe.qa.handle_error("cve_status_not_in_db", "Found CVE (%s) with CVE_STATUS set that are not found in database for this component" % " ".join(diff_ignore), d)
+
+ if not cves_in_recipe:
+ bb.note("No CVE records for products in recipe %s" % (pn))
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
def get_cve_info(d, cves):
"""
@@ -273,50 +435,88 @@ def get_cve_info(d, cves):
import sqlite3
cve_data = {}
- conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cve_data[row[0]]["vectorString"] = row[6]
+ cursor.close()
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
+def cve_write_data_text(d, patched, unpatched, ignored, cve_data):
"""
Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
CVE manifest if enabled.
"""
+ from oe.cve_check import decode_cve_status
+
cve_file = d.getVar("CVE_CHECK_LOG")
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+    # Early exit: the text format does not report packages without CVEs
+ if not patched+unpatched+ignored:
+ return
+
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
+ is_patched = cve in patched
+ is_ignored = cve in ignored
+
+ status = "Unpatched"
+ if (is_patched or is_ignored) and not report_all:
+ continue
+ if is_ignored:
+ status = "Ignored"
+ elif is_patched:
+ status = "Patched"
+ else:
+ # default value of status is Unpatched
+ unpatched_cves.append(cve)
+
+ write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
- write_string += "CVE STATUS: Whitelisted\n"
- elif cve in patched:
- write_string += "CVE STATUS: Patched\n"
- else:
- unpatched_cves.append(cve)
- write_string += "CVE STATUS: Unpatched\n"
+ write_string += "CVE STATUS: %s\n" % status
+ _, detail, description = decode_cve_status(d, cve)
+ if detail:
+ write_string += "CVE DETAIL: %s\n" % detail
+ if description:
+ write_string += "CVE DESCRIPTION: %s\n" % description
write_string += "CVE SUMMARY: %s\n" % cve_data[cve]["summary"]
write_string += "CVSS v2 BASE SCORE: %s\n" % cve_data[cve]["scorev2"]
write_string += "CVSS v3 BASE SCORE: %s\n" % cve_data[cve]["scorev3"]
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
+ write_string += "VECTORSTRING: %s\n" % cve_data[cve]["vectorString"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
- if unpatched_cves:
+ if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
with open(cve_file, "w") as f:
@@ -324,12 +524,138 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
f.write(write_string)
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- cve_dir = d.getVar("CVE_CHECK_DIR")
- bb.utils.mkdirhier(cve_dir)
- deploy_file = os.path.join(cve_dir, d.getVar("PN"))
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
with open(deploy_file, "w") as f:
f.write(write_string)
if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
f.write("%s" % write_string)
+
+def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
+ """
+    Write CVE information in the JSON format: to WORKDIR, to
+    CVE_CHECK_DIR and, if the CVE manifest is enabled, to fragment
+    files that will be assembled at the end in cve_check_write_rootfs_manifest.
+ """
+
+ import json
+
+ write_string = json.dumps(output, indent=2)
+ with open(direct_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % direct_file)
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ bb.utils.mkdirhier(cvelogpath)
+ fragment_file = os.path.basename(deploy_file)
+ fragment_path = os.path.join(cvelogpath, fragment_file)
+ with open(fragment_path, "w") as f:
+ f.write(write_string)
+ with open(index_path, "a+") as f:
+ f.write("%s\n" % fragment_path)
+
+def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
+ """
+ Prepare CVE data for the JSON format, then write it.
+ """
+
+ from oe.cve_check import decode_cve_status
+
+ output = {"version":"1", "package": []}
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
+
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ unpatched_cves = []
+
+ product_data = []
+ for s in cve_status:
+ p = {"product": s[0], "cvesInRecord": "Yes"}
+ if s[1] == False:
+ p["cvesInRecord"] = "No"
+ product_data.append(p)
+
+ package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
+ package_data = {
+ "name" : d.getVar("PN"),
+ "layer" : layer,
+ "version" : package_version,
+ "products": product_data
+ }
+ cve_list = []
+
+ for cve in sorted(cve_data):
+ is_patched = cve in patched
+ is_ignored = cve in ignored
+ status = "Unpatched"
+ if (is_patched or is_ignored) and not report_all:
+ continue
+ if is_ignored:
+ status = "Ignored"
+ elif is_patched:
+ status = "Patched"
+ else:
+ # default value of status is Unpatched
+ unpatched_cves.append(cve)
+
+ issue_link = "%s%s" % (nvd_link, cve)
+
+ cve_item = {
+ "id" : cve,
+ "summary" : cve_data[cve]["summary"],
+ "scorev2" : cve_data[cve]["scorev2"],
+ "scorev3" : cve_data[cve]["scorev3"],
+ "vector" : cve_data[cve]["vector"],
+ "vectorString" : cve_data[cve]["vectorString"],
+ "status" : status,
+ "link": issue_link
+ }
+ _, detail, description = decode_cve_status(d, cve)
+ if detail:
+ cve_item["detail"] = detail
+ if description:
+ cve_item["description"] = description
+ cve_list.append(cve_item)
+
+ package_data["issue"] = cve_list
+ output["package"].append(package_data)
+
+ direct_file = d.getVar("CVE_CHECK_LOG_JSON")
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
+
+ cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
+
+def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
+ """
+ Write CVE data in each enabled format.
+ """
+
+ if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
+ cve_write_data_text(d, patched, unpatched, ignored, cve_data)
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)
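
For illustration only, the CVE_STATUS and CVE_STATUS_GROUPS variables documented in the comments above are typically set from a recipe or bbappend. A minimal sketch, assuming a hypothetical recipe, product name and made-up CVE IDs (none of these come from this patch):

    # Product name as used by the NVD CPE entries (hypothetical)
    CVE_PRODUCT = "libfoo"

    # Mark individual CVEs with a status defined in cve-check-map.conf
    CVE_STATUS[CVE-2014-0001] = "not-applicable-platform: Issue only applies on Windows"
    CVE_STATUS[CVE-2014-0002] = "fixed-version: Fixed externally"

    # Or apply one status and reason to several CVEs at once
    CVE_STATUS_GROUPS = "CVE_STATUS_NOT_LINUX"
    CVE_STATUS_NOT_LINUX = "CVE-2014-0003 CVE-2014-0004"
    CVE_STATUS_NOT_LINUX[status] = "not-applicable-platform: Issue only applies on Windows"

A recipe can also be excluded from checking entirely by adding its PN to CVE_CHECK_SKIP_RECIPE.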
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
deleted file mode 100644
index 6f8a599ccb..0000000000
--- a/meta/classes/debian.bbclass
+++ /dev/null
@@ -1,146 +0,0 @@
-# Debian package renaming only occurs when a package is built
-# We therefore have to make sure we build all runtime packages
-# before building the current package so that the package's runtime
-# depends are correct
-#
-# Custom library package names can be defined setting
-# DEBIANNAME_ + pkgname to the desired name.
-#
-# Better expressed as: ensure all RDEPENDS are packaged before we package this one.
-# This means we can't have circular RDEPENDS/RRECOMMENDS
-
-AUTO_LIBNAME_PKGS = "${PACKAGES}"
-
-inherit package
-
-DEBIANRDEP = "do_packagedata"
-do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
-do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
-do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
-do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
-
-python () {
- if not d.getVar("PACKAGES"):
- d.setVar("DEBIANRDEP", "")
-}
-
-python debian_package_name_hook () {
- import glob, copy, stat, errno, re, pathlib, subprocess
-
- pkgdest = d.getVar("PKGDEST")
- packages = d.getVar('PACKAGES')
- so_re = re.compile(r"lib.*\.so")
-
- def socrunch(s):
- s = s.lower().replace('_', '-')
- m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
- if m is None:
- return None
- if m.group(2) in '0123456789':
- bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
- else:
- bin = m.group(1) + m.group(2) + m.group(3)
- dev = m.group(1) + m.group(2)
- return (bin, dev)
-
- def isexec(path):
- try:
- s = os.stat(path)
- except (os.error, AttributeError):
- return 0
- return (s[stat.ST_MODE] & stat.S_IEXEC)
-
- def add_rprovides(pkg, d):
- newpkg = d.getVar('PKG_' + pkg)
- if newpkg and newpkg != pkg:
- provs = (d.getVar('RPROVIDES_' + pkg) or "").split()
- if pkg not in provs:
- d.appendVar('RPROVIDES_' + pkg, " " + pkg + " (=" + d.getVar("PKGV") + ")")
-
- def auto_libname(packages, orig_pkg):
- p = lambda var: pathlib.PurePath(d.getVar(var))
- libdirs = (p("base_libdir"), p("libdir"))
- bindirs = (p("base_bindir"), p("base_sbindir"), p("bindir"), p("sbindir"))
-
- sonames = []
- has_bins = 0
- has_libs = 0
- for f in pkgfiles[orig_pkg]:
- # This is .../packages-split/orig_pkg/
- pkgpath = pathlib.PurePath(pkgdest, orig_pkg)
- # Strip pkgpath off the full path to a file in the package, re-root
- # so it is absolute, and then get the parent directory of the file.
- path = pathlib.PurePath("/") / (pathlib.PurePath(f).relative_to(pkgpath).parent)
- if path in bindirs:
- has_bins = 1
- if path in libdirs:
- has_libs = 1
- if so_re.match(os.path.basename(f)):
- try:
- cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
- output = subprocess.check_output(cmd).decode("utf-8")
- for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
- if m.group(1) not in sonames:
- sonames.append(m.group(1))
- except subprocess.CalledProcessError:
- pass
- bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
- soname = None
- if len(sonames) == 1:
- soname = sonames[0]
- elif len(sonames) > 1:
- lead = d.getVar('LEAD_SONAME')
- if lead:
- r = re.compile(lead)
- filtered = []
- for s in sonames:
- if r.match(s):
- filtered.append(s)
- if len(filtered) == 1:
- soname = filtered[0]
- elif len(filtered) > 1:
- bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
- else:
- bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
- else:
- bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
-
- if has_libs and not has_bins and soname:
- soname_result = socrunch(soname)
- if soname_result:
- (pkgname, devname) = soname_result
- for pkg in packages.split():
- if (d.getVar('PKG_' + pkg, False) or d.getVar('DEBIAN_NOAUTONAME_' + pkg, False)):
- add_rprovides(pkg, d)
- continue
- debian_pn = d.getVar('DEBIANNAME_' + pkg, False)
- if debian_pn:
- newpkg = debian_pn
- elif pkg == orig_pkg:
- newpkg = pkgname
- else:
- newpkg = pkg.replace(orig_pkg, devname, 1)
- mlpre=d.getVar('MLPREFIX')
- if mlpre:
- if not newpkg.find(mlpre) == 0:
- newpkg = mlpre + newpkg
- if newpkg != pkg:
- bb.note("debian: renaming %s to %s" % (pkg, newpkg))
- d.setVar('PKG_' + pkg, newpkg)
- add_rprovides(pkg, d)
- else:
- add_rprovides(orig_pkg, d)
-
-    # reversed sort is needed when some package is a substring of another
- # ie in ncurses we get without reverse sort:
- # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
- # and later
- # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
- # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
- for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS') or "").split(), reverse=True):
- auto_libname(packages, pkg)
-}
-
-EXPORT_FUNCTIONS package_name_hook
-
-DEBIAN_NAMES = "1"
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
deleted file mode 100644
index 737c26122b..0000000000
--- a/meta/classes/deploy.bbclass
+++ /dev/null
@@ -1,12 +0,0 @@
-DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
-SSTATETASKS += "do_deploy"
-do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
-do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
-
-python do_deploy_setscene () {
- sstate_setscene(d)
-}
-addtask do_deploy_setscene
-do_deploy[dirs] = "${DEPLOYDIR} ${B}"
-do_deploy[cleandirs] = "${DEPLOYDIR}"
-do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"
diff --git a/meta/classes/devicetree.bbclass b/meta/classes/devicetree.bbclass
deleted file mode 100644
index c772ab2ab9..0000000000
--- a/meta/classes/devicetree.bbclass
+++ /dev/null
@@ -1,148 +0,0 @@
-# This bbclass implements device tree compilation for user-provided device tree
-# sources. The compilation of the device tree sources is the same as the kernel
-# device tree compilation process, this includes being able to include sources
-# from the kernel such as soc dtsi files or header files such as gpio.h. In
-# addition to device trees this bbclass also handles compilation of device tree
-# overlays.
-#
-# The output of this class behaves similarly to how kernel-devicetree.bbclass
-# operates in that the output files are installed into /boot/devicetree.
-# However this class on purpose separates the deployed device trees into the
-# 'devicetree' subdirectory. This prevents clashes with the kernel-devicetree
-# output. Additionally the device trees are populated into the sysroot for
-# access via the sysroot from within other recipes.
-
-SECTION ?= "bsp"
-
-# The default inclusion of kernel device tree includes and headers means that
-# device trees built with them are at least GPLv2 (and in some cases dual
-# licensed). Default to GPLv2 if the recipe does not specify a license.
-LICENSE ?= "GPLv2"
-LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
-
-INHIBIT_DEFAULT_DEPS = "1"
-DEPENDS += "dtc-native"
-
-inherit deploy kernel-arch
-
-COMPATIBLE_MACHINE ?= "^$"
-
-PROVIDES = "virtual/dtb"
-
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-SYSROOT_DIRS += "/boot/devicetree"
-FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
-
-S = "${WORKDIR}"
-B = "${WORKDIR}/build"
-
-# Default kernel includes, these represent what are normally used for in-kernel
-# sources.
-KERNEL_INCLUDE ??= " \
- ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
- ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
- ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
- "
-
-DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
-DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
-DT_FILES_PATH[doc] = "Defaults to source directory, can be used to select dts files that are not in source (e.g. generated)."
-DT_FILES_PATH ?= "${S}"
-
-DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob, used as extra space typically for additional properties during boot."
-DT_PADDING_SIZE ??= "0x3000"
-DT_RESERVED_MAP[doc] = "Number of reserved map entries."
-DT_RESERVED_MAP ??= "8"
-DT_BOOT_CPU[doc] = "The boot cpu, defaults to 0"
-DT_BOOT_CPU ??= "0"
-
-DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
-DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
-DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
-DTC_OFLAGS ?= "-p 0 -@ -H epapr"
-
-python () {
- if d.getVar("KERNEL_INCLUDE"):
- # auto add dependency on kernel tree, but only if kernel include paths
- # are specified.
- d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
-}
-
-def expand_includes(varname, d):
- import glob
- includes = set()
- # expand all includes with glob
- for i in (d.getVar(varname) or "").split():
- for g in glob.glob(i):
- if os.path.isdir(g): # only add directories to include path
- includes.add(g)
- return includes
-
-def devicetree_source_is_overlay(path):
- # determine if a dts file is an overlay by checking if it uses "/plugin/;"
- with open(path, "r") as f:
- for i in f:
- if i.startswith("/plugin/;"):
- return True
- return False
-
-def devicetree_compile(dtspath, includes, d):
- import subprocess
- dts = os.path.basename(dtspath)
- dtname = os.path.splitext(dts)[0]
- bb.note("Processing {0} [{1}]".format(dtname, dts))
-
- # preprocess
- ppargs = d.getVar("BUILD_CPP").split()
- ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
- for i in includes:
- ppargs.append("-I{0}".format(i))
- ppargs += ["-o", "{0}.pp".format(dts), dtspath]
- bb.note("Running {0}".format(" ".join(ppargs)))
- subprocess.run(ppargs, check = True)
-
- # determine if the file is an overlay or not (using the preprocessed file)
- isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
-
- # compile
- dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
- if isoverlay:
- dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
- else:
- dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
- for i in includes:
- dtcargs += ["-i", i]
- dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
- dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
- bb.note("Running {0}".format(" ".join(dtcargs)))
- subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
-python devicetree_do_compile() {
- includes = expand_includes("DT_INCLUDE", d)
- listpath = d.getVar("DT_FILES_PATH")
- for dts in os.listdir(listpath):
- dtspath = os.path.join(listpath, dts)
- try:
- if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
- continue # skip non-.dts files and non-overlay files
- except:
- continue # skip if can't determine if overlay
- devicetree_compile(dtspath, includes, d)
-}
-
-devicetree_do_install() {
- for DTB_FILE in `ls *.dtb *.dtbo`; do
- install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
- done
-}
-
-devicetree_do_deploy() {
- for DTB_FILE in `ls *.dtb *.dtbo`; do
- install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
- done
-}
-addtask deploy before do_build after do_install
-
-EXPORT_FUNCTIONS do_compile do_install do_deploy
-
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
deleted file mode 100644
index fdf7dc100f..0000000000
--- a/meta/classes/devshell.bbclass
+++ /dev/null
@@ -1,155 +0,0 @@
-inherit terminal
-
-DEVSHELL = "${SHELL}"
-
-python do_devshell () {
- if d.getVarFlag("do_devshell", "manualfakeroot"):
- d.prependVar("DEVSHELL", "pseudo ")
- fakeenv = d.getVar("FAKEROOTENV").split()
- for f in fakeenv:
- k = f.split("=")
- d.setVar(k[0], k[1])
- d.appendVar("OE_TERMINAL_EXPORTS", " " + k[0])
- d.delVarFlag("do_devshell", "fakeroot")
-
- oe_terminal(d.getVar('DEVSHELL'), 'OpenEmbedded Developer Shell', d)
-}
-
-addtask devshell after do_patch do_prepare_recipe_sysroot
-
-# The directory that the terminal starts in
-DEVSHELL_STARTDIR ?= "${S}"
-do_devshell[dirs] = "${DEVSHELL_STARTDIR}"
-do_devshell[nostamp] = "1"
-
-# devshell and fakeroot/pseudo need careful handling since only the final
-# command should run under fakeroot emulation; any X connection should
-# be done as the normal user. We therefore carefully construct the environment
-# manually
-python () {
- if d.getVarFlag("do_devshell", "fakeroot"):
- # We need to signal our code that we want fakeroot however we
- # can't manipulate the environment and variables here yet (see YOCTO #4795)
- d.setVarFlag("do_devshell", "manualfakeroot", "1")
- d.delVarFlag("do_devshell", "fakeroot")
-}
-
-def devpyshell(d):
-
- import code
- import select
- import signal
- import termios
-
- m, s = os.openpty()
- sname = os.ttyname(s)
-
- def noechoicanon(fd):
- old = termios.tcgetattr(fd)
- old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
- # &~ termios.ISIG
- termios.tcsetattr(fd, termios.TCSADRAIN, old)
-
- # No echo or buffering over the pty
- noechoicanon(s)
-
- pid = os.fork()
- if pid:
- os.close(m)
- oe_terminal("oepydevshell-internal.py %s %d" % (sname, pid), 'OpenEmbedded Developer PyShell', d)
- os._exit(0)
- else:
- os.close(s)
-
- os.dup2(m, sys.stdin.fileno())
- os.dup2(m, sys.stdout.fileno())
- os.dup2(m, sys.stderr.fileno())
-
- bb.utils.nonblockingfd(sys.stdout)
- bb.utils.nonblockingfd(sys.stderr)
- bb.utils.nonblockingfd(sys.stdin)
-
- _context = {
- "os": os,
- "bb": bb,
- "time": time,
- "d": d,
- }
-
- ps1 = "pydevshell> "
- ps2 = "... "
- buf = []
- more = False
-
- i = code.InteractiveInterpreter(locals=_context)
- print("OE PyShell (PN = %s)\n" % d.getVar("PN"))
-
- def prompt(more):
- if more:
- prompt = ps2
- else:
- prompt = ps1
- sys.stdout.write(prompt)
- sys.stdout.flush()
-
- # Restore Ctrl+C since bitbake masks this
- def signal_handler(signal, frame):
- raise KeyboardInterrupt
- signal.signal(signal.SIGINT, signal_handler)
-
- child = None
-
- prompt(more)
- while True:
- try:
- try:
- (r, _, _) = select.select([sys.stdin], [], [], 1)
- if not r:
- continue
- line = sys.stdin.readline().strip()
- if not line:
- prompt(more)
- continue
- except EOFError as e:
- sys.stdout.write("\n")
- sys.stdout.flush()
- except (OSError, IOError) as e:
- if e.errno == 11:
- continue
- if e.errno == 5:
- return
- raise
- else:
- if not child:
- child = int(line)
- continue
- buf.append(line)
- source = "\n".join(buf)
- more = i.runsource(source, "<pyshell>")
- if not more:
- buf = []
- prompt(more)
- except KeyboardInterrupt:
- i.write("\nKeyboardInterrupt\n")
- buf = []
- more = False
- prompt(more)
- except SystemExit:
- # Easiest way to ensure everything exits
- os.kill(child, signal.SIGTERM)
- break
-
-python do_devpyshell() {
- import signal
-
- try:
- devpyshell(d)
- except SystemExit:
- # Stop the SIGTERM above causing an error exit code
- return
- finally:
- return
-}
-addtask devpyshell after do_patch
-
-do_devpyshell[nostamp] = "1"
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 280d6009f3..4158c20c7e 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Development tool - source extraction helper class
#
# NOTE: this class is intended for use by devtool and should not be
@@ -199,6 +205,7 @@ python devtool_post_patch() {
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
@@ -216,7 +223,8 @@ python devtool_post_patch() {
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
- localdata.appendVar('OVERRIDES', ':%s' % override)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
@@ -224,6 +232,9 @@ python devtool_post_patch() {
bb.process.run('git rebase devtool-no-overrides', cwd=srcsubdir)
bb.process.run('git checkout %s' % devbranch, cwd=srcsubdir)
bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
+ if os.path.exists(os.path.join(srcsubdir, '.gitmodules')):
+ bb.process.run('git submodule foreach --recursive "git tag -f devtool-patched"', cwd=srcsubdir)
+
}
python devtool_post_configure() {
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
deleted file mode 100644
index 7780c5482c..0000000000
--- a/meta/classes/devupstream.bbclass
+++ /dev/null
@@ -1,48 +0,0 @@
-# Class for use in BBCLASSEXTEND to make it easier to have a single recipe that
-# can build both stable tarballs and snapshots from upstream source
-# repositories.
-#
-# Usage:
-# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
-# SRCREV_class-devupstream = "abcdef"
-#
-# If the first entry in SRC_URI is a git: URL then S is rewritten to
-# WORKDIR/git.
-#
-# There are a few caveats that remain to be solved:
-# - You can't build native or nativesdk recipes using for example
-# devupstream:native, you can only build target recipes.
-# - If the fetcher requires native tools (such as subversion-native) then
-# bitbake won't be able to add them automatically.
-
-CLASSOVERRIDE .= ":class-devupstream"
-
-python devupstream_virtclass_handler () {
- # Do nothing if this is inherited, as it's for BBCLASSEXTEND
- if "devupstream" not in (d.getVar('BBCLASSEXTEND') or ""):
- bb.error("Don't inherit devupstream, use BBCLASSEXTEND")
- return
-
- variant = d.getVar("BBEXTENDVARIANT")
- if variant not in ("target"):
- bb.error("Pass the variant when using devupstream, for example devupstream:target")
- return
-
-    # Development releases are never preferred by default
- d.setVar("DEFAULT_PREFERENCE", "-1")
-
- uri = bb.fetch2.URI(d.getVar("SRC_URI").split()[0])
-
- if uri.scheme == "git":
- d.setVar("S", "${WORKDIR}/git")
-
- # Modify the PV if the recipe hasn't already overridden it
- pv = d.getVar("PV")
- proto_marker = "+" + uri.scheme
- if proto_marker not in pv:
- d.setVar("PV", pv + proto_marker + "${SRCPV}")
-}
-
-addhandler devupstream_virtclass_handler
-devupstream_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
diff --git a/meta/classes/distro_features_check.bbclass b/meta/classes/distro_features_check.bbclass
deleted file mode 100644
index 8124a8ca27..0000000000
--- a/meta/classes/distro_features_check.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-# Temporarily provide a fallback to the old name of the class
-
-python __anonymous() {
- bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
-}
-
-inherit features_check
diff --git a/meta/classes/distrooverrides.bbclass b/meta/classes/distrooverrides.bbclass
index 9f4db0d771..8d9d7cda7d 100644
--- a/meta/classes/distrooverrides.bbclass
+++ b/meta/classes/distrooverrides.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Turns certain DISTRO_FEATURES into overrides with the same
# name plus a df- prefix. Ensures that these special
# distro features remain set also for native and nativesdk
@@ -6,7 +12,7 @@
# This makes it simpler to write .bbappends that only change the
# task signatures of the recipe if the change is really enabled,
# for example with:
-# do_install_append_df-my-feature () { ... }
+# do_install:append:df-my-feature () { ... }
# where "my-feature" is a DISTRO_FEATURE.
#
# The class is meant to be used in a layer.conf or distro
@@ -22,8 +28,8 @@ DISTRO_FEATURES_OVERRIDES ?= ""
DISTRO_FEATURES_OVERRIDES[doc] = "A space-separated list of <feature> entries. \
Each entry is added to OVERRIDES as df-<feature> if <feature> is in DISTRO_FEATURES."
-DISTRO_FEATURES_FILTER_NATIVE_append = " ${DISTRO_FEATURES_OVERRIDES}"
-DISTRO_FEATURES_FILTER_NATIVESDK_append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVE:append = " ${DISTRO_FEATURES_OVERRIDES}"
+DISTRO_FEATURES_FILTER_NATIVESDK:append = " ${DISTRO_FEATURES_OVERRIDES}"
# If DISTRO_FEATURES_OVERRIDES or DISTRO_FEATURES show up in a task
# signature because of this line, then the task dependency on
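
As a usage illustration (hypothetical feature name; a minimal sketch rather than anything defined by this class): a distro or layer configuration would list the feature, and a .bbappend would then hook the generated df- override, provided "my-feature" is also in DISTRO_FEATURES:

    # distro or layer configuration
    DISTRO_FEATURES_OVERRIDES += "my-feature"

    # in some .bbappend; only runs when "my-feature" is in DISTRO_FEATURES
    do_install:append:df-my-feature () {
        install -d ${D}${sysconfdir}/my-feature
    }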
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
deleted file mode 100644
index 94b5fd426d..0000000000
--- a/meta/classes/distutils-common-base.bbclass
+++ /dev/null
@@ -1,25 +0,0 @@
-export STAGING_INCDIR
-export STAGING_LIBDIR
-
-# LDSHARED is the ld *command* used to create shared library
-export LDSHARED = "${CCLD} -shared"
-# LDXXSHARED is the ld *command* used to create shared library of C++
-# objects
-export LDCXXSHARED = "${CXX} -shared"
-# CCSHARED are the C *flags* used to create objects to go into a shared
-# library (module)
-export CCSHARED = "-fPIC -DPIC"
-# LINKFORSHARED are the flags passed to the $(CC) command that links
-# the python executable
-export LINKFORSHARED = "{SECURITY_CFLAGS} -Xlinker -export-dynamic"
-
-FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
-
-FILES_${PN}-staticdev += "\
- ${PYTHON_SITEPACKAGES_DIR}/*.a \
-"
-FILES_${PN}-dev += "\
- ${datadir}/pkgconfig \
- ${libdir}/pkgconfig \
- ${PYTHON_SITEPACKAGES_DIR}/*.la \
-"
diff --git a/meta/classes/distutils3-base.bbclass b/meta/classes/distutils3-base.bbclass
deleted file mode 100644
index 7dbf07ac4b..0000000000
--- a/meta/classes/distutils3-base.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base python3native
-
diff --git a/meta/classes/distutils3.bbclass b/meta/classes/distutils3.bbclass
deleted file mode 100644
index 7356b5245a..0000000000
--- a/meta/classes/distutils3.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
-inherit distutils3-base
-
-B = "${WORKDIR}/build"
-distutils_do_configure[cleandirs] = "${B}"
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-DISTUTILS_PYTHON = "python3"
-DISTUTILS_PYTHON_class-native = "nativepython3"
-
-distutils3_do_configure() {
- :
-}
-
-distutils3_do_compile() {
- cd ${S}
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
- build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-distutils3_do_compile[vardepsexclude] = "MACHINE"
-
-distutils3_do_install() {
- cd ${S}
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
- build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; \
- -exec sed -i -e s:${D}::g {} \;
-
- for i in ${D}${bindir}/* ${D}${sbindir}/*; do
- if [ -f "$i" ]; then
- sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- fi
- done
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-}
-distutils3_do_install[vardepsexclude] = "MACHINE"
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/dos2unix.bbclass b/meta/classes/dos2unix.bbclass
deleted file mode 100644
index 3fc17e2196..0000000000
--- a/meta/classes/dos2unix.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
-# Class used to convert all CRLF line terminators to LF,
-# since some projects are developed/maintained on Windows and
-# therefore use different line terminators (CRLF) than on Linux (LF),
-# which can cause annoying patching errors during
-# git push/checkout processes.
-
-do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
-
-# Convert CRLF line terminators to LF
-do_convert_crlf_to_lf () {
- find ${S} -type f -exec dos2unix {} \;
-}
-
-addtask convert_crlf_to_lf after do_unpack before do_patch
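As a quick illustration (hypothetical recipe), a recipe whose upstream sources carry CRLF line endings only needs to inherit the class; the task added above then runs between do_unpack and do_patch:

    inherit dos2unix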
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index d200129987..70e27a8d35 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -2,7 +2,8 @@
# Author: Richard Purdie
# Some code and influence taken from srctree.bbclass:
# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
-# Released under the MIT license (see COPYING.MIT for the terms)
+#
+# SPDX-License-Identifier: MIT
#
# externalsrc.bbclass enables use of an existing source tree, usually external to
# the build system to build a piece of software rather than the usual fetch/unpack/patch
@@ -13,7 +14,7 @@
# called "myrecipe" you would do:
#
# INHERIT += "externalsrc"
-# EXTERNALSRC_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
#
# In order to make this class work for both target and native versions (or with
# multilibs/cross or other BBCLASSEXTEND variants), B is set to point to a separate
@@ -21,7 +22,7 @@
# the default, but the build directory can be set to the source directory if
# circumstances dictate by setting EXTERNALSRC_BUILD to the same value, e.g.:
#
-# EXTERNALSRC_BUILD_pn-myrecipe = "/path/to/my/source/tree"
+# EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"
#
SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
@@ -45,11 +46,11 @@ python () {
if bpn == d.getVar('PN') or not classextend:
if (externalsrc or
('native' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-native' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-%s-native' % bpn)) or
('nativesdk' in classextend and
- d.getVar('EXTERNALSRC_pn-nativesdk-%s' % bpn)) or
+ d.getVar('EXTERNALSRC:pn-nativesdk-%s' % bpn)) or
('cross' in classextend and
- d.getVar('EXTERNALSRC_pn-%s-cross' % bpn))):
+ d.getVar('EXTERNALSRC:pn-%s-cross' % bpn))):
d.setVar('BB_DONT_CACHE', '1')
if externalsrc:
@@ -60,21 +61,21 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
+ bb.fetch.get_hashvalue(d)
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
for url in fetch.urls:
url_data = fetch.ud[url]
parm = url_data.parm
- if (url_data.type == 'file' or
- 'type' in parm and parm['type'] == 'kmeta'):
+ if url_data.type in ['file', 'npmsw', 'crate'] or parm.get('type') in ['kmeta', 'git-dependency']:
local_srcuri.append(url)
d.setVar('SRC_URI', ' '.join(local_srcuri))
- # Dummy value because the default function can't be called with blank SRC_URI
- d.setVar('SRCPV', '999')
+ # sstate is never going to work for external source trees, disable it
+ d.setVar('SSTATE_SKIP_CREATION', '1')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -82,32 +83,42 @@ python () {
tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
for task in tasks:
- if task.endswith("_setscene"):
- # sstate is never going to work for external source trees, disable it
- bb.build.deltask(task, d)
- else:
+ if os.path.realpath(d.getVar('S')) == os.path.realpath(d.getVar('B')):
# Since configure will likely touch ${S}, ensure only we lock so one task has access at a time
d.appendVarFlag(task, "lockfiles", " ${S}/singletask.lock")
- # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
- cleandirs = oe.recipeutils.split_var_value(d.getVarFlag(task, 'cleandirs', False) or '')
- setvalue = False
- for cleandir in cleandirs[:]:
- if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
- cleandirs.remove(cleandir)
- setvalue = True
- if setvalue:
- d.setVarFlag(task, 'cleandirs', ' '.join(cleandirs))
+ for v in d.keys():
+ cleandirs = d.getVarFlag(v, "cleandirs", False)
+ if cleandirs:
+ # We do not want our source to be wiped out, ever (kernel.bbclass does this for do_clean)
+ cleandirs = oe.recipeutils.split_var_value(cleandirs)
+ setvalue = False
+ for cleandir in cleandirs[:]:
+ if oe.path.is_path_parent(externalsrc, d.expand(cleandir)):
+ cleandirs.remove(cleandir)
+ setvalue = True
+ if setvalue:
+ d.setVarFlag(v, 'cleandirs', ' '.join(cleandirs))
fetch_tasks = ['do_fetch', 'do_unpack']
# If we deltask do_patch, there's no dependency to ensure do_unpack gets run, so add one
# Note that we cannot use d.appendVarFlag() here because deps is expected to be a list object, not a string
d.setVarFlag('do_configure', 'deps', (d.getVarFlag('do_configure', 'deps', False) or []) + ['do_unpack'])
+ d.setVarFlag('do_populate_lic', 'deps', (d.getVarFlag('do_populate_lic', 'deps', False) or []) + ['do_unpack'])
for task in d.getVar("SRCTREECOVEREDTASKS").split():
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if task == 'do_unpack':
+ # The reproducible build create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+ # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -115,6 +126,9 @@ python () {
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
+ d.appendVarFlag('do_compile', 'prefuncs', ' fetcher_hashes_dummyfunc')
+ d.appendVarFlag('do_configure', 'prefuncs', ' fetcher_hashes_dummyfunc')
+
# We don't want the workdir to go away
d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN'))
@@ -190,6 +204,7 @@ def srctree_hash_files(d, srcdir=None):
import shutil
import subprocess
import tempfile
+ import hashlib
s_dir = srcdir or d.getVar('EXTERNALSRC')
git_dir = None
@@ -197,6 +212,10 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ if git_dir == top_git_dir:
+ git_dir = None
except subprocess.CalledProcessError:
pass
@@ -210,7 +229,18 @@ def srctree_hash_files(d, srcdir=None):
env = os.environ.copy()
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
- sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
+ sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
ret = oe_hash_file + ':True'
@@ -223,6 +253,8 @@ def srctree_configure_hash_files(d):
Get the list of files that should trigger do_configure to re-execute,
based on the value of CONFIGURE_FILES
"""
+ import fnmatch
+
in_files = (d.getVar('CONFIGURE_FILES') or '').split()
out_items = []
search_files = []
@@ -234,8 +266,8 @@ def srctree_configure_hash_files(d):
if search_files:
s_dir = d.getVar('EXTERNALSRC')
for root, _, files in os.walk(s_dir):
- for f in files:
- if f in search_files:
+ for p in search_files:
+ for f in fnmatch.filter(files, p):
out_items.append('%s:True' % os.path.join(root, f))
return ' '.join(out_items)
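For illustration, with purely hypothetical paths, the class is normally enabled globally and pointed at a local tree from local.conf, exactly as the header comment above describes:

    INHERIT += "externalsrc"
    EXTERNALSRC:pn-myrecipe = "/path/to/my/source/tree"
    EXTERNALSRC_BUILD:pn-myrecipe = "/path/to/my/source/tree"

The EXTERNALSRC_BUILD line is only needed when the build has to happen inside the source tree itself.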
diff --git a/meta/classes/extrausers.bbclass b/meta/classes/extrausers.bbclass
index 32569e97db..c825c06df9 100644
--- a/meta/classes/extrausers.bbclass
+++ b/meta/classes/extrausers.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# This bbclass is used for image level user/group configuration.
# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
@@ -14,10 +20,10 @@
inherit useradd_base
-PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
+PACKAGE_INSTALL:append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
# Image level user / group settings
-ROOTFS_POSTPROCESS_COMMAND_append = " set_user_group;"
+ROOTFS_POSTPROCESS_COMMAND:append = " set_user_group"
# Image level user / group settings
set_user_group () {
@@ -46,6 +52,9 @@ set_user_group () {
usermod)
perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
+ passwd-expire)
+ perform_passwd_expire "${IMAGE_ROOTFS}" "$opts"
+ ;;
groupmod)
perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
;;
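A hedged usage sketch (the user name is made up) showing how an image recipe or local.conf could exercise the new passwd-expire branch alongside the existing commands; each semicolon-separated entry maps to one of the perform_* cases in set_user_group():

    inherit extrausers
    EXTRA_USERS_PARAMS = "\
        useradd -p '' tester; \
        passwd-expire tester; \
    "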
diff --git a/meta/classes/features_check.bbclass b/meta/classes/features_check.bbclass
deleted file mode 100644
index b3c8047861..0000000000
--- a/meta/classes/features_check.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
-# Allow checking of required and conflicting features
-#
-# xxx = [DISTRO,MACHINE,COMBINED]
-#
-# ANY_OF_xxx_FEATURES: ensure at least one item on this list is included
-# in xxx_FEATURES.
-# REQUIRED_xxx_FEATURES: ensure every item on this list is included
-# in xxx_FEATURES.
-# CONFLICT_xxx_FEATURES: ensure no item in this list is included in
-# xxx_FEATURES.
-#
-# Copyright 2019 (C) Texas Instruments Inc.
-# Copyright 2013 (C) O.S. Systems Software LTDA.
-
-python () {
- if d.getVar('PARSE_ALL_RECIPES', False):
- return
-
- unused = True
-
- for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
- if d.getVar('ANY_OF_' + kind + '_FEATURES') is None and \
- d.overridedata.get('ANY_OF_' + kind + '_FEATURES') is None and \
- d.getVar('REQUIRED_' + kind + '_FEATURES') is None and \
- d.overridedata.get('REQUIRED_' + kind + '_FEATURES') is None and \
- d.getVar('CONFLICT_' + kind + '_FEATURES') is None and \
- d.overridedata.get('CONFLICT_' + kind + '_FEATURES') is None:
- continue
-
- unused = False
-
- # Assume at least one var is set.
- features = set((d.getVar(kind + '_FEATURES') or '').split())
-
- any_of_features = set((d.getVar('ANY_OF_' + kind + '_FEATURES') or '').split())
- if any_of_features:
- if set.isdisjoint(any_of_features, features):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in %s_FEATURES"
- % (' '.join(any_of_features), kind))
-
- required_features = set((d.getVar('REQUIRED_' + kind + '_FEATURES') or '').split())
- if required_features:
- missing = set.difference(required_features, features)
- if missing:
- raise bb.parse.SkipRecipe("missing required %s feature%s '%s' (not in %s_FEATURES)"
- % (kind.lower(), 's' if len(missing) > 1 else '', ' '.join(missing), kind))
-
- conflict_features = set((d.getVar('CONFLICT_' + kind + '_FEATURES') or '').split())
- if conflict_features:
- conflicts = set.intersection(conflict_features, features)
- if conflicts:
- raise bb.parse.SkipRecipe("conflicting %s feature%s '%s' (in %s_FEATURES)"
- % (kind.lower(), 's' if len(conflicts) > 1 else '', ' '.join(conflicts), kind))
-
- if unused:
- bb.warn("Recipe inherits features_check but doesn't use it")
-}
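A minimal sketch of how a recipe consumed this class before its removal (feature names are illustrative):

    inherit features_check
    REQUIRED_DISTRO_FEATURES = "x11"
    CONFLICT_DISTRO_FEATURES = "wayland"

If DISTRO_FEATURES lacks x11 or contains wayland, the anonymous Python above skips the recipe at parse time.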
diff --git a/meta/classes/fontcache.bbclass b/meta/classes/fontcache.bbclass
deleted file mode 100644
index 624a420a0d..0000000000
--- a/meta/classes/fontcache.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# This class will generate the proper postinst/postrm scriptlets for font
-# packages.
-#
-
-PACKAGE_WRITE_DEPS += "qemu-native"
-inherit qemu
-
-FONT_PACKAGES ??= "${PN}"
-FONT_EXTRA_RDEPENDS ?= "${MLPREFIX}fontconfig-utils"
-FONTCONFIG_CACHE_DIR ?= "${localstatedir}/cache/fontconfig"
-FONTCONFIG_CACHE_PARAMS ?= "-v"
-# You can change this to e.g. FC_DEBUG=16 to debug fc-cache issues.
-# Something has to be set, because qemuwrapper uses this variable after -E.
-# Multiple variables aren't allowed, because for qemu they are separated
-# by commas, while in the -n "$D" case they should be separated by spaces.
-FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
-fontcache_common() {
-if [ -n "$D" ] ; then
- $INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
- 'bindir="${bindir}"' \
- 'libdir="${libdir}"' \
- 'libexecdir="${libexecdir}"' \
- 'base_libdir="${base_libdir}"' \
- 'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
- 'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
- 'fontconfigcacheenv="${FONTCONFIG_CACHE_ENV}"'
-else
- ${FONTCONFIG_CACHE_ENV} fc-cache ${FONTCONFIG_CACHE_PARAMS}
-fi
-}
-
-python () {
- font_pkgs = d.getVar('FONT_PACKAGES').split()
- deps = d.getVar("FONT_EXTRA_RDEPENDS")
-
- for pkg in font_pkgs:
- if deps: d.appendVar('RDEPENDS_' + pkg, ' '+deps)
-}
-
-python add_fontcache_postinsts() {
- for pkg in d.getVar('FONT_PACKAGES').split():
- bb.note("adding fonts postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('fontcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('fontcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
-
-PACKAGEFUNCS =+ "add_fontcache_postinsts"
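A short sketch (package names are hypothetical) of a font recipe opting extra packages into the cache handling; every package listed in FONT_PACKAGES receives the fontcache_common postinst/postrm scriptlets via add_fontcache_postinsts():

    inherit fontcache
    FONT_PACKAGES = "${PN} ${PN}-extra"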
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
deleted file mode 100644
index 9b53dfba7a..0000000000
--- a/meta/classes/fs-uuid.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
-# Extract UUID from ${ROOTFS}, which must have been built
-# by the time that this function gets called. Only works
-# on ext file systems and depends on tune2fs.
-def get_rootfs_uuid(d):
- import subprocess
- rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
- for line in output.split('\n'):
- if line.startswith('Filesystem UUID:'):
- uuid = line.split()[-1]
- bb.note('UUID of %s: %s' % (rootfs, uuid))
- return uuid
- bb.fatal('Could not determine filesystem UUID of %s' % rootfs)
-
-# Replace the special <<uuid-of-rootfs>> inside a string (like the
-# root= APPEND string in a syslinux.cfg or systemd-boot entry) with the
-# actual UUID of the rootfs. Does nothing if the special string
-# is not used.
-def replace_rootfs_uuid(d, string):
- UUID_PLACEHOLDER = '<<uuid-of-rootfs>>'
- if UUID_PLACEHOLDER in string:
- uuid = get_rootfs_uuid(d)
- string = string.replace(UUID_PLACEHOLDER, uuid)
- return string
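For example (a hypothetical kernel command line), the placeholder below is expanded by replace_rootfs_uuid() before the bootloader config is written out:

    APPEND = "root=UUID=<<uuid-of-rootfs>> rootwait console=ttyS0,115200"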
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
deleted file mode 100644
index 3e3c509d5f..0000000000
--- a/meta/classes/gconf.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
-DEPENDS += "gconf"
-PACKAGE_WRITE_DEPS += "gconf-native"
-
-# These are for when gconftool is used natively and the prefix isn't necessarily
-# the sysroot. TODO: replicate the postinst logic for -native packages going
-# into sysroot as they won't be running their own install-time schema
-# registration (disabled below) nor the postinst script (as they don't happen).
-export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
-export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
-
-# Disable install-time schema registration as we're a packaging system so this
-# happens in the postinst script, not at install time. Set both the configure
-# script option and the traditional environment variable just to make sure.
-EXTRA_OECONF += "--disable-schemas-install"
-export GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL = "1"
-
-gconf_postinst() {
-if [ "x$D" != "x" ]; then
- export GCONF_CONFIG_SOURCE="xml::$D${sysconfdir}/gconf/gconf.xml.defaults"
-else
- export GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source`
-fi
-
-SCHEMA_LOCATION=$D/etc/gconf/schemas
-for SCHEMA in ${SCHEMA_FILES}; do
- if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
- HOME=$D/root gconftool-2 \
- --makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
- fi
-done
-}
-
-gconf_prerm() {
-SCHEMA_LOCATION=/etc/gconf/schemas
-for SCHEMA in ${SCHEMA_FILES}; do
- if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
- HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
- gconftool-2 \
- --makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
- fi
-done
-}
-
-python populate_packages_append () {
- import re
- packages = d.getVar('PACKAGES').split()
- pkgdest = d.getVar('PKGDEST')
-
- for pkg in packages:
- schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
- schemas = []
- schema_re = re.compile(r".*\.schemas$")
- if os.path.exists(schema_dir):
- for f in os.listdir(schema_dir):
- if schema_re.match(f):
- schemas.append(f)
- if schemas != []:
- bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
- d.setVar('SCHEMA_FILES', " ".join(schemas))
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gconf_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- prerm = d.getVar('pkg_prerm_%s' % pkg)
- if not prerm:
- prerm = '#!/bin/sh\n'
- prerm += d.getVar('gconf_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
- d.appendVar("RDEPENDS_%s" % pkg, ' ' + d.getVar('MLPREFIX', False) + 'gconf')
-}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
deleted file mode 100644
index be2ef3b311..0000000000
--- a/meta/classes/gettext.bbclass
+++ /dev/null
@@ -1,22 +0,0 @@
-def gettext_dependencies(d):
- if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
- return ""
- if d.getVar('USE_NLS') == 'no':
- return "gettext-minimal-native"
- return "gettext-native"
-
-def gettext_oeconf(d):
- if d.getVar('USE_NLS') == 'no':
- return '--disable-nls'
- # Remove the NLS bits if USE_NLS is no or INHIBIT_DEFAULT_DEPS is set
- if d.getVar('INHIBIT_DEFAULT_DEPS') and not oe.utils.inherits(d, 'cross-canadian'):
- return '--disable-nls'
- return "--enable-nls"
-
-BASEDEPENDS_append = " ${@gettext_dependencies(d)}"
-EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
-
-# Without this, msgfmt from gettext-native will not find ITS files
-# provided by target recipes (for example, polkit.its).
-GETTEXTDATADIRS_append_class-target = ":${STAGING_DATADIR}/gettext"
-export GETTEXTDATADIRS
diff --git a/meta/classes/gio-module-cache.bbclass b/meta/classes/gio-module-cache.bbclass
deleted file mode 100644
index e429bd3197..0000000000
--- a/meta/classes/gio-module-cache.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
-PACKAGE_WRITE_DEPS += "qemu-native"
-inherit qemu
-
-GIO_MODULE_PACKAGES ??= "${PN}"
-
-gio_module_cache_common() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
- mlprefix=${MLPREFIX} \
- binprefix=${MLPREFIX} \
- libdir=${libdir} \
- libexecdir=${libexecdir} \
- base_libdir=${base_libdir} \
- bindir=${bindir}
-else
- ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
-fi
-}
-
-python populate_packages_append () {
- packages = d.getVar('GIO_MODULE_PACKAGES').split()
-
- for pkg in packages:
- bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gio_module_cache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
-
diff --git a/meta/classes/glide.bbclass b/meta/classes/glide.bbclass
deleted file mode 100644
index db421745bd..0000000000
--- a/meta/classes/glide.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
-# Handle Glide Vendor Package Management use
-#
-# Copyright 2018 (C) O.S. Systems Software LTDA.
-
-DEPENDS_append = " glide-native"
-
-do_compile_prepend() {
- ( cd ${B}/src/${GO_IMPORT} && glide install )
-}
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
deleted file mode 100644
index efcb6caae1..0000000000
--- a/meta/classes/gnomebase.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
-def gnome_verdir(v):
- return oe.utils.trim_version(v, 2)
-
-GNOME_COMPRESS_TYPE ?= "xz"
-SECTION ?= "x11/gnome"
-GNOMEBN ?= "${BPN}"
-SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive"
-
-FILES_${PN} += "${datadir}/application-registry \
- ${datadir}/mime-info \
- ${datadir}/mime/packages \
- ${datadir}/mime/application \
- ${datadir}/gnome-2.0 \
- ${datadir}/polkit* \
- ${datadir}/GConf \
- ${datadir}/glib-2.0/schemas \
- ${datadir}/appdata \
- ${datadir}/icons \
-"
-
-FILES_${PN}-doc += "${datadir}/devhelp"
-
-GNOMEBASEBUILDCLASS ??= "autotools"
-inherit ${GNOMEBASEBUILDCLASS} pkgconfig
-
-do_install_append() {
- rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
- rm -rf ${D}${localstatedir}/scrollkeeper/*
- rm -f ${D}${datadir}/applications/*.cache
-}
diff --git a/meta/classes/go-mod.bbclass b/meta/classes/go-mod.bbclass
deleted file mode 100644
index 5871d02506..0000000000
--- a/meta/classes/go-mod.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# Handle Go Modules support
-#
-# When using Go Modules, the current working directory MUST be at or below
-# the location of the 'go.mod' file when the go tool is used, and there is no
-# way to tell it to look elsewhere. It will automatically look upwards for the
-# file, but not downwards.
-#
-# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
-# to `GO_IMPORT` but allows for easy override.
-#
-# Copyright 2020 (C) O.S. Systems Software LTDA.
-
-# The '-modcacherw' option ensures we have write access to the cached objects so
-# we avoid errors during clean task as well as when removing the TMPDIR.
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -modcacherw"
-
-inherit go
-
-GO_WORKDIR ?= "${GO_IMPORT}"
-do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
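A brief sketch (the import path is invented) of a recipe overriding GO_WORKDIR when go.mod does not sit at the root of the import path:

    inherit go-mod
    GO_IMPORT = "github.com/example/project"
    GO_WORKDIR = "${GO_IMPORT}/cmd/tool"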
diff --git a/meta/classes/go-ptest.bbclass b/meta/classes/go-ptest.bbclass
deleted file mode 100644
index e230a80587..0000000000
--- a/meta/classes/go-ptest.bbclass
+++ /dev/null
@@ -1,54 +0,0 @@
-inherit go ptest
-
-do_compile_ptest_base() {
- export TMPDIR="${GOTMPDIR}"
- rm -f ${B}/.go_compiled_tests.list
- go_list_package_tests | while read pkg; do
- cd ${B}/src/$pkg
- ${GO} test ${GOPTESTBUILDFLAGS} $pkg
- find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
- sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
- done
- do_compile_ptest
-}
-
-do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
-
-go_make_ptest_wrapper() {
- cat >${D}${PTEST_PATH}/run-ptest <<EOF
-#!/bin/sh
-RC=0
-run_test() (
- cd "\$1"
- ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
- exit \$?)
-EOF
-
-}
-
-do_install_ptest_base() {
- test -f "${B}/.go_compiled_tests.list" || exit 0
- install -d ${D}${PTEST_PATH}
- go_stage_testdata
- go_make_ptest_wrapper
- havetests=""
- while read test; do
- testdir=`dirname $test`
- testprog=`basename $test`
- install -d ${D}${PTEST_PATH}/$testdir
- install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
- echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
- havetests="yes"
- done < ${B}/.go_compiled_tests.list
- if [ -n "$havetests" ]; then
- echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
- chmod +x ${D}${PTEST_PATH}/run-ptest
- else
- rm -rf ${D}${PTEST_PATH}
- fi
- do_install_ptest
- chown -R root:root ${D}${PTEST_PATH}
-}
-
-INSANE_SKIP_${PN}-ptest += "ldflags"
-
diff --git a/meta/classes/go-vendor.bbclass b/meta/classes/go-vendor.bbclass
new file mode 100644
index 0000000000..1bbb99ac79
--- /dev/null
+++ b/meta/classes/go-vendor.bbclass
@@ -0,0 +1,211 @@
+#
+# Copyright 2023 (C) Weidmueller GmbH & Co KG
+# Author: Lukas Funke <lukas.funke@weidmueller.com>
+#
+# Handle Go vendor support for offline builds
+#
+# When importing Go modules, Go downloads the imported modules using
+# a network (proxy) connection ahead of the compile stage. This contradicts
+# the yocto build concept of fetching every source ahead of build-time
+# and supporting offline builds.
+#
+# To support offline builds, we use Go 'vendoring': module dependencies are
+# downloaded during the fetch phase and unpacked into the module's 'vendor'
+# folder. Additionally, a manifest file is generated for the 'vendor' folder.
+#
+
+inherit go-mod
+
+def go_src_uri(repo, version, path=None, subdir=None, \
+ vcs='git', replaces=None, pathmajor=None):
+
+ destsuffix = "git/src/import/vendor.fetch"
+ module_path = repo if not path else path
+
+ src_uri = "{}://{};name={}".format(vcs, repo, module_path.replace('/', '.'))
+ src_uri += ";destsuffix={}/{}@{}".format(destsuffix, repo, version)
+
+ if vcs == "git":
+ src_uri += ";nobranch=1;protocol=https"
+
+ src_uri += ";go_module_path={}".format(module_path)
+
+ if replaces:
+ src_uri += ";go_module_replacement={}".format(replaces)
+ if subdir:
+ src_uri += ";go_subdir={}".format(subdir)
+ if pathmajor:
+ src_uri += ";go_pathmajor={}".format(pathmajor)
+ src_uri += ";is_go_dependency=1"
+
+ return src_uri
+
+python do_vendor_unlink() {
+ go_import = d.getVar('GO_IMPORT')
+ source_dir = d.getVar('S')
+ linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
+
+ os.unlink(linkname)
+}
+
+addtask vendor_unlink before do_package after do_install
+
+python do_go_vendor() {
+ import shutil
+
+ src_uri = (d.getVar('SRC_URI') or "").split()
+
+ if not src_uri:
+ bb.fatal("SRC_URI is empty")
+
+ default_destsuffix = "git/src/import/vendor.fetch"
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ go_import = d.getVar('GO_IMPORT')
+ source_dir = d.getVar('S')
+
+ linkname = os.path.join(source_dir, *['src', go_import, 'vendor'])
+ vendor_dir = os.path.join(source_dir, *['src', 'import', 'vendor'])
+ import_dir = os.path.join(source_dir, *['src', 'import', 'vendor.fetch'])
+
+ if os.path.exists(vendor_dir):
+ # Nothing to do except re-establish link to actual vendor folder
+ if not os.path.exists(linkname):
+ os.symlink(vendor_dir, linkname)
+ return
+
+ bb.utils.mkdirhier(vendor_dir)
+
+ modules = {}
+
+ for url in fetcher.urls:
+ srcuri = fetcher.ud[url].host + fetcher.ud[url].path
+
+ # Skip non Go module src uris
+ if not fetcher.ud[url].parm.get('is_go_dependency'):
+ continue
+
+ destsuffix = fetcher.ud[url].parm.get('destsuffix')
+ # We derive the module repo / version in the following manner (example):
+ #
+ # destsuffix = git/src/import/vendor.fetch/github.com/foo/bar@v1.2.3
+ # p = github.com/foo/bar@v1.2.3
+ # repo = github.com/foo/bar
+ # version = v1.2.3
+
+ p = destsuffix[len(default_destsuffix)+1:]
+ repo, version = p.split('@')
+
+ module_path = fetcher.ud[url].parm.get('go_module_path')
+
+ subdir = fetcher.ud[url].parm.get('go_subdir')
+ subdir = None if not subdir else subdir
+
+ pathMajor = fetcher.ud[url].parm.get('go_pathmajor')
+ pathMajor = None if not pathMajor else pathMajor.strip('/')
+
+ if not (repo, version) in modules:
+ modules[(repo, version)] = {
+ "repo_path": os.path.join(import_dir, p),
+ "module_path": module_path,
+ "subdir": subdir,
+ "pathMajor": pathMajor }
+
+ for module_key, module in modules.items():
+
+ # only take the version which is explicitly listed
+ # as a dependency in the go.mod
+ module_path = module['module_path']
+ rootdir = module['repo_path']
+ subdir = module['subdir']
+ pathMajor = module['pathMajor']
+
+ src = rootdir
+
+ if subdir:
+ src = os.path.join(rootdir, subdir)
+
+ # If the module is released at major version 2 or higher, the module
+ # path must end with a major version suffix like /v2.
+ # This may or may not be part of the subdirectory name
+ #
+ # https://go.dev/ref/mod#modules-overview
+ if pathMajor:
+ tmp = os.path.join(src, pathMajor)
+ # source directory including major version path may or may not exist
+ if os.path.exists(tmp):
+ src = tmp
+
+ dst = os.path.join(vendor_dir, module_path)
+
+ bb.debug(1, "cp %s --> %s" % (src, dst))
+ shutil.copytree(src, dst, symlinks=True, dirs_exist_ok=True, \
+ ignore=shutil.ignore_patterns(".git", \
+ "vendor", \
+ "*._test.go"))
+
+ # If the root directory has a LICENSE file but not the subdir
+ # we copy the root license to the sub module since the license
+ # applies to all modules in the repository
+ # see https://go.dev/ref/mod#vcs-license
+ if subdir:
+ rootdirLicense = os.path.join(rootdir, "LICENSE")
+ subdirLicense = os.path.join(src, "LICENSE")
+
+ if not os.path.exists(subdirLicense) and \
+ os.path.exists(rootdirLicense):
+ shutil.copy2(rootdirLicense, subdirLicense)
+
+ # Copy vendor manifest
+ modules_txt_src = os.path.join(d.getVar('WORKDIR'), "modules.txt")
+ bb.debug(1, "cp %s --> %s" % (modules_txt_src, vendor_dir))
+ shutil.copy2(modules_txt_src, vendor_dir)
+
+ # Clean up vendor dir
+ # We only require the modules in the modules_txt file
+ fetched_paths = set([os.path.relpath(x[0], vendor_dir) for x in os.walk(vendor_dir)])
+
+ # Remove toplevel dir
+ fetched_paths.remove('.')
+
+ vendored_paths = set()
+ replaced_paths = dict()
+ with open(modules_txt_src) as f:
+ for line in f:
+ if not line.startswith("#"):
+ line = line.strip()
+ vendored_paths.add(line)
+
+ # Add toplevel dirs into vendored dir, as we want to keep them
+ topdir = os.path.dirname(line)
+ while len(topdir):
+ if not topdir in vendored_paths:
+ vendored_paths.add(topdir)
+
+ topdir = os.path.dirname(topdir)
+ else:
+ replaced_module = line.split("=>")
+ if len(replaced_module) > 1:
+ # This module has been replaced, use a local path
+ # we parse the line that has a pattern "# module-name [module-version] => local-path"
+ actual_path = replaced_module[1].strip()
+ vendored_name = replaced_module[0].split()[1]
+ bb.debug(1, "added vendored name %s for actual path %s" % (vendored_name, actual_path))
+ replaced_paths[vendored_name] = actual_path
+
+ for path in fetched_paths:
+ if path not in vendored_paths:
+ realpath = os.path.join(vendor_dir, path)
+ if os.path.exists(realpath):
+ shutil.rmtree(realpath)
+
+ for vendored_name, replaced_path in replaced_paths.items():
+ symlink_target = os.path.join(source_dir, *['src', go_import, replaced_path])
+ symlink_name = os.path.join(vendor_dir, vendored_name)
+ bb.debug(1, "vendored name %s, symlink name %s" % (vendored_name, symlink_name))
+ os.symlink(symlink_target, symlink_name)
+
+ # Create a symlink to the actual directory
+ os.symlink(vendor_dir, linkname)
+}
+
+addtask go_vendor before do_patch after do_unpack
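As a hedged illustration (module names and versions are made up), a recipe would inherit the new class and build its dependency SRC_URI entries with the go_src_uri() helper defined above; do_go_vendor then assembles the vendor tree and its modules.txt manifest before do_patch:

    inherit go-vendor
    SRC_URI += "${@go_src_uri('github.com/example/dep', 'v1.2.3')}"
    SRC_URI += "${@go_src_uri('example.googlesource.com/sys', 'v0.8.0', path='example.org/x/sys')}"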
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
deleted file mode 100644
index a9e31b50ea..0000000000
--- a/meta/classes/go.bbclass
+++ /dev/null
@@ -1,155 +0,0 @@
-inherit goarch
-
-GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
-
-GOROOT_class-native = "${STAGING_LIBDIR_NATIVE}/go"
-GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
-GOROOT = "${STAGING_LIBDIR}/go"
-export GOROOT
-export GOROOT_FINAL = "${libdir}/go"
-export GOCACHE = "${B}/.cache"
-
-export GOARCH = "${TARGET_GOARCH}"
-export GOOS = "${TARGET_GOOS}"
-export GOHOSTARCH="${BUILD_GOARCH}"
-export GOHOSTOS="${BUILD_GOOS}"
-
-GOARM[export] = "0"
-GOARM_arm_class-target = "${TARGET_GOARM}"
-GOARM_arm_class-target[export] = "1"
-
-GO386[export] = "0"
-GO386_x86_class-target = "${TARGET_GO386}"
-GO386_x86_class-target[export] = "1"
-
-GOMIPS[export] = "0"
-GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
-GOMIPS_mips_class-target[export] = "1"
-
-DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
-DEPENDS_GOLANG_class-native = "go-native"
-DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
-
-DEPENDS_append = " ${DEPENDS_GOLANG}"
-
-GO_LINKSHARED ?= "${@'-linkshared' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_LINK = "${@'-Wl,-rpath-link=${STAGING_DIR_TARGET}${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH = "${@'-r ${libdir}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_class-native = "${@'-r ${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_RPATH_LINK_class-native = "${@'-Wl,-rpath-link=${STAGING_LIBDIR_NATIVE}/go/pkg/${TARGET_GOTUPLE}_dynlink' if d.getVar('GO_DYNLINK') else ''}"
-GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS}"
-GO_LINKMODE ?= ""
-GO_LINKMODE_class-nativesdk = "--linkmode=external"
-GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
-export GOPATH_OMIT_IN_ACTIONID ?= "1"
-export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
-export GOPTESTFLAGS ?= ""
-GOBUILDFLAGS_prepend_task-compile = "${GO_PARALLEL_BUILD} "
-
-export GO = "${HOST_PREFIX}go"
-GOTOOLDIR = "${STAGING_LIBDIR_NATIVE}/${TARGET_SYS}/go/pkg/tool/${BUILD_GOTUPLE}"
-GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
-export GOTOOLDIR
-
-export CGO_ENABLED ?= "1"
-export CGO_ENABLED_riscv64 = "0"
-export CGO_CFLAGS ?= "${CFLAGS}"
-export CGO_CPPFLAGS ?= "${CPPFLAGS}"
-export CGO_CXXFLAGS ?= "${CXXFLAGS}"
-export CGO_LDFLAGS ?= "${LDFLAGS}"
-
-GO_INSTALL ?= "${GO_IMPORT}/..."
-GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
-
-B = "${WORKDIR}/build"
-export GOPATH = "${B}"
-export GOTMPDIR ?= "${WORKDIR}/go-tmp"
-GOTMPDIR[vardepvalue] = ""
-
-python go_do_unpack() {
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- fetcher = bb.fetch2.Fetch(src_uri, d)
- for url in fetcher.urls:
- if fetcher.ud[url].type == 'git':
- if fetcher.ud[url].parm.get('destsuffix') is None:
- s_dirname = os.path.basename(d.getVar('S'))
- fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
- fetcher.unpack(d.getVar('WORKDIR'))
-}
-
-go_list_packages() {
- ${GO} list -f '{{.ImportPath}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
- egrep -v '${GO_INSTALL_FILTEROUT}'
-}
-
-go_list_package_tests() {
- ${GO} list -f '{{.ImportPath}} {{.TestGoFiles}}' ${GOBUILDFLAGS} ${GO_INSTALL} | \
- grep -v '\[\]$' | \
- egrep -v '${GO_INSTALL_FILTEROUT}' | \
- awk '{ print $1 }'
-}
-
-go_do_configure() {
- ln -snf ${S}/src ${B}/
-}
-do_configure[dirs] =+ "${GOTMPDIR}"
-
-go_do_compile() {
- export TMPDIR="${GOTMPDIR}"
- if [ -n "${GO_INSTALL}" ]; then
- if [ -n "${GO_LINKSHARED}" ]; then
- ${GO} install ${GOBUILDFLAGS} `go_list_packages`
- rm -rf ${B}/bin
- fi
- ${GO} install ${GO_LINKSHARED} ${GOBUILDFLAGS} `go_list_packages`
- fi
-}
-do_compile[dirs] =+ "${GOTMPDIR}"
-do_compile[cleandirs] = "${B}/bin ${B}/pkg"
-
-go_do_install() {
- install -d ${D}${libdir}/go/src/${GO_IMPORT}
- tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
- tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
-
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
- install -d ${D}${bindir}
- install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
- fi
-}
-
-go_stage_testdata() {
- oldwd="$PWD"
- cd ${S}/src
- find ${GO_IMPORT} -depth -type d -name testdata | while read d; do
- if echo "$d" | grep -q '/vendor/'; then
- continue
- fi
- parent=`dirname $d`
- install -d ${D}${PTEST_PATH}/$parent
- cp --preserve=mode,timestamps -R $d ${D}${PTEST_PATH}/$parent/
- done
- cd "$oldwd"
-}
-
-EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
-
-FILES_${PN}-dev = "${libdir}/go/src"
-FILES_${PN}-staticdev = "${libdir}/go/pkg"
-
-INSANE_SKIP_${PN} += "ldflags"
-
-# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
-# doesn't support -buildmode=pie, so skip the QA checking for mips and its
-# variants.
-python() {
- if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'):
- d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
- else:
- d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
-}
diff --git a/meta/classes/goarch.bbclass b/meta/classes/goarch.bbclass
deleted file mode 100644
index 1099b95769..0000000000
--- a/meta/classes/goarch.bbclass
+++ /dev/null
@@ -1,119 +0,0 @@
-BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS'), d)}"
-BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
-BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
-HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
-HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
-HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
-HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-HOST_GOARM_class-native = "7"
-HOST_GO386_class-native = "sse2"
-HOST_GOMIPS_class-native = "hardfloat"
-HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
-TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
-TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
-TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
-TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
-TARGET_GOARM_class-native = "7"
-TARGET_GO386_class-native = "sse2"
-TARGET_GOMIPS_class-native = "hardfloat"
-TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
-GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
-
-# Use the MACHINEOVERRIDES to map ARM CPU architecture passed to GO via GOARM.
-# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
-BASE_GOARM = ''
-BASE_GOARM_armv7ve = '7'
-BASE_GOARM_armv7a = '7'
-BASE_GOARM_armv6 = '6'
-BASE_GOARM_armv5 = '5'
-
-# Go supports dynamic linking on a limited set of architectures.
-# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
-GO_DYNLINK = ""
-GO_DYNLINK_arm = "1"
-GO_DYNLINK_aarch64 = "1"
-GO_DYNLINK_x86 = "1"
-GO_DYNLINK_x86-64 = "1"
-GO_DYNLINK_powerpc64 = "1"
-GO_DYNLINK_powerpc64le = "1"
-GO_DYNLINK_class-native = ""
-GO_DYNLINK_class-nativesdk = ""
-
-# define here because everybody inherits this class
-#
-COMPATIBLE_HOST_linux-gnux32 = "null"
-COMPATIBLE_HOST_linux-muslx32 = "null"
-COMPATIBLE_HOST_powerpc = "null"
-COMPATIBLE_HOST_powerpc64 = "null"
-COMPATIBLE_HOST_powerpc64le = "null"
-COMPATIBLE_HOST_mipsarchn32 = "null"
-
-ARM_INSTRUCTION_SET_armv4 = "arm"
-ARM_INSTRUCTION_SET_armv5 = "arm"
-ARM_INSTRUCTION_SET_armv6 = "arm"
-
-TUNE_CCARGS_remove = "-march=mips32r2"
-SECURITY_NOPIE_CFLAGS ??= ""
-
-# go can't be built with ccache:
-# gcc: fatal error: no input files
-CCACHE_DISABLE ?= "1"
-
-def go_map_arch(a, d):
- import re
- if re.match('i.86', a):
- return '386'
- elif a == 'x86_64':
- return 'amd64'
- elif re.match('arm.*', a):
- return 'arm'
- elif re.match('aarch64.*', a):
- return 'arm64'
- elif re.match('mips64el.*', a):
- return 'mips64le'
- elif re.match('mips64.*', a):
- return 'mips64'
- elif a == 'mips':
- return 'mips'
- elif a == 'mipsel':
- return 'mipsle'
- elif re.match('p(pc|owerpc)(64)', a):
- return 'ppc64'
- elif re.match('p(pc|owerpc)(64el)', a):
- return 'ppc64le'
- elif a == 'riscv64':
- return 'riscv64'
- else:
- raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
-
-def go_map_arm(a, d):
- if a.startswith("arm"):
- return d.getVar('BASE_GOARM')
- return ''
-
-def go_map_386(a, f, d):
- import re
- if re.match('i.86', a):
- if ('core2' in f) or ('corei7' in f):
- return 'sse2'
- else:
- return '387'
- return ''
-
-def go_map_mips(a, f, d):
- import re
- if a == 'mips' or a == 'mipsel':
- if 'fpu-hard' in f:
- return 'hardfloat'
- else:
- return 'softfloat'
- return ''
-
-def go_map_os(o, d):
- if o.startswith('linux'):
- return 'linux'
- return o
-
-
diff --git a/meta/classes/gobject-introspection-data.bbclass b/meta/classes/gobject-introspection-data.bbclass
deleted file mode 100644
index 2ef684626a..0000000000
--- a/meta/classes/gobject-introspection-data.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-# This variable is set to True if gobject-introspection-data is in
-# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
-#
-# It should be used in recipes to determine whether introspection data should be built,
-# so that qemu use can be avoided when necessary.
-GI_DATA_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
- bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
diff --git a/meta/classes/gobject-introspection.bbclass b/meta/classes/gobject-introspection.bbclass
deleted file mode 100644
index 504f75e28d..0000000000
--- a/meta/classes/gobject-introspection.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
-# Inherit this class in recipes to enable building their introspection files
-
-# python3native is inherited to prevent introspection tools being run with
-# host's python 3 (they need to be run with native python 3)
-#
-# This also sets up autoconf-based recipes to build introspection data (or not),
-# depending on distro and machine features (see gobject-introspection-data class).
-inherit python3native gobject-introspection-data
-
-# meson: default option name to enable/disable introspection. This matches most
-# project's configuration. In doubts - check meson_options.txt in project's
-# source path.
-GIR_MESON_OPTION ?= 'introspection'
-GIR_MESON_ENABLE_FLAG ?= 'true'
-GIR_MESON_DISABLE_FLAG ?= 'false'
-
-# Auto enable/disable based on GI_DATA_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
-
-# When building native recipes, disable introspection, as it is not necessary,
-# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-introspection "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
-
-# Generating introspection data depends on a combination of native and target
-# introspection tools, and qemu to run the target tools.
-DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native prelink-native"
-
-# Even though introspection is disabled on -native, gobject-introspection package is still
-# needed for m4 macros.
-DEPENDS_append_class-native = " gobject-introspection-native"
-DEPENDS_append_class-nativesdk = " gobject-introspection-native"
-
-# This is used by introspection tools to find .gir includes
-export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
-
-do_configure_prepend_class-target () {
- # introspection.m4 pre-packaged with upstream tarballs does not yet
- # have our fixes
- mkdir -p ${S}/m4
- cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
-}
-
-# .typelib files are needed at runtime and so they go to the main package (so
-# they'll be together with libraries they support).
-FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib"
-
-# .gir files go to dev package, as they're needed for developing (but not for
-# running) things that depends on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
diff --git a/meta/classes/godep.bbclass b/meta/classes/godep.bbclass
deleted file mode 100644
index c82401c313..0000000000
--- a/meta/classes/godep.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
-DEPENDS_append = " go-dep-native"
-
-do_compile_prepend() {
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.toml
- rm -f ${WORKDIR}/build/src/${GO_IMPORT}/Gopkg.lock
- ( cd ${WORKDIR}/build/src/${GO_IMPORT} && dep init && dep ensure )
-}
-
diff --git a/meta/classes/grub-efi-cfg.bbclass b/meta/classes/grub-efi-cfg.bbclass
deleted file mode 100644
index 3a2cdd698b..0000000000
--- a/meta/classes/grub-efi-cfg.bbclass
+++ /dev/null
@@ -1,122 +0,0 @@
-# grub-efi.bbclass
-# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# Released under the MIT license (see packages/COPYING)
-
-# Provide grub-efi specific functions for building bootable images.
-
-# External variables
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-# ${GRUB_GFXSERIAL} - set this to 1 to have graphics and serial in the boot menu
-# ${LABELS} - a list of targets for the automatic config
-# ${APPEND} - an override list of append strings for each label
-# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
-# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
-# ${GRUB_ROOT} - grub's root device.
-
-GRUB_SERIAL ?= "console=ttyS0,115200"
-GRUB_CFG_VM = "${S}/grub_vm.cfg"
-GRUB_CFG_LIVE = "${S}/grub_live.cfg"
-GRUB_TIMEOUT ?= "10"
-#FIXME: build this from the machine config
-GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-
-GRUB_ROOT ?= "${ROOT}"
-APPEND ?= ""
-
-# Uses MACHINE specific KERNEL_IMAGETYPE
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-# Need UUID utility code.
-inherit fs-uuid
-
-python build_efi_cfg() {
- import sys
-
- workdir = d.getVar('WORKDIR')
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- gfxserial = d.getVar('GRUB_GFXSERIAL') or ""
-
- labels = d.getVar('LABELS')
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('GRUB_CFG')
- if not cfile:
- bb.fatal('Unable to read GRUB_CFG')
-
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
-
- opts = d.getVar('GRUB_OPTS')
- if opts:
- for opt in opts.split(';'):
- cfgfile.write('%s\n' % opt)
-
- cfgfile.write('default=%s\n' % (labels.split()[0]))
-
- timeout = d.getVar('GRUB_TIMEOUT')
- if timeout:
- cfgfile.write('timeout=%s\n' % timeout)
- else:
- cfgfile.write('timeout=50\n')
-
- root = d.getVar('GRUB_ROOT')
- if not root:
- bb.fatal('GRUB_ROOT not defined')
-
- if gfxserial == "1":
- btypes = [ [ " graphics console", "" ],
- [ " serial console", d.getVar('GRUB_SERIAL') or "" ] ]
- else:
- btypes = [ [ "", "" ] ]
-
- for label in labels.split():
- localdata = d.createCopy()
-
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
-
- localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
-
- for btype in btypes:
- cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
- lb = label
- if label == "install":
- lb = "install-efi"
- kernel = localdata.getVar('KERNEL_IMAGETYPE')
- cfgfile.write('linux /%s LABEL=%s' % (kernel, lb))
-
- cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
-
- append = localdata.getVar('APPEND')
- initrd = localdata.getVar('INITRD')
-
- if append:
- append = replace_rootfs_uuid(d, append)
- cfgfile.write(' %s' % (append))
-
- cfgfile.write(' %s' % btype[1])
- cfgfile.write('\n')
-
- if initrd:
- cfgfile.write('initrd /initrd')
- cfgfile.write('\n}\n')
-
- cfgfile.close()
-}
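An illustrative configuration (labels and kernel parameters are hypothetical) feeding build_efi_cfg() through the external variables documented at the top of the class:

    LABELS = "boot install"
    GRUB_TIMEOUT = "5"
    GRUB_GFXSERIAL = "1"
    APPEND = "rootwait quiet"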
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
deleted file mode 100644
index 8fc6999e52..0000000000
--- a/meta/classes/grub-efi.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
-inherit grub-efi-cfg
-require conf/image-uefi.conf
-
-efi_populate() {
- efi_populate_common "$1" grub-efi
-
- install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
-}
diff --git a/meta/classes/gsettings.bbclass b/meta/classes/gsettings.bbclass
deleted file mode 100644
index 33afc96a9c..0000000000
--- a/meta/classes/gsettings.bbclass
+++ /dev/null
@@ -1,42 +0,0 @@
-# A bbclass to handle installed GSettings (glib) schemas, updating the compiled
-# form on package install and remove.
-#
-# The compiled schemas are platform-agnostic, so we can depend on
-# glib-2.0-native for the native tool and run the postinst script when the
-# rootfs builds to save a little time on first boot.
-
-# TODO use a trigger so that this runs once per package operation run
-
-GSETTINGS_PACKAGE ?= "${PN}"
-
-python __anonymous() {
- pkg = d.getVar("GSETTINGS_PACKAGE")
- if pkg:
- d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
- d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
- d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
-}
-
-gsettings_postinstrm () {
- glib-compile-schemas $D${datadir}/glib-2.0/schemas
-}
-
-python populate_packages_append () {
- pkg = d.getVar('GSETTINGS_PACKAGE')
- if pkg:
- bb.note("adding gsettings postinst scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- bb.note("adding gsettings postrm scripts to %s" % pkg)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
diff --git a/meta/classes/gtk-doc.bbclass b/meta/classes/gtk-doc.bbclass
deleted file mode 100644
index 7dd662bf86..0000000000
--- a/meta/classes/gtk-doc.bbclass
+++ /dev/null
@@ -1,82 +0,0 @@
-# Helper class to pull in the right gtk-doc dependencies and configure
-# gtk-doc to enable or disable documentation building (which requires the
-# use of usermode qemu).
-
-# This variable is set to True if api-documentation is in
-# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
-#
-# It should be used in recipes to determine whether gtk-doc based documentation should be built,
-# so that qemu use can be avoided when necessary.
-GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
- bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
-
-# meson: default option name to enable/disable gtk-doc. This matches most
-# project's configuration. In doubts - check meson_options.txt in project's
-# source path.
-GTKDOC_MESON_OPTION ?= 'docs'
-GTKDOC_MESON_ENABLE_FLAG ?= 'true'
-GTKDOC_MESON_DISABLE_FLAG ?= 'false'
-
-# Auto enable/disable based on GTKDOC_ENABLED
-EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
- '--disable-gtk-doc', d)} "
-EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
-
-# When building native recipes, disable gtkdoc, as it is not necessary,
-# pulls in additional dependencies, and makes build times longer
-EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
-EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
-EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
-EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
-
-# Even though gtkdoc is disabled on -native, gtk-doc package is still
-# needed for m4 macros.
-DEPENDS_append = " gtk-doc-native"
-
-# The documentation directory, where the infrastructure will be copied.
-# gtkdocize has a default of "." so to handle out-of-tree builds set this to $S.
-GTKDOC_DOCDIR ?= "${S}"
-
-export STAGING_DIR_HOST
-
-inherit python3native pkgconfig qemu
-DEPENDS_append = "${@' qemu-native' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}"
-
-do_configure_prepend () {
- # Need to use ||true as this is only needed if configure.ac both exists
- # and uses GTK_DOC_CHECK.
- gtkdocize --srcdir ${S} --docdir ${GTKDOC_DOCDIR} || true
-}
-
-do_compile_prepend_class-target () {
- if [ ${GTKDOC_ENABLED} = True ]; then
- # Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
- # can run target helper binaries through that.
- qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
- cat > ${B}/gtkdoc-qemuwrapper << EOF
-#!/bin/sh
-# Use a modules directory which doesn't exist so we don't load random things
-# which may then get deleted (or their dependencies) and potentially segfault
-export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
-
-GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
-
-# meson sets this wrongly (only to libs in build-dir), qemu-wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
-unset LD_LIBRARY_PATH
-
-if [ -d ".libs" ]; then
- $qemu_binary ".libs/\$@"
-else
- $qemu_binary "\$@"
-fi
-
-if [ \$? -ne 0 ]; then
- echo "If the above error message is about missing .so libraries, then setting up GIR_EXTRA_LIBS_PATH in the recipe should help."
- echo "(typically like this: GIR_EXTRA_LIBS_PATH=\"$""{B}/something/.libs\" )"
- exit 1
-fi
-EOF
- chmod +x ${B}/gtkdoc-qemuwrapper
- fi
-}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
deleted file mode 100644
index dd394af27c..0000000000
--- a/meta/classes/gtk-icon-cache.bbclass
+++ /dev/null
@@ -1,79 +0,0 @@
-FILES_${PN} += "${datadir}/icons/hicolor"
-
-DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \
- ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \
- ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \
- gtk+3-native \
-"
-
-PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
-
-gtk_icon_cache_postinst() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
- mlprefix=${MLPREFIX} \
- libdir_native=${libdir_native}
-else
-
- # Update the pixbuf loaders in case they haven't been registered yet
- ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
-
- for icondir in /usr/share/icons/* ; do
- if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
- fi
- done
-fi
-}
-
-gtk_icon_cache_postrm() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
- mlprefix=${MLPREFIX} \
- libdir=${libdir}
-else
- for icondir in /usr/share/icons/* ; do
- if [ -d $icondir ] ; then
- gtk-update-icon-cache -qt $icondir
- fi
- done
-fi
-}
-
-python populate_packages_append () {
- packages = d.getVar('PACKAGES').split()
- pkgdest = d.getVar('PKGDEST')
-
- for pkg in packages:
- icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir'))
- if not os.path.exists(icon_dir):
- continue
-
- bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
- rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
-
- # gtk_icon_cache_postinst depends on gdk-pixbuf and gtk+3
- bb.note("adding gdk-pixbuf dependency to %s" % pkg)
- rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
-
- bb.note("adding gtk+3 dependency to %s" % pkg)
- rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3"
- d.appendVar('RDEPENDS_%s' % pkg, rdepends)
-
- bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_icon_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_icon_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
-
diff --git a/meta/classes/gtk-immodules-cache.bbclass b/meta/classes/gtk-immodules-cache.bbclass
deleted file mode 100644
index 9bb0af8b26..0000000000
--- a/meta/classes/gtk-immodules-cache.bbclass
+++ /dev/null
@@ -1,75 +0,0 @@
-# This class will update the input method module cache for virtual keyboards
-#
-# Usage: Set GTKIMMODULES_PACKAGES to the packages that need to update the input method modules
-
-PACKAGE_WRITE_DEPS += "qemu-native"
-
-inherit qemu
-
-GTKIMMODULES_PACKAGES ?= "${PN}"
-
-gtk_immodule_cache_postinst() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
- mlprefix=${MLPREFIX} \
- binprefix=${MLPREFIX} \
- libdir=${libdir} \
- libexecdir=${libexecdir} \
- base_libdir=${base_libdir} \
- bindir=${bindir}
-else
- if [ ! -z `which gtk-query-immodules-2.0` ]; then
- gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
- fi
- if [ ! -z `which gtk-query-immodules-3.0` ]; then
- gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
- fi
-fi
-}
-
-gtk_immodule_cache_postrm() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
- mlprefix=${MLPREFIX} \
- binprefix=${MLPREFIX} \
- libdir=${libdir} \
- libexecdir=${libexecdir} \
- base_libdir=${base_libdir} \
- bindir=${bindir}
-else
- if [ ! -z `which gtk-query-immodules-2.0` ]; then
- gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
- fi
- if [ ! -z `which gtk-query-immodules-3.0` ]; then
- gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
- fi
-fi
-}
-
-python populate_packages_append () {
- gtkimmodules_pkgs = d.getVar('GTKIMMODULES_PACKAGES').split()
-
- for pkg in gtkimmodules_pkgs:
- bb.note("adding gtk-immodule-cache postinst and postrm scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gtk_immodule_cache_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gtk_immodule_cache_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
-
-python __anonymous() {
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- gtkimmodules_check = d.getVar('GTKIMMODULES_PACKAGES', False)
- if not gtkimmodules_check:
- bb_filename = d.getVar('FILE', False)
- bb.fatal("ERROR: %s inherits gtk-immodules-cache but doesn't set GTKIMMODULES_PACKAGES" % bb_filename)
-}
-
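For reference (illustrative only, not part of the patch): a recipe using the removed class must set GTKIMMODULES_PACKAGES, otherwise the anonymous function above aborts the parse. A minimal recipe fragment, with a hypothetical package name:

    # Illustrative recipe fragment only, not part of the patch.
    inherit gtk-immodules-cache

    # Packages whose postinst/postrm must regenerate the immodules cache;
    # the package name below is a hypothetical example.
    GTKIMMODULES_PACKAGES = "${PN}-gtk-im-example"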
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index d095305ed8..159cae20f8 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -1,40 +1,45 @@
-# IceCream distributed compiling support
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Icecream distributed compiling support
#
# Stages directories with symlinks from gcc/g++ to icecc, for both
# native and cross compilers. Depending on each configure or compile,
# the directories are added at the head of the PATH list and ICECC_CXX
-# and ICEC_CC are set.
+# and ICECC_CC are set.
#
# For the cross compiler, creates a tar.gz of our toolchain and sets
# ICECC_VERSION accordingly.
#
# The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
# necessary environment tar.gz file to be used by the remote machines.
-# It also supports meta-toolchain generation
+# It also supports meta-toolchain generation.
#
# If ICECC_PATH is not set in local.conf then the class will try to locate it using 'bb.utils.which'
-# but nothing is sure ;)
+# but nothing is sure. ;)
#
# If ICECC_ENV_EXEC is set in local.conf, then it should point to the icecc-create-env script provided by the user
-# or the default one provided by icecc-create-env.bb will be used
-# (NOTE that this is a modified version of the script need it and *not the one that comes with icecc*
+# or the default one provided by icecc-create-env_0.1.bb will be used.
+# (NOTE that this is a modified version of the needed script and *not the one that comes with icecream*).
#
-# User can specify if specific packages or packages belonging to class should not use icecc to distribute
-# compile jobs to remote machines, but handled locally, by defining ICECC_USER_CLASS_BL and ICECC_USER_PACKAGE_BL
-# with the appropriate values in local.conf. In addition the user can force to enable icecc for packages
-# which set an empty PARALLEL_MAKE variable by defining ICECC_USER_PACKAGE_WL.
+# The user can specify that specific recipes, or recipes inheriting specific classes, should not use icecc to
+# distribute compile jobs to remote machines but handle them locally, by defining ICECC_CLASS_DISABLE and
+# ICECC_RECIPE_DISABLE with the appropriate values in local.conf. In addition, the user can force icecc to be
+# enabled for recipes which set an empty PARALLEL_MAKE variable by defining ICECC_RECIPE_ENABLE.
#
#########################################################################################
-#Error checking is kept to minimum so double check any parameters you pass to the class
-###########################################################################################
+# Error checking is kept to minimum so double check any parameters you pass to the class
+#########################################################################################
-BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_BL \
- ICECC_USER_CLASS_BL ICECC_USER_PACKAGE_WL ICECC_PATH ICECC_ENV_EXEC \
+BB_BASEHASH_IGNORE_VARS += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_RECIPE_DISABLE \
+ ICECC_CLASS_DISABLE ICECC_RECIPE_ENABLE ICECC_PATH ICECC_ENV_EXEC \
ICECC_CARET_WORKAROUND ICECC_CFLAGS ICECC_ENV_VERSION \
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
- ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
- ICECC_REMOTE_CPP \
+ ICECC_ENV_DEBUG ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -45,9 +50,9 @@ HOSTTOOLS_NONFATAL += "icecc patchelf"
# invalidate the version on the compile nodes. Changing it will cause a new
# environment to be created.
#
-# A useful thing to do for testing Icecream changes locally is to add a
+# A useful thing to do for testing icecream changes locally is to add a
# subversion in local.conf:
-# ICECC_ENV_VERSION_append = "-my-ver-1"
+# ICECC_ENV_VERSION:append = "-my-ver-1"
ICECC_ENV_VERSION = "2"
# Default to disabling the caret workaround, If set to "1" in local.conf, icecc
@@ -66,46 +71,46 @@ CXXFLAGS += "${ICECC_CFLAGS}"
# Debug flags when generating environments
ICECC_ENV_DEBUG ??= ""
-# "system" recipe blacklist contains a list of packages that can not distribute
-# compile tasks for one reason or the other. When adding new entry, please
+# The recipe disable list contains recipes that cannot distribute
+# compile tasks for one reason or another. When adding a new entry, please
# document why (how it failed) so that we can re-evaluate it later e.g. when
-# there is new version
+# there is a new version.
#
# libgcc-initial - fails with CPP sanity check error if host sysroot contains
-# cross gcc built for another target tune/variant
+# cross gcc built for another target tune/variant.
# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
-# pragma omp threadprivate(prng_state)
+# pragma omp threadprivate(prng_state).
# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
-# inline assembly
+# inline assembly.
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
-ICECC_SYSTEM_PACKAGE_BL += "\
+ICECC_RECIPE_DISABLE += "\
libgcc-initial \
pixman \
systemtap \
target-sdk-provides-dummy \
"
-# "system" classes that should be blacklisted. When adding new entry, please
-# document why (how it failed) so that we can re-evaluate it later
+# Classes that should not use icecc. When adding a new entry, please
+# document why (how it failed) so that we can re-evaluate it later.
#
-# image - Image aren't compiling, but the testing framework for images captures
+# image - images don't compile anything themselves, but the testing framework for images captures
# PARALLEL_MAKE as part of the test environment. Many tests won't use
# icecream, but leaving the high level of parallelism can cause them to
# consume an unnecessary amount of resources.
-ICECC_SYSTEM_CLASS_BL += "\
+ICECC_CLASS_DISABLE += "\
image \
"
-def icecc_dep_prepend(d):
- # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+def get_icecc_dep(d):
+ # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
-DEPENDS_prepend = "${@icecc_dep_prepend(d)} "
+DEPENDS:prepend = "${@get_icecc_dep(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
@@ -138,39 +143,31 @@ def use_icecc(bb,d):
if icecc_is_cross_canadian(bb, d):
return "no"
- if d.getVar('INHIBIT_DEFAULT_DEPS', False):
- # We don't have a compiler, so no icecc
- return "no"
-
pn = d.getVar('PN')
bpn = d.getVar('BPN')
- # Blacklist/whitelist checks are made against BPN, because there is a good
+ # Enable/disable checks are made against BPN, because there is a good
# chance that if icecc should be skipped for a recipe, it should be skipped
# for all the variants of that recipe. PN is still checked in case a user
# specified a more specific recipe.
check_pn = set([pn, bpn])
- system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
- user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
- package_class_blacklist = system_class_blacklist + user_class_blacklist
+ class_disable = (d.getVar('ICECC_CLASS_DISABLE') or "").split()
- for black in package_class_blacklist:
- if bb.data.inherits_class(black, d):
- bb.debug(1, "%s: class %s found in blacklist, disable icecc" % (pn, black))
+ for bbclass in class_disable:
+ if bb.data.inherits_class(bbclass, d):
+            bb.debug(1, "%s: bbclass %s found in disable list, disabling icecc" % (pn, bbclass))
return "no"
- system_package_blacklist = (d.getVar('ICECC_SYSTEM_PACKAGE_BL') or "").split()
- user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
- user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
- package_blacklist = system_package_blacklist + user_package_blacklist
+ disabled_recipes = (d.getVar('ICECC_RECIPE_DISABLE') or "").split()
+ enabled_recipes = (d.getVar('ICECC_RECIPE_ENABLE') or "").split()
- if check_pn & set(package_blacklist):
- bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
+ if check_pn & set(disabled_recipes):
+ bb.debug(1, "%s: found in disable list, disable icecc" % pn)
return "no"
- if check_pn & set(user_package_whitelist):
- bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
+ if check_pn & set(enabled_recipes):
+ bb.debug(1, "%s: found in enabled recipes list, enable icecc" % pn)
return "yes"
if d.getVar('PARALLEL_MAKE') == "":
@@ -262,7 +259,7 @@ def icecc_get_tool_link(tool, d):
def icecc_get_path_tool(tool, d):
# This is a little ugly, but we want to make sure we add an actual
# compiler to the toolchain, not ccache. Some distros (e.g. Fedora)
- # have ccache enabled by default using symlinks PATH, meaning ccache
+ # have ccache enabled by default using symlinks in PATH, meaning ccache
# would be found first when looking for the compiler.
paths = os.getenv("PATH").split(':')
while True:
@@ -313,7 +310,7 @@ wait_for_file() {
local TIMEOUT=$2
until [ -f "$FILE_TO_TEST" ]
do
- TIME_ELAPSED=`expr $TIME_ELAPSED + 1`
+ TIME_ELAPSED=$(expr $TIME_ELAPSED + 1)
if [ $TIME_ELAPSED -gt $TIMEOUT ]
then
return 1
@@ -362,12 +359,12 @@ set_icecc_env() {
ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
- bbwarn "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
+ bbnote "Cannot use icecc: could not get ICECC_CC or ICECC_CXX"
return
fi
- ICE_VERSION=`$ICECC_CC -dumpversion`
- ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ ICE_VERSION="$($ICECC_CC -dumpversion)"
+ ICECC_VERSION=$(echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g")
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
bbwarn "Cannot use icecc: invalid ICECC_ENV_EXEC"
@@ -383,7 +380,6 @@ set_icecc_env() {
fi
for compiler in $compilers; do
ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
- rm -f $ICE_PATH/$compiler
cat <<-__EOF__ > $ICE_PATH/$compiler
#!/bin/sh -e
export ICECC_VERSION=$ICECC_VERSION
@@ -394,18 +390,18 @@ set_icecc_env() {
chmod 775 $ICE_PATH/$compiler
done
- ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ ICECC_AS="$(${ICECC_CC} -print-prog-name=as)"
    # For target recipes this should return something like:
    # /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
    # and just "as" for native. If it returns "as" in the current directory (for whatever reason), use "as" from PATH.
- if [ "`dirname "${ICECC_AS}"`" = "." ]
+ if [ "$(dirname "${ICECC_AS}")" = "." ]
then
ICECC_AS="${ICECC_WHICH_AS}"
fi
if [ ! -f "${ICECC_VERSION}.done" ]
then
- mkdir -p "`dirname "${ICECC_VERSION}"`"
+ mkdir -p "$(dirname "${ICECC_VERSION}")"
# the ICECC_VERSION generation step must be locked by a mutex
# in order to prevent race conditions
@@ -432,28 +428,34 @@ set_icecc_env() {
bbnote "Using icecc tarball: $ICECC_VERSION"
}
-do_configure_prepend() {
+do_configure:prepend() {
set_icecc_env
}
-do_compile_prepend() {
+do_compile:prepend() {
set_icecc_env
}
-do_compile_kernelmodules_prepend() {
+do_compile_kernelmodules:prepend() {
set_icecc_env
}
-do_install_prepend() {
+do_install:prepend() {
set_icecc_env
}
-# IceCream is not (currently) supported in the extensible SDK
+# Icecream is not (currently) supported in the extensible SDK
ICECC_SDK_HOST_TASK = "nativesdk-icecc-toolchain"
-ICECC_SDK_HOST_TASK_task-populate-sdk-ext = ""
+ICECC_SDK_HOST_TASK:task-populate-sdk-ext = ""
-# Don't include IceCream in uninative tarball
-ICECC_SDK_HOST_TASK_pn-uninative-tarball = ""
+# Don't include icecream in uninative tarball
+ICECC_SDK_HOST_TASK:pn-uninative-tarball = ""
# Add the toolchain scripts to the SDK
-TOOLCHAIN_HOST_TASK_append = " ${ICECC_SDK_HOST_TASK}"
+TOOLCHAIN_HOST_TASK:append = " ${ICECC_SDK_HOST_TASK}"
+
+python () {
+ if d.getVar('ICECC_DISABLED') != "1":
+ for task in ['do_configure', 'do_compile', 'do_compile_kernelmodules', 'do_install']:
+ d.setVarFlag(task, 'network', '1')
+}
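For reference (illustrative only, not part of the patch): the renamed variables above are normally driven from local.conf. A minimal sketch, assuming the class is enabled via INHERIT; the recipe and class names are hypothetical examples.

    # Illustrative local.conf sketch only, not part of the patch.
    INHERIT += "icecc"
    ICECC_DISABLED = ""
    ICECC_PARALLEL_MAKE = "-j 24"
    # Recipes/classes that must build locally (hypothetical names):
    ICECC_RECIPE_DISABLE:append = " my-fragile-recipe"
    ICECC_CLASS_DISABLE:append = " my-local-only-class"
    # Force icecc on even though the recipe sets PARALLEL_MAKE = "":
    ICECC_RECIPE_ENABLE:append = " my-serial-recipe"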
diff --git a/meta/classes/image-buildinfo.bbclass b/meta/classes/image-buildinfo.bbclass
index 94c585d4cd..b83ce650ad 100644
--- a/meta/classes/image-buildinfo.bbclass
+++ b/meta/classes/image-buildinfo.bbclass
@@ -1,10 +1,10 @@
#
-# Writes build information to target filesystem on /etc/build
+# Writes build information to target filesystem on /etc/buildinfo
#
# Copyright (C) 2014 Intel Corporation
# Author: Alejandro Enedino Hernandez Samaniego <alejandro.hernandez@intel.com>
#
-# Licensed under the MIT license, see COPYING.MIT for details
+# SPDX-License-Identifier: MIT
#
# Usage: add INHERIT += "image-buildinfo" to your conf file
#
@@ -13,7 +13,8 @@
IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
# Desired location of the output file in the image.
-IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/build"
+IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/buildinfo"
+SDK_BUILDINFO_FILE ??= "/buildinfo"
# From buildhistory.bbclass
def image_buildinfo_outputvars(vars, d):
@@ -26,30 +27,10 @@ def image_buildinfo_outputvars(vars, d):
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
-# Gets git branch's status (clean or dirty)
-def get_layer_git_status(path):
- import subprocess
- try:
- subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
- git diff --quiet --no-ext-diff
- git diff --quiet --no-ext-diff --cached""" % path,
- shell=True,
- stderr=subprocess.STDOUT)
- return ""
- except subprocess.CalledProcessError as ex:
- # Silently treat errors as "modified", without checking for the
- # (expected) return code 1 in a modified git repo. For example, we get
- # output and a 129 return code when a layer isn't a git repo at all.
- return "-- modified"
-
# Returns layer revisions along with their respective status
def get_layer_revs(d):
- layers = (d.getVar("BBLAYERS") or "").split()
- medadata_revs = ["%-17s = %s:%s %s" % (os.path.basename(i), \
- base_get_metadata_git_branch(i, None).strip(), \
- base_get_metadata_git_revision(i, None), \
- get_layer_git_status(i)) \
- for i in layers]
+ revisions = oe.buildcfg.get_layer_revisions(d)
+ medadata_revs = ["%-17s = %s:%s%s" % (r[1], r[2], r[3], r[4]) for r in revisions]
return '\n'.join(medadata_revs)
def buildinfo_target(d):
@@ -60,11 +41,12 @@ def buildinfo_target(d):
vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
return image_buildinfo_outputvars(vars, d)
-# Write build information to target filesystem
-python buildinfo () {
+python buildinfo() {
if not d.getVar('IMAGE_BUILDINFO_FILE'):
return
- with open(d.expand('${IMAGE_ROOTFS}${IMAGE_BUILDINFO_FILE}'), 'w') as build:
+ destfile = d.expand('${BUILDINFODEST}${IMAGE_BUILDINFO_FILE}')
+ bb.utils.mkdirhier(os.path.dirname(destfile))
+ with open(destfile, 'w') as build:
build.writelines((
'''-----------------------
Build Configuration: |
@@ -82,4 +64,18 @@ Layer Revisions: |
))
}
-IMAGE_PREPROCESS_COMMAND += "buildinfo;"
+# Write build information to target filesystem
+python buildinfo_image () {
+ d.setVar("BUILDINFODEST", "${IMAGE_ROOTFS}")
+ bb.build.exec_func("buildinfo", d)
+}
+
+python buildinfo_sdk () {
+ d.setVar("BUILDINFODEST", "${SDK_OUTPUT}/${SDKPATH}")
+ d.setVar("IMAGE_BUILDINFO_FILE", d.getVar("SDK_BUILDINFO_FILE"))
+ bb.build.exec_func("buildinfo", d)
+}
+
+IMAGE_PREPROCESS_COMMAND += "buildinfo_image"
+POPULATE_SDK_PRE_TARGET_COMMAND += "buildinfo_sdk"
+
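For reference (illustrative only, not part of the patch): the class header above documents enabling it via INHERIT in a conf file. A minimal local.conf sketch; the extra variables added to IMAGE_BUILDINFO_VARS are examples.

    # Illustrative local.conf sketch only, not part of the patch.
    INHERIT += "image-buildinfo"
    # Defaults to "DISTRO DISTRO_VERSION"; the additions below are examples.
    IMAGE_BUILDINFO_VARS:append = " MACHINE TARGET_ARCH"
    # The output lands in ${sysconfdir}/buildinfo in the image and /buildinfo
    # in the SDK by default; override IMAGE_BUILDINFO_FILE only if needed.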
diff --git a/meta/classes/image-combined-dbg.bbclass b/meta/classes/image-combined-dbg.bbclass
deleted file mode 100644
index f4772f7ea1..0000000000
--- a/meta/classes/image-combined-dbg.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
-IMAGE_PREPROCESS_COMMAND_append = " combine_dbg_image; "
-
-combine_dbg_image () {
- if [ "${IMAGE_GEN_DEBUGFS}" = "1" -a -e ${IMAGE_ROOTFS}-dbg ]; then
- # copy target files into -dbg rootfs, so it can be used for
- # debug purposes directly
- tar -C ${IMAGE_ROOTFS} -cf - . | tar -C ${IMAGE_ROOTFS}-dbg -xf -
- fi
-}
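For reference (illustrative only, not part of the patch): the removed class only acts when a companion debug filesystem is generated, which is controlled by variables visible in image.bbclass further below. A minimal sketch, with an example debugfs type:

    # Illustrative local.conf sketch only, not part of the patch.
    IMAGE_CLASSES += "image-combined-dbg"
    IMAGE_GEN_DEBUGFS = "1"
    IMAGE_FSTYPES_DEBUGFS = "tar.gz"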
diff --git a/meta/classes/image-container.bbclass b/meta/classes/image-container.bbclass
deleted file mode 100644
index f002858bd2..0000000000
--- a/meta/classes/image-container.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
-ROOTFS_BOOTSTRAP_INSTALL = ""
-IMAGE_TYPES_MASKED += "container"
-IMAGE_TYPEDEP_container = "tar.bz2"
-
-python __anonymous() {
- if "container" in d.getVar("IMAGE_FSTYPES") and \
- d.getVar("IMAGE_CONTAINER_NO_DUMMY") != "1" and \
- "linux-dummy" not in d.getVar("PREFERRED_PROVIDER_virtual/kernel"):
- msg = '"container" is in IMAGE_FSTYPES, but ' \
- 'PREFERRED_PROVIDER_virtual/kernel is not "linux-dummy". ' \
- 'Unless a particular kernel is needed, using linux-dummy will ' \
- 'prevent a kernel from being built, which can reduce ' \
- 'build times. If you don\'t want to use "linux-dummy", set ' \
- '"IMAGE_CONTAINER_NO_DUMMY" to "1".'
-
- # Raising skip recipe was Paul's clever idea. It causes the error to
- # only be shown for the recipes actually requested to build, rather
- # than bb.fatal which would appear for all recipes inheriting the
- # class.
- raise bb.parse.SkipRecipe(msg)
-}
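For reference (illustrative only, not part of the patch): the check in the removed class expects a configuration along these lines.

    # Illustrative local.conf sketch only, not part of the patch.
    IMAGE_FSTYPES += "container"
    # Avoid building a full kernel unless one is really needed:
    PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
    # Alternatively, keep a real kernel and silence the check:
    # IMAGE_CONTAINER_NO_DUMMY = "1"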
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
deleted file mode 100644
index 54058b350d..0000000000
--- a/meta/classes/image-live.bbclass
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
-# Released under the MIT license (see packages/COPYING)
-
-# Creates a bootable image using syslinux, your kernel and an optional
-# initrd
-
-#
-# End result is two things:
-#
-# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
-# an initrd and a rootfs image. These can be written to harddisks directly and
-# also booted on USB flash disks (write them there with dd).
-#
-# 2. A CD .iso image
-
-# The boot process is that the initrd boots and processes which label was selected
-# in syslinux. Actions based on the label are then performed (e.g. installing to
-# an hdd).
-
-# External variables (also used by syslinux.bbclass)
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${HDDIMG_ID} - FAT image volume-id
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-
-inherit live-vm-common
-
-do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
- mtools-native:do_populate_sysroot \
- cdrtools-native:do_populate_sysroot \
- virtual/kernel:do_deploy \
- ${MLPREFIX}syslinux:do_populate_sysroot \
- syslinux-native:do_populate_sysroot \
- ${PN}:do_image_${@d.getVar('LIVE_ROOTFS_TYPE').replace('-', '_')} \
- "
-
-
-LABELS_LIVE ?= "boot install"
-ROOT_LIVE ?= "root=/dev/ram0"
-INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
-
-LIVE_ROOTFS_TYPE ?= "ext4"
-ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
-
-IMAGE_TYPEDEP_live = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_iso = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPEDEP_hddimg = "${LIVE_ROOTFS_TYPE}"
-IMAGE_TYPES_MASKED += "live hddimg iso"
-
-python() {
- image_b = d.getVar('IMAGE_BASENAME')
- initrd_i = d.getVar('INITRD_IMAGE_LIVE')
- if image_b == initrd_i:
- bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
- bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
- elif initrd_i:
- d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
-}
-
-HDDDIR = "${S}/hddimg"
-ISODIR = "${S}/iso"
-EFIIMGDIR = "${S}/efi_img"
-COMPACT_ISODIR = "${S}/iso.z"
-
-ISOLINUXDIR ?= "/isolinux"
-ISO_BOOTIMG = "isolinux/isolinux.bin"
-ISO_BOOTCAT = "isolinux/boot.cat"
-MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
-
-BOOTIMG_VOLUME_ID ?= "boot"
-BOOTIMG_EXTRA_SPACE ?= "512"
-
-populate_live() {
- populate_kernel $1
- if [ -s "${ROOTFS}" ]; then
- install -m 0644 ${ROOTFS} $1/rootfs.img
- fi
-}
-
-build_iso() {
- # Only create an ISO if we have an INITRD and the live or iso image type was selected
- if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
- bbnote "ISO image will not be created."
- return
- fi
- # ${INITRD} is a list of multiple filesystem images
- for fs in ${INITRD}
- do
- if [ ! -s "$fs" ]; then
- bbwarn "ISO image will not be created. $fs is invalid."
- return
- fi
- done
-
- populate_live ${ISODIR}
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_iso_populate ${ISODIR}
- fi
- if [ "${EFI}" = "1" ]; then
- efi_iso_populate ${ISODIR}
- build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
- fi
-
- # EFI only
- if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
- # Work around bug in isohybrid where it requires isolinux.bin
- # In the boot catalog, even though it is not used
- mkdir -p ${ISODIR}/${ISOLINUXDIR}
- install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
- fi
-
- # We used to have support for zisofs; this is a relic of that
- mkisofs_compress_opts="-r"
-
- # Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
-	# Check the size of ${ISODIR}/rootfs.img and use mkisofs -iso-level 3
-	# when it exceeds 3.8GB. The specification limit is 4G - 1 bytes; we need
-	# to leave some space for other files.
-
- if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
- rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
- # 4080218931 = 3.8 * 1024 * 1024 * 1024
- if [ $rootfs_img_size -gt 4080218931 ]; then
-			bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs"
- mkisofs_iso_level="-iso-level 3"
- fi
- fi
-
- if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
- # PCBIOS only media
- mkisofs -V ${BOOTIMG_VOLUME_ID} \
- -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
- -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
- $mkisofs_compress_opts \
- ${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
- else
- # EFI only OR EFI+PCBIOS
- mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
- -o ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso \
- -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
- $mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
- -eltorito-alt-boot -eltorito-platform efi \
- -b efi.img -no-emul-boot \
- ${ISODIR}
- isohybrid_args="-u"
- fi
-
- isohybrid $isohybrid_args ${IMGDEPLOYDIR}/${IMAGE_NAME}.iso
-}
-
-build_fat_img() {
- FATSOURCEDIR=$1
- FATIMG=$2
-
- # Calculate the size required for the final image including the
- # data and filesystem overhead.
- # Sectors: 512 bytes
- # Blocks: 1024 bytes
-
- # Determine the sector count just for the data
- SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
-
- # Account for the filesystem overhead. This includes directory
- # entries in the clusters as well as the FAT itself.
- # Assumptions:
- # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
- # padding will be minimal on those smaller images and not
-	#           worth the logic here to calculate the smaller FAT sizes)
- # < 16 entries per directory
- # 8.3 filenames only
-
- # 32 bytes per dir entry
- DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
- # 32 bytes for every end-of-directory dir entry
- DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
- # 4 bytes per FAT entry per sector of data
- FAT_BYTES=$(expr $SECTORS \* 4)
- # 4 bytes per FAT entry per end-of-cluster list
- FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
-
- # Use a ceiling function to determine FS overhead in sectors
- DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
- # There are two FATs on the image
- FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
- SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
-
- # Determine the final size in blocks accounting for some padding
- BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
-
- # mkdosfs will sometimes use FAT16 when it is not appropriate,
- # resulting in a boot failure from SYSLINUX. Use FAT32 for
- # images larger than 512MB, otherwise let mkdosfs decide.
- if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
- FATSIZE="-F 32"
- fi
-
-	# mkdosfs will fail if ${FATIMG} exists. Since we are creating a
-	# new image, it is safe to delete any previous image.
- if [ -e ${FATIMG} ]; then
- rm ${FATIMG}
- fi
-
- if [ -z "${HDDIMG_ID}" ]; then
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
- ${BLOCKS}
- else
- mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} ${MKDOSFS_EXTRAOPTS} -C ${FATIMG} \
- ${BLOCKS} -i ${HDDIMG_ID}
- fi
-
- # Copy FATSOURCEDIR recursively into the image file directly
- mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
-}
-
-build_hddimg() {
- # Create an HDD image
- if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
- populate_live ${HDDDIR}
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hddimg_populate ${HDDDIR}
- fi
- if [ "${EFI}" = "1" ]; then
- efi_hddimg_populate ${HDDDIR}
- fi
-
-	# Check the size of ${HDDDIR}/rootfs.img and error out if it
-	# exceeds 4GB, which is the maximum single-file size on a FAT filesystem.
- if [ -f ${HDDDIR}/rootfs.img ]; then
- rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
- max_size=`expr 4 \* 1024 \* 1024 \* 1024`
- if [ $rootfs_img_size -ge $max_size ]; then
-			bberror "${HDDDIR}/rootfs.img rootfs size is greater than or equal to 4GB,"
- bberror "and this doesn't work on a FAT filesystem. You can either:"
- bberror "1) Reduce the size of rootfs.img, or,"
- bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
- fi
- fi
-
- build_fat_img ${HDDDIR} ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
-
- if [ "${PCBIOS}" = "1" ]; then
- syslinux_hddimg_install
- fi
-
- chmod 644 ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
- fi
-}
-
-python do_bootimg() {
- set_live_vm_vars(d, 'LIVE')
- if d.getVar("PCBIOS") == "1":
- bb.build.exec_func('build_syslinux_cfg', d)
- if d.getVar("EFI") == "1":
- bb.build.exec_func('build_efi_cfg', d)
- bb.build.exec_func('build_hddimg', d)
- bb.build.exec_func('build_iso', d)
- bb.build.exec_func('create_symlinks', d)
-}
-do_bootimg[subimages] = "hddimg iso"
-do_bootimg[imgsuffix] = "."
-
-addtask bootimg before do_image_complete
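For reference (illustrative only, not part of the patch): the removed class is driven by the variables it declares above. A minimal sketch that builds a live/hddimg/iso image with the default initramfs:

    # Illustrative local.conf sketch only, not part of the patch.
    IMAGE_FSTYPES += "live"
    LIVE_ROOTFS_TYPE = "ext4"
    INITRD_IMAGE_LIVE = "core-image-minimal-initramfs"
    LABELS_LIVE = "boot install"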
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
deleted file mode 100644
index 68e11d4365..0000000000
--- a/meta/classes/image-mklibs.bbclass
+++ /dev/null
@@ -1,56 +0,0 @@
-do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
-
-inherit linuxloader
-
-mklibs_optimize_image_doit() {
- rm -rf ${WORKDIR}/mklibs
- mkdir -p ${WORKDIR}/mklibs/dest
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
-
- # Build a list of dynamically linked executable ELF files.
- # Omit libc/libpthread as a special case because it has an interpreter
- # but is primarily what we intend to strip down.
- for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
- file $i | grep -q ELF || continue
- ${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
- echo $i
- done > ${WORKDIR}/mklibs/executables.list
-
- dynamic_loader=${@get_linuxloader(d)}
-
- mklibs -v \
- --ldlib ${dynamic_loader} \
- --libdir ${baselib} \
- --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
- --gcc-options "--sysroot=${PKG_CONFIG_SYSROOT_DIR}" \
- --root ${IMAGE_ROOTFS} \
- --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
- -d ${WORKDIR}/mklibs/dest \
- `cat ${WORKDIR}/mklibs/executables.list`
-
- cd ${WORKDIR}/mklibs/dest
- for i in *
- do
- cp $i `find ${IMAGE_ROOTFS} -name $i`
- done
-
- cd ${IMAGE_ROOTFS}
- du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
-
- echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
- echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
-}
-
-mklibs_optimize_image() {
- for img in ${MKLIBS_OPTIMIZED_IMAGES}
- do
- if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
- then
- mklibs_optimize_image_doit
- break
- fi
- done
-}
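For reference (illustrative only, not part of the patch): a minimal sketch enabling the removed mklibs pass for a single image, assuming the class is pulled in through IMAGE_CLASSES; the image name is a hypothetical example.

    # Illustrative local.conf sketch only, not part of the patch.
    IMAGE_CLASSES += "image-mklibs"
    MKLIBS_OPTIMIZED_IMAGES = "my-minimal-image"
    # or optimize every image:
    # MKLIBS_OPTIMIZED_IMAGES = "all"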
diff --git a/meta/classes/image-postinst-intercepts.bbclass b/meta/classes/image-postinst-intercepts.bbclass
deleted file mode 100644
index ed30bbd98d..0000000000
--- a/meta/classes/image-postinst-intercepts.bbclass
+++ /dev/null
@@ -1,23 +0,0 @@
-# Gather existing and candidate postinst intercepts from BBPATH
-POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
-POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
-
-python find_intercepts() {
- intercepts = {}
- search_paths = []
- paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
- overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
- search_paths = [os.path.join(p, op) for p in paths for op in overrides]
- searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
- files, chksums = [], []
- for pathname, candidates in searched:
- if os.path.isfile(pathname):
- files.append(pathname)
- chksums.append('%s:True' % pathname)
- chksums.extend('%s:False' % c for c in candidates[:-1])
-
- d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
- d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
-}
-find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
-addhandler find_intercepts
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
deleted file mode 100644
index ebf6e6d7ee..0000000000
--- a/meta/classes/image-prelink.bbclass
+++ /dev/null
@@ -1,81 +0,0 @@
-do_rootfs[depends] += "prelink-native:do_populate_sysroot"
-
-IMAGE_PREPROCESS_COMMAND_append_libc-glibc = " prelink_setup; prelink_image; "
-
-python prelink_setup () {
- oe.utils.write_ld_so_conf(d)
-}
-
-inherit linuxloader
-
-prelink_image () {
-# export PSEUDO_DEBUG=4
-# /bin/env | /bin/grep PSEUDO
-# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
-# echo "LD_PRELOAD=$LD_PRELOAD"
-
- pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
- echo "Size before prelinking $pre_prelink_size."
-
- # The filesystem may not contain sysconfdir so establish what is present
- # to enable cleanup after temporary creation of sysconfdir if needed
- presentdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${IMAGE_ROOTFS}" != "${presentdir}" ] ; do
- [ ! -d "${presentdir}" ] || break
- presentdir=`dirname "${presentdir}"`
- done
-
- mkdir -p "${IMAGE_ROOTFS}${sysconfdir}"
-
- # We need a prelink conf on the filesystem, add one if it's missing
- if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
- cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
- ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- dummy_prelink_conf=true;
- else
- dummy_prelink_conf=false;
- fi
-
-	# We need an ld.so.conf with pathnames in it on the filesystem; add one if it's missing
- ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
- if [ -e $ldsoconf ]; then
- cp $ldsoconf $ldsoconf.prelink
- fi
- cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
-
- dynamic_loader=${@get_linuxloader(d)}
-
- # prelink!
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- bbnote " prelink: BUILD_REPRODUCIBLE_BINARIES..."
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- export PRELINK_TIMESTAMP=`git log -1 --pretty=%ct `
- else
- export PRELINK_TIMESTAMP=$REPRODUCIBLE_TIMESTAMP_ROOTFS
- fi
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -am -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- else
- ${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
- fi
-
- # Remove the prelink.conf if we had to add it.
- if [ "$dummy_prelink_conf" = "true" ]; then
- rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
- fi
-
- if [ -e $ldsoconf.prelink ]; then
- mv $ldsoconf.prelink $ldsoconf
- else
- rm $ldsoconf
- fi
-
- # Remove any directories temporarily created for sysconfdir
- cleanupdir="${IMAGE_ROOTFS}${sysconfdir}"
- while [ "${presentdir}" != "${cleanupdir}" ] ; do
- rmdir "${cleanupdir}"
- cleanupdir=`dirname ${cleanupdir}`
- done
-
-	post_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
-	echo "Size after prelinking $post_prelink_size."
-}
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
deleted file mode 100644
index 694b58fc9f..0000000000
--- a/meta/classes/image.bbclass
+++ /dev/null
@@ -1,672 +0,0 @@
-
-IMAGE_CLASSES ??= ""
-
-# rootfs bootstrap install
-# warning - image-container resets this
-ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
-
-# Handle inherits of any of the image classes we need
-IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
-# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
-# in the non-Linux SDK_OS case, such as mingw32
-IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
-IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
-IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
-IMGCLASSES += "image_types_wic"
-IMGCLASSES += "rootfs-postcommands"
-IMGCLASSES += "image-postinst-intercepts"
-inherit ${IMGCLASSES}
-
-TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
-TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
-
-LICENSE ?= "MIT"
-PACKAGES = ""
-DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
-RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
-RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
-PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
-
-INHIBIT_DEFAULT_DEPS = "1"
-
-# IMAGE_FEATURES may contain any available package group
-IMAGE_FEATURES ?= ""
-IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
-
-# Generate companion debugfs?
-IMAGE_GEN_DEBUGFS ?= "0"
-
-# These packages will be installed additionally into the debug rootfs
-IMAGE_INSTALL_DEBUGFS ?= ""
-
-# These packages will be removed from a read-only rootfs after all other
-# packages have been installed
-ROOTFS_RO_UNNEEDED ??= "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
-
-# packages to install from features
-FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
-FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
-FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
-FEATURE_INSTALL_OPTIONAL[vardepvalue] = "${FEATURE_INSTALL_OPTIONAL}"
-
-# Define some very basic feature package groups
-FEATURE_PACKAGES_package-management = "${ROOTFS_PKGMANAGE}"
-SPLASH ?= "psplash"
-FEATURE_PACKAGES_splash = "${SPLASH}"
-
-IMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("IMAGE_FEATURES", d)}'
-
-def check_image_features(d):
- valid_features = (d.getVarFlag('IMAGE_FEATURES', 'validitems') or "").split()
- valid_features += d.getVarFlags('COMPLEMENTARY_GLOB').keys()
- for var in d:
- if var.startswith("FEATURE_PACKAGES_"):
- valid_features.append(var[17:])
- valid_features.sort()
-
- features = set(oe.data.typed_value('IMAGE_FEATURES', d))
- for feature in features:
- if feature not in valid_features:
- if bb.utils.contains('EXTRA_IMAGE_FEATURES', feature, True, False, d):
- raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES (added via EXTRA_IMAGE_FEATURES) is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
- else:
- raise bb.parse.SkipRecipe("'%s' in IMAGE_FEATURES is not a valid image feature. Valid features: %s" % (feature, ' '.join(valid_features)))
-
-IMAGE_INSTALL ?= ""
-IMAGE_INSTALL[type] = "list"
-export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
-PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
-
-IMGDEPLOYDIR = "${WORKDIR}/deploy-${PN}-image-complete"
-
-# Images are generally built explicitly, do not need to be part of world.
-EXCLUDE_FROM_WORLD = "1"
-
-USE_DEVFS ?= "1"
-USE_DEPMOD ?= "1"
-
-PID = "${@os.getpid()}"
-
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
-LDCONFIGDEPEND_libc-musl = ""
-
-# This is needed to have depmod data in PKGDATA_DIR,
-# but if you're building small initramfs image
-# e.g. to include it in your kernel, you probably
-# don't want this dependency, which is causing dependency loop
-KERNELDEPMODDEPEND ?= "virtual/kernel:do_packagedata"
-
-do_rootfs[depends] += " \
- makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND} \
- virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot \
- ${KERNELDEPMODDEPEND} \
-"
-do_rootfs[recrdeptask] += "do_packagedata"
-
-def rootfs_command_variables(d):
- return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
- 'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
-
-python () {
- variables = rootfs_command_variables(d) + sdk_command_variables(d)
- for var in variables:
- if d.getVar(var, False):
- d.setVarFlag(var, 'func', '1')
-}
-
-def rootfs_variables(d):
- from oe.rootfs import variable_depends
- variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
- 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
- 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
- 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
- variables.extend(rootfs_command_variables(d))
- variables.extend(variable_depends(d))
- return " ".join(variables)
-
-do_rootfs[vardeps] += "${@rootfs_variables(d)}"
-
-do_build[depends] += "virtual/kernel:do_deploy"
-
-
-python () {
- def extraimage_getdepends(task):
- deps = ""
- for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split():
- deps += " %s:%s" % (dep, task)
- return deps
-
- d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
-
- deps = " " + imagetypes_getdepends(d)
- d.appendVarFlag('do_rootfs', 'depends', deps)
-
- #process IMAGE_FEATURES, we must do this before runtime_mapping_rename
- #Check for replaces image features
- features = set(oe.data.typed_value('IMAGE_FEATURES', d))
- remain_features = features.copy()
- for feature in features:
- replaces = set((d.getVar("IMAGE_FEATURES_REPLACES_%s" % feature) or "").split())
- remain_features -= replaces
-
- #Check for conflict image features
- for feature in remain_features:
- conflicts = set((d.getVar("IMAGE_FEATURES_CONFLICTS_%s" % feature) or "").split())
- temp = conflicts & remain_features
- if temp:
- bb.fatal("%s contains conflicting IMAGE_FEATURES %s %s" % (d.getVar('PN'), feature, ' '.join(list(temp))))
-
- d.setVar('IMAGE_FEATURES', ' '.join(sorted(list(remain_features))))
-
- check_image_features(d)
-}
-
-IMAGE_POSTPROCESS_COMMAND ?= ""
-
-# some default locales
-IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
-
-LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
-
-# Prefer image, but use the fallback files for lookups if the image ones
-# aren't yet available.
-PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
-
-PACKAGE_EXCLUDE ??= ""
-PACKAGE_EXCLUDE[type] = "list"
-
-fakeroot python do_rootfs () {
- from oe.rootfs import create_rootfs
- from oe.manifest import create_manifest
- import logging
-
- logger = d.getVar('BB_TASK_LOGGER', False)
- if logger:
- logcatcher = bb.utils.LogCatcher()
- logger.addHandler(logcatcher)
- else:
- logcatcher = None
-
- # NOTE: if you add, remove or significantly refactor the stages of this
- # process then you should recalculate the weightings here. This is quite
- # easy to do - just change the MultiStageProgressReporter line temporarily
- # to pass debug=True as the last parameter and you'll get a printout of
- # the weightings as well as a map to the lines where next_stage() was
- # called. Of course this isn't critical, but it helps to keep the progress
- # reporting accurate.
- stage_weights = [1, 203, 354, 186, 65, 4228, 1, 353, 49, 330, 382, 23, 1]
- progress_reporter = bb.progress.MultiStageProgressReporter(d, stage_weights)
- progress_reporter.next_stage()
-
- # Handle package exclusions
- excl_pkgs = d.getVar("PACKAGE_EXCLUDE").split()
- inst_pkgs = d.getVar("PACKAGE_INSTALL").split()
- inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY").split()
-
- d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
- d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
-
- for pkg in excl_pkgs:
- if pkg in inst_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
- inst_pkgs.remove(pkg)
-
- if pkg in inst_attempt_pkgs:
-            bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_attempt_pkgs))
- inst_attempt_pkgs.remove(pkg)
-
- d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
- d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
-
- # Ensure we handle package name remapping
- # We have to delay the runtime_mapping_rename until just before rootfs runs
- # otherwise, the multilib renaming could step in and squash any fixups that
- # may have occurred.
- pn = d.getVar('PN')
- runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
- runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
- runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
-
- # Generate the initial manifest
- create_manifest(d)
-
- progress_reporter.next_stage()
-
- # generate rootfs
- d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
- create_rootfs(d, progress_reporter=progress_reporter, logcatcher=logcatcher)
-
- progress_reporter.finish()
-}
-do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
-do_rootfs[umask] = "022"
-do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
-addtask rootfs after do_prepare_recipe_sysroot
-
-fakeroot python do_image () {
- from oe.utils import execute_pre_post_process
-
- d.setVarFlag('REPRODUCIBLE_TIMESTAMP_ROOTFS', 'export', '1')
- pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND")
-
- execute_pre_post_process(d, pre_process_cmds)
-}
-do_image[dirs] = "${TOPDIR}"
-do_image[umask] = "022"
-addtask do_image after do_rootfs
-
-fakeroot python do_image_complete () {
- from oe.utils import execute_pre_post_process
-
- post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND")
-
- execute_pre_post_process(d, post_process_cmds)
-}
-do_image_complete[dirs] = "${TOPDIR}"
-do_image_complete[umask] = "022"
-SSTATETASKS += "do_image_complete"
-SSTATE_SKIP_CREATION_task-image-complete = '1'
-do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}"
-do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
-do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}"
-addtask do_image_complete after do_image before do_build
-python do_image_complete_setscene () {
- sstate_setscene(d)
-}
-addtask do_image_complete_setscene
-
-# Add image-level QA/sanity checks to IMAGE_QA_COMMANDS
-#
-# IMAGE_QA_COMMANDS += " \
-# image_check_everything_ok \
-# "
-# This task runs all functions in IMAGE_QA_COMMANDS after the rootfs
-# construction has completed in order to validate the resulting image.
-#
-# The functions should use ${IMAGE_ROOTFS} to find the unpacked rootfs
-# directory, which if QA passes will be the basis for the images.
-fakeroot python do_image_qa () {
- from oe.utils import ImageQAFailed
-
- qa_cmds = (d.getVar('IMAGE_QA_COMMANDS') or '').split()
- qamsg = ""
-
- for cmd in qa_cmds:
- try:
- bb.build.exec_func(cmd, d)
- except oe.utils.ImageQAFailed as e:
- qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
- except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
-
- if qamsg:
- imgname = d.getVar('IMAGE_NAME')
- bb.fatal("QA errors found whilst validating image: %s\n%s" % (imgname, qamsg))
-}
-addtask do_image_qa after do_rootfs before do_image
-
-SSTATETASKS += "do_image_qa"
-SSTATE_SKIP_CREATION_task-image-qa = '1'
-do_image_qa[sstate-inputdirs] = ""
-do_image_qa[sstate-outputdirs] = ""
-python do_image_qa_setscene () {
- sstate_setscene(d)
-}
-addtask do_image_qa_setscene
-
-def setup_debugfs_variables(d):
- d.appendVar('IMAGE_ROOTFS', '-dbg')
- if d.getVar('IMAGE_LINK_NAME'):
- d.appendVar('IMAGE_LINK_NAME', '-dbg')
- d.appendVar('IMAGE_NAME','-dbg')
- d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
- debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
- if debugfs_image_fstypes:
- d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
-
-python setup_debugfs () {
- setup_debugfs_variables(d)
-}
-
-python () {
- vardeps = set()
- # We allow CONVERSIONTYPES to have duplicates. That avoids breaking
- # derived distros when OE-core or some other layer independently adds
- # the same type. There is still only one command for each type, but
- # presumably the commands will do the same when the type is the same,
- # even when added in different places.
- #
- # Without de-duplication, gen_conversion_cmds() below
- # would create the same compression command multiple times.
- ctypes = set(d.getVar('CONVERSIONTYPES').split())
- old_overrides = d.getVar('OVERRIDES', False)
-
- def _image_base_type(type):
- basetype = type
- for ctype in ctypes:
- if type.endswith("." + ctype):
- basetype = type[:-len("." + ctype)]
- break
-
- if basetype != type:
- # New base type itself might be generated by a conversion command.
- basetype = _image_base_type(basetype)
-
- return basetype
-
- basetypes = {}
- alltypes = d.getVar('IMAGE_FSTYPES').split()
- typedeps = {}
-
- if d.getVar('IMAGE_GEN_DEBUGFS') == "1":
- debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS').split()
- for t in debugfs_fstypes:
- alltypes.append("debugfs_" + t)
-
- def _add_type(t):
- baset = _image_base_type(t)
- input_t = t
- if baset not in basetypes:
- basetypes[baset]= []
- if t not in basetypes[baset]:
- basetypes[baset].append(t)
- debug = ""
- if t.startswith("debugfs_"):
- t = t[8:]
- debug = "debugfs_"
- deps = (d.getVar('IMAGE_TYPEDEP_' + t) or "").split()
- vardeps.add('IMAGE_TYPEDEP_' + t)
- if baset not in typedeps:
- typedeps[baset] = set()
- deps = [debug + dep for dep in deps]
- for dep in deps:
- if dep not in alltypes:
- alltypes.append(dep)
- _add_type(dep)
- basedep = _image_base_type(dep)
- typedeps[baset].add(basedep)
-
- if baset != input_t:
- _add_type(baset)
-
- for t in alltypes[:]:
- _add_type(t)
-
- d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
-
- maskedtypes = (d.getVar('IMAGE_TYPES_MASKED') or "").split()
- maskedtypes = [dbg + t for t in maskedtypes for dbg in ("", "debugfs_")]
-
- for t in basetypes:
- vardeps = set()
- cmds = []
- subimages = []
- realt = t
-
- if t in maskedtypes:
- continue
-
- localdata = bb.data.createCopy(d)
- debug = ""
- if t.startswith("debugfs_"):
- setup_debugfs_variables(localdata)
- debug = "setup_debugfs "
- realt = t[8:]
- localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
- localdata.setVar('type', realt)
- # Delete DATETIME so we don't expand any references to it now
- # This means the task's hash can be stable rather than having hardcoded
- # date/time values. It will get expanded at execution time.
-    # Similarly TMPDIR since otherwise we see QA stamp comparison problems
- # Expand PV else it can trigger get_srcrev which can fail due to these variables being unset
- localdata.setVar('PV', d.getVar('PV'))
- localdata.delVar('DATETIME')
- localdata.delVar('DATE')
- localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
- for dep in vardepsexclude:
- localdata.delVar(dep)
-
- image_cmd = localdata.getVar("IMAGE_CMD")
- vardeps.add('IMAGE_CMD_' + realt)
- if image_cmd:
- cmds.append("\t" + image_cmd)
- else:
- bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
- cmds.append(localdata.expand("\tcd ${IMGDEPLOYDIR}"))
-
- # Since a copy of IMAGE_CMD_xxx will be inlined within do_image_xxx,
- # prevent a redundant copy of IMAGE_CMD_xxx being emitted as a function.
- d.delVarFlag('IMAGE_CMD_' + realt, 'func')
-
- rm_tmp_images = set()
- def gen_conversion_cmds(bt):
- for ctype in sorted(ctypes):
- if bt.endswith("." + ctype):
- type = bt[0:-len(ctype) - 1]
- if type.startswith("debugfs_"):
- type = type[8:]
- # Create input image first.
- gen_conversion_cmds(type)
- localdata.setVar('type', type)
- cmd = "\t" + (localdata.getVar("CONVERSION_CMD_" + ctype) or localdata.getVar("COMPRESS_CMD_" + ctype))
- if cmd not in cmds:
- cmds.append(cmd)
- vardeps.add('CONVERSION_CMD_' + ctype)
- vardeps.add('COMPRESS_CMD_' + ctype)
- subimage = type + "." + ctype
- if subimage not in subimages:
- subimages.append(subimage)
- if type not in alltypes:
- rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
-
- for bt in basetypes[t]:
- gen_conversion_cmds(bt)
-
- localdata.setVar('type', realt)
- if t not in alltypes:
- rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
- else:
- subimages.append(realt)
-
- # Clean up after applying all conversion commands. Some of them might
- # use the same input, therefore we cannot delete sooner without applying
- # some complex dependency analysis.
- for image in sorted(rm_tmp_images):
- cmds.append("\trm " + image)
-
- after = 'do_image'
- for dep in typedeps[t]:
- after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
-
- task = "do_image_%s" % t.replace("-", "_").replace(".", "_")
-
- d.setVar(task, '\n'.join(cmds))
- d.setVarFlag(task, 'func', '1')
- d.setVarFlag(task, 'fakeroot', '1')
-
- d.appendVarFlag(task, 'prefuncs', ' ' + debug + ' set_image_size')
- d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
- d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
- d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
- d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
-
- bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
- bb.build.addtask(task, 'do_image_complete', after, d)
-}
-
-#
-# Compute the rootfs size
-#
-def get_rootfs_size(d):
- import subprocess
-
- rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT'))
- overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR'))
- rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE'))
- rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE'))
- rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE')
- image_fstypes = d.getVar('IMAGE_FSTYPES') or ''
- initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES') or ''
- initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE')
-
- output = subprocess.check_output(['du', '-ks',
- d.getVar('IMAGE_ROOTFS')])
- size_kb = int(output.split()[0])
-
- base_size = size_kb * overhead_factor
- bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
- base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
- bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
-
- base_size = base_size2
- if base_size != int(base_size):
- base_size = int(base_size + 1)
- else:
- base_size = int(base_size)
- bb.debug(1, '%f = int(%f)' % (base_size, base_size2))
-
- base_size_saved = base_size
- base_size += rootfs_alignment - 1
- base_size -= base_size % rootfs_alignment
- bb.debug(1, '%d = aligned(%d)' % (base_size, base_size_saved))
-
-    # Do not check the image size of the debugfs image. It is not supposed
-    # to be deployed, etc., so it doesn't make sense to limit the size
-    # of the debugfs image.
- if (d.getVar('IMAGE_BUILDING_DEBUGFS') or "") == "true":
- bb.debug(1, 'returning debugfs size %d' % (base_size))
- return base_size
-
- # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
- if rootfs_maxsize:
- rootfs_maxsize_int = int(rootfs_maxsize)
- if base_size > rootfs_maxsize_int:
-        bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
- (base_size, rootfs_maxsize_int))
-
- # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
- if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
- initramfs_maxsize_int = int(initramfs_maxsize)
- if base_size > initramfs_maxsize_int:
-            bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
-                (base_size, initramfs_maxsize_int))
-            bb.error("You can set INITRAMFS_MAXSIZE to a larger value. Usually, it should")
-            bb.fatal("be less than 1/2 of RAM size, or the image may fail to boot.\n")
-
- bb.debug(1, 'returning %d' % (base_size))
- return base_size
-
-python set_image_size () {
- rootfs_size = get_rootfs_size(d)
- d.setVar('ROOTFS_SIZE', str(rootfs_size))
- d.setVarFlag('ROOTFS_SIZE', 'export', '1')
-}
-
-#
-# Create symlinks to the newly created image
-#
-python create_symlinks() {
-
- deploy_dir = d.getVar('IMGDEPLOYDIR')
- img_name = d.getVar('IMAGE_NAME')
- link_name = d.getVar('IMAGE_LINK_NAME')
- manifest_name = d.getVar('IMAGE_MANIFEST')
- taskname = d.getVar("BB_CURRENTTASK")
- subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
- imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix') or d.expand("${IMAGE_NAME_SUFFIX}.")
-
- if not link_name:
- return
- for type in subimages:
- dst = os.path.join(deploy_dir, link_name + "." + type)
- src = img_name + imgsuffix + type
- if os.path.exists(os.path.join(deploy_dir, src)):
- bb.note("Creating symlink: %s -> %s" % (dst, src))
- if os.path.islink(dst):
- os.remove(dst)
- os.symlink(src, dst)
- else:
- bb.note("Skipping symlink, source does not exist: %s -> %s" % (dst, src))
-}
-
-MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
-MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
-MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
-
-do_fetch[noexec] = "1"
-do_unpack[noexec] = "1"
-do_patch[noexec] = "1"
-do_configure[noexec] = "1"
-do_compile[noexec] = "1"
-do_install[noexec] = "1"
-deltask do_populate_lic
-deltask do_populate_sysroot
-do_package[noexec] = "1"
-deltask do_package_qa
-do_packagedata[noexec] = "1"
-deltask do_package_write_ipk
-deltask do_package_write_deb
-deltask do_package_write_rpm
-
-# Prepare the root links to point to the /usr counterparts.
-create_merged_usr_symlinks() {
- root="$1"
- install -d $root${base_bindir} $root${base_sbindir} $root${base_libdir}
- lnr $root${base_bindir} $root/bin
- lnr $root${base_sbindir} $root/sbin
- lnr $root${base_libdir} $root/${baselib}
-
- if [ "${nonarch_base_libdir}" != "${base_libdir}" ]; then
- install -d $root${nonarch_base_libdir}
- lnr $root${nonarch_base_libdir} $root/lib
- fi
-
- # create base links for multilibs
- multi_libdirs="${@d.getVar('MULTILIB_VARIANTS')}"
- for d in $multi_libdirs; do
- install -d $root${exec_prefix}/$d
- lnr $root${exec_prefix}/$d $root/$d
- done
-}
-
-create_merged_usr_symlinks_rootfs() {
- create_merged_usr_symlinks ${IMAGE_ROOTFS}
-}
-
-create_merged_usr_symlinks_sdk() {
- create_merged_usr_symlinks ${SDK_OUTPUT}${SDKTARGETSYSROOT}
-}
-
-ROOTFS_PREPROCESS_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_rootfs; ', '',d)}"
-POPULATE_SDK_PRE_TARGET_COMMAND += "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', 'create_merged_usr_symlinks_sdk; ', '',d)}"
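These hooks only take effect when the usrmerge distro feature is enabled; a minimal sketch of turning it on from a distro or local configuration (assuming the rest of the distro supports a merged /usr):

    DISTRO_FEATURES_append = " usrmerge"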
-
-reproducible_final_image_task () {
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
- REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
- fi
- fi
- # Set mtime of all files to a reproducible value
- bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
- find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
- fi
-}
-
-systemd_preset_all () {
- if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
- systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
- fi
-}
-
-IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
-
-CVE_PRODUCT = ""
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
deleted file mode 100644
index ab05cc90ff..0000000000
--- a/meta/classes/image_types.bbclass
+++ /dev/null
@@ -1,335 +0,0 @@
-# IMAGE_NAME is the base name for everything produced when building images.
-# The actual image that contains the rootfs has an additional suffix (.rootfs
-# by default) followed by additional suffixes which describe the format (.ext4,
-# .ext4.xz, etc.).
-IMAGE_NAME_SUFFIX ??= ".rootfs"
-
-# The default alignment of the size of the rootfs is set to 1KiB. In case
-# you're using the SD card emulation of a QEMU system simulator you may
-# set this value to 2048 (2MiB alignment).
-IMAGE_ROOTFS_ALIGNMENT ?= "1"
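As a minimal sketch of the override mentioned in the comment above, a machine or local configuration targeting the QEMU SD card emulation could set:

    IMAGE_ROOTFS_ALIGNMENT = "2048"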
-
-def imagetypes_getdepends(d):
- def adddep(depstr, deps):
- for d in (depstr or "").split():
- # Add task dependency if not already present
- if ":" not in d:
- d += ":do_populate_sysroot"
- deps.add(d)
-
- # Take a type in the form of foo.bar.car and split it into the items
- # needed for the image deps "foo", and the conversion deps ["bar", "car"]
- def split_types(typestring):
- types = typestring.split(".")
- return types[0], types[1:]
-
- fstypes = set((d.getVar('IMAGE_FSTYPES') or "").split())
- fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS') or "").split())
-
- deprecated = set()
- deps = set()
- for typestring in fstypes:
- basetype, resttypes = split_types(typestring)
-
- var = "IMAGE_DEPENDS_%s" % basetype
- if d.getVar(var) is not None:
- deprecated.add(var)
-
- for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype) or "").split():
- base, rest = split_types(typedepends)
- resttypes += rest
-
- var = "IMAGE_DEPENDS_%s" % base
- if d.getVar(var) is not None:
- deprecated.add(var)
-
- for ctype in resttypes:
- adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype), deps)
- adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype), deps)
-
- if deprecated:
- bb.fatal('Deprecated variable(s) found: "%s". '
- 'Use do_image_<type>[depends] += "<recipe>:<task>" instead' % ', '.join(deprecated))
-
-    # Sort the set so that ordering is consistent
- return " ".join(sorted(deps))
-
-XZ_COMPRESSION_LEVEL ?= "-9"
-XZ_INTEGRITY_CHECK ?= "crc32"
-
-ZIP_COMPRESSION_LEVEL ?= "-9"
-
-ZSTD_COMPRESSION_LEVEL ?= "-3"
-
-JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
-
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
-
-oe_mkext234fs () {
- fstype=$1
- extra_imagecmd=""
-
- if [ $# -gt 1 ]; then
- shift
- extra_imagecmd=$@
- fi
-
- # If generating an empty image the size of the sparse block should be large
-    # enough to allocate an ext4 filesystem using 4096 bytes per inode; this is
- # about 60K, so dd needs a minimum count of 60, with bs=1024 (bytes per IO)
- eval local COUNT=\"0\"
- eval local MIN_COUNT=\"60\"
- if [ $ROOTFS_SIZE -lt $MIN_COUNT ]; then
- eval COUNT=\"$MIN_COUNT\"
- fi
- # Create a sparse image block
- bbdebug 1 Executing "dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024"
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
- bbdebug 1 "Actual Rootfs size: `du -s ${IMAGE_ROOTFS}`"
-    bbdebug 1 "Actual Partition size: `stat -c '%s' ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype`"
- bbdebug 1 Executing "mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}"
- mkfs.$fstype -F $extra_imagecmd ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
-    # Error codes 0-3 indicate successful operation of fsck (no errors or errors corrected)
- fsck.$fstype -pvfD ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype || [ $? -le 3 ]
-}
-
-IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext3 = "oe_mkext234fs ext3 ${EXTRA_IMAGECMD}"
-IMAGE_CMD_ext4 = "oe_mkext234fs ext4 ${EXTRA_IMAGECMD}"
-
-MIN_BTRFS_SIZE ?= "16384"
-IMAGE_CMD_btrfs () {
- size=${ROOTFS_SIZE}
- if [ ${size} -lt ${MIN_BTRFS_SIZE} ] ; then
- size=${MIN_BTRFS_SIZE}
- bbwarn "Rootfs size is too small for BTRFS. Filesystem will be extended to ${size}K"
- fi
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs seek=${size} count=0 bs=1024
- mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
-}
-
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
-IMAGE_CMD_squashfs-lz4 = "mksquashfs ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lz4 ${EXTRA_IMAGECMD} -noappend -comp lz4"
-
-# By default, tar from the host is used, which can be quite old. If
-# you need special parameters (like --xattrs) which are only supported
-# by GNU tar upstream >= 1.27, then override that default:
-# IMAGE_CMD_TAR = "tar --xattrs --xattrs-include=*"
-# do_image_tar[depends] += "tar-replacement-native:do_populate_sysroot"
-# EXTRANATIVEPATH += "tar-native"
-#
-# The GNU documentation does not specify whether --xattrs-include is necessary.
-# In practice, it turned out to be not needed when creating archives and
-# required when extracting, but it seems prudent to use it in both cases.
-IMAGE_CMD_TAR ?= "tar"
-# Ignore return code 1 ("file changed as we read it") as other tasks (e.g. do_image_wic) may be hardlinking the rootfs
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} . || [ $? -eq 1 ]"
-
-do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
-IMAGE_CMD_cpio () {
- (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
- # We only need the /init symlink if we're building the real
- # image. The -dbg image doesn't need it! By being clever
- # about this we also avoid 'touch' below failing, as it
- # might be trying to touch /sbin/init on the host since both
- # the normal and the -dbg image share the same WORKDIR
- if [ "${IMAGE_BUILDING_DEBUGFS}" != "true" ]; then
- if [ ! -L ${IMAGE_ROOTFS}/init ] && [ ! -e ${IMAGE_ROOTFS}/init ]; then
- if [ -L ${IMAGE_ROOTFS}/sbin/init ] || [ -e ${IMAGE_ROOTFS}/sbin/init ]; then
- ln -sf /sbin/init ${WORKDIR}/cpio_append/init
- else
- touch ${WORKDIR}/cpio_append/init
- fi
- (cd ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
- fi
- fi
-}
-
-UBI_VOLNAME ?= "${MACHINE}-rootfs"
-
-multiubi_mkfs() {
- local mkubifs_args="$1"
- local ubinize_args="$2"
-
-    # Print a clear error message if the arguments needed for ubi and ubifs image creation are missing.
- if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
- bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
- fi
-
- if [ -z "$3" ]; then
- local vname=""
- else
- local vname="_$3"
- fi
-
- echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
- echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo image=${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
- echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
- if [ -n "$vname" ]; then
- mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
- fi
- ubinize -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
-
- # Cleanup cfg file
- mv ubinize${vname}-${IMAGE_NAME}.cfg ${IMGDEPLOYDIR}/
-
- # Create own symlinks for 'named' volumes
- if [ -n "$vname" ]; then
- cd ${IMGDEPLOYDIR}
- if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
- ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
- ${IMAGE_LINK_NAME}${vname}.ubifs
- fi
- if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
- ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
- ${IMAGE_LINK_NAME}${vname}.ubi
- fi
- cd -
- fi
-}
-
-IMAGE_CMD_multiubi () {
- # Split MKUBIFS_ARGS_<name> and UBINIZE_ARGS_<name>
- for name in ${MULTIUBI_BUILD}; do
- eval local mkubifs_args=\"\$MKUBIFS_ARGS_${name}\"
- eval local ubinize_args=\"\$UBINIZE_ARGS_${name}\"
-
- multiubi_mkfs "${mkubifs_args}" "${ubinize_args}" "${name}"
- done
-}
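A hedged sketch of how a machine configuration might drive the multiubi image type above; the volume names and flash geometry values below are placeholders for illustration, not recommendations:

    MULTIUBI_BUILD = "partA partB"
    MKUBIFS_ARGS_partA = "-m 2048 -e 126976 -c 2047"
    UBINIZE_ARGS_partA = "-m 2048 -p 128KiB -s 512"
    MKUBIFS_ARGS_partB = "-m 2048 -e 126976 -c 4096"
    UBINIZE_ARGS_partB = "-m 2048 -p 128KiB -s 512"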
-
-IMAGE_CMD_ubi () {
- multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
-}
-IMAGE_TYPEDEP_ubi = "ubifs"
-
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
-
-MIN_F2FS_SIZE ?= "524288"
-IMAGE_CMD_f2fs () {
-    # We need to add additional smarts here for devices smaller than 1.5G.
-    # We need to scale appropriately between 40M -> 1.5G as the "overprovision
-    # ratio" goes down as the device gets bigger (70% -> 4.5%). Below about
-    # 500M the standard IMAGE_OVERHEAD_FACTOR does not work, so add additional
-    # space here when under 500M
- size=${ROOTFS_SIZE}
- if [ ${size} -lt ${MIN_F2FS_SIZE} ] ; then
- size=${MIN_F2FS_SIZE}
- bbwarn "Rootfs size is too small for F2FS. Filesystem will be extended to ${size}K"
- fi
- dd if=/dev/zero of=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs seek=${size} count=0 bs=1024
- mkfs.f2fs ${EXTRA_IMAGECMD} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
- sload.f2fs -f ${IMAGE_ROOTFS} ${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.f2fs
-}
-
-EXTRA_IMAGECMD = ""
-
-inherit siteinfo kernel-arch
-JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}"
-JFFS2_ERASEBLOCK ?= "0x40000"
-EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
-
-# Change these if you want default mkfs behavior (i.e. create a minimal number of inodes)
-EXTRA_IMAGECMD_ext2 ?= "-i 4096"
-EXTRA_IMAGECMD_ext3 ?= "-i 4096"
-EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
-EXTRA_IMAGECMD_f2fs ?= ""
-
-do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
-do_image_jffs2[depends] += "mtd-utils-native:do_populate_sysroot"
-do_image_cramfs[depends] += "util-linux-native:do_populate_sysroot"
-do_image_ext2[depends] += "e2fsprogs-native:do_populate_sysroot"
-do_image_ext3[depends] += "e2fsprogs-native:do_populate_sysroot"
-do_image_ext4[depends] += "e2fsprogs-native:do_populate_sysroot"
-do_image_btrfs[depends] += "btrfs-tools-native:do_populate_sysroot"
-do_image_squashfs[depends] += "squashfs-tools-native:do_populate_sysroot"
-do_image_squashfs_xz[depends] += "squashfs-tools-native:do_populate_sysroot"
-do_image_squashfs_lzo[depends] += "squashfs-tools-native:do_populate_sysroot"
-do_image_squashfs_lz4[depends] += "squashfs-tools-native:do_populate_sysroot"
-do_image_ubi[depends] += "mtd-utils-native:do_populate_sysroot"
-do_image_ubifs[depends] += "mtd-utils-native:do_populate_sysroot"
-do_image_multiubi[depends] += "mtd-utils-native:do_populate_sysroot"
-do_image_f2fs[depends] += "f2fs-tools-native:do_populate_sysroot"
-
-# This variable lists the values that are suitable for use in IMAGE_FSTYPES
-IMAGE_TYPES = " \
- jffs2 jffs2.sum \
- cramfs \
- ext2 ext2.gz ext2.bz2 ext2.lzma \
- ext3 ext3.gz \
- ext4 ext4.gz \
- btrfs \
- iso \
- hddimg \
- squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
- ubi ubifs multiubi \
- tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
- cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
- wic wic.gz wic.bz2 wic.lzma wic.zst \
- container \
- f2fs \
-"
-
-# Compression is a special case of conversion. The old variable
-# names are still supported for backward-compatibility. When defining
-# new compression or conversion commands, use CONVERSIONTYPES and
-# CONVERSION_CMD/DEPENDS.
-COMPRESSIONTYPES ?= ""
-
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 base64 ${COMPRESSIONTYPES}"
-CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
-CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
-CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
-CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
-CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
-CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
-CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
-CONVERSION_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
-CONVERSION_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
-CONVERSION_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
-CONVERSION_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
-CONVERSION_CMD_bmap = "bmaptool create ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.bmap"
-CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n ${IMAGE_NAME} -d ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.u-boot"
-CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
-CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
-CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
-CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
-CONVERSION_DEPENDS_lzma = "xz-native"
-CONVERSION_DEPENDS_gz = "pigz-native"
-CONVERSION_DEPENDS_bz2 = "pbzip2-native"
-CONVERSION_DEPENDS_xz = "xz-native"
-CONVERSION_DEPENDS_lz4 = "lz4-native"
-CONVERSION_DEPENDS_lzo = "lzop-native"
-CONVERSION_DEPENDS_zip = "zip-native"
-CONVERSION_DEPENDS_zst = "zstd-native"
-CONVERSION_DEPENDS_sum = "mtd-utils-native"
-CONVERSION_DEPENDS_bmap = "bmap-tools-native"
-CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
-CONVERSION_DEPENDS_vmdk = "qemu-system-native"
-CONVERSION_DEPENDS_vdi = "qemu-system-native"
-CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
-CONVERSION_DEPENDS_base64 = "coreutils-native"
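As a sketch of the conversion mechanism described above, a layer could register its own conversion type roughly as follows; the "mycomp" tool, its options and the -native recipe name are hypothetical placeholders:

    CONVERSIONTYPES_append = " mycomp"
    CONVERSION_CMD_mycomp = "mycomp ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.mycomp"
    CONVERSION_DEPENDS_mycomp = "mycomp-native"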
-
-RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
-RUNNABLE_MACHINE_PATTERNS ?= "qemu"
-
-DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
-
-# The IMAGE_TYPES_MASKED variable is used to mask out, from IMAGE_FSTYPES, image
-# types that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
-IMAGE_TYPES_MASKED ?= ""
-
-# bmap requires python3 to be in the PATH
-EXTRANATIVEPATH += "${@'python3-native' if '.bmap' in d.getVar('IMAGE_FSTYPES') else ''}"
diff --git a/meta/classes/image_types_wic.bbclass b/meta/classes/image_types_wic.bbclass
deleted file mode 100644
index 7b1db50a28..0000000000
--- a/meta/classes/image_types_wic.bbclass
+++ /dev/null
@@ -1,148 +0,0 @@
-# The WICVARS variable is used to define the list of bitbake variables used in wic code;
-# variables from this list are written to the <image>.env file
-WICVARS ?= "\
- BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
- IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
- KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND"
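If a .wks file or wic plugin needs an additional variable in the generated .env file, the list can be extended; a minimal sketch, where MY_EXTRA_VAR is a hypothetical variable name:

    WICVARS_append = " MY_EXTRA_VAR"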
-
-inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
-
-WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
-WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
-WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/wic' % p for p in '${BBPATH}'.split(':'))}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
-WKS_FULL_PATH = "${@wks_search(d.getVar('WKS_FILES').split(), d.getVar('WKS_SEARCH_PATH')) or ''}"
-
-def wks_search(files, search_path):
- for f in files:
- if os.path.isabs(f):
- if os.path.exists(f):
- return f
- else:
- searched = bb.utils.which(search_path, f)
- if searched:
- return searched
-
-WIC_CREATE_EXTRA_ARGS ?= ""
-
-IMAGE_CMD_wic () {
- out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
- build_wic="${WORKDIR}/build-wic"
- wks="${WKS_FULL_PATH}"
- if [ -z "$wks" ]; then
- bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
- fi
- BUILDDIR="${TOPDIR}" PSEUDO_UNLOAD=1 wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" ${WIC_CREATE_EXTRA_ARGS}
- mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
-}
-IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
-do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
-
-# Rebuild when the wks file or vars in WICVARS change
-USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
-WKS_FILE_CHECKSUM = "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
-do_image_wic[file-checksums] += "${WKS_FILE_CHECKSUM}"
-do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r in ('parted', 'gptfdisk', 'dosfstools', 'mtools'))}"
-
-# We ensure all artifacts are deployed (e.g. virtual/bootloader)
-do_image_wic[recrdeptask] += "do_deploy"
-do_image_wic[deptask] += "do_image_complete"
-
-WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
-WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
-WKS_FILE_DEPENDS_BOOTLOADERS = ""
-WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
-WKS_FILE_DEPENDS_BOOTLOADERS_x86-x32 = "syslinux grub-efi"
-
-WKS_FILE_DEPENDS ??= "${WKS_FILE_DEPENDS_DEFAULT} ${WKS_FILE_DEPENDS_BOOTLOADERS}"
-
-DEPENDS += "${@ '${WKS_FILE_DEPENDS}' if d.getVar('USING_WIC') else '' }"
-
-python do_write_wks_template () {
- """Write out expanded template contents to WKS_FULL_PATH."""
- import re
-
- template_body = d.getVar('_WKS_TEMPLATE')
-
- # Remove any remnant variable references left behind by the expansion
- # due to undefined variables
- expand_var_regexp = re.compile(r"\${[^{}@\n\t :]+}")
- while True:
- new_body = re.sub(expand_var_regexp, '', template_body)
- if new_body == template_body:
- break
- else:
- template_body = new_body
-
- wks_file = d.getVar('WKS_FULL_PATH')
- with open(wks_file, 'w') as f:
- f.write(template_body)
- # Copy the finalized wks file to the deploy directory for later use
- depdir = d.getVar('IMGDEPLOYDIR')
- basename = d.getVar('IMAGE_BASENAME')
- bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
-}
-
-do_flush_pseudodb() {
- ${FAKEROOTENV} ${FAKEROOTCMD} -S
-}
-
-python () {
- if d.getVar('USING_WIC'):
- wks_file_u = d.getVar('WKS_FULL_PATH', False)
- wks_file = d.expand(wks_file_u)
- base, ext = os.path.splitext(wks_file)
- if ext == '.in' and os.path.exists(wks_file):
- wks_out_file = os.path.join(d.getVar('WORKDIR'), os.path.basename(base))
- d.setVar('WKS_FULL_PATH', wks_out_file)
- d.setVar('WKS_TEMPLATE_PATH', wks_file_u)
- d.setVar('WKS_FILE_CHECKSUM', '${WKS_TEMPLATE_PATH}:True')
-
- # We need to re-parse each time the file changes, and bitbake
- # needs to be told about that explicitly.
- bb.parse.mark_dependency(d, wks_file)
-
- try:
- with open(wks_file, 'r') as f:
- body = f.read()
- except (IOError, OSError) as exc:
- pass
- else:
- # Previously, I used expandWithRefs to get the dependency list
- # and add it to WICVARS, but there's no point re-parsing the
- # file in process_wks_template as well, so just put it in
- # a variable and let the metadata deal with the deps.
- d.setVar('_WKS_TEMPLATE', body)
- bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
- bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
-}
-
-#
-# Write environment variables used by wic
-# to tmp/sysroots/<machine>/imgdata/<image>.env
-#
-python do_rootfs_wicenv () {
- wicvars = d.getVar('WICVARS')
- if not wicvars:
- return
-
- stdir = d.getVar('STAGING_DIR')
- outdir = os.path.join(stdir, d.getVar('MACHINE'), 'imgdata')
- bb.utils.mkdirhier(outdir)
- basename = d.getVar('IMAGE_BASENAME')
- with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
- for var in wicvars.split():
- value = d.getVar(var)
- if value:
- envf.write('%s="%s"\n' % (var, value.strip()))
-    # Copy the .env file to the deploy directory for later use with standalone wic
- depdir = d.getVar('IMGDEPLOYDIR')
- bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
-}
-addtask do_flush_pseudodb after do_rootfs before do_image do_image_qa
-addtask do_rootfs_wicenv after do_image before do_image_wic
-do_rootfs_wicenv[vardeps] += "${WICVARS}"
-do_rootfs_wicenv[prefuncs] = 'set_image_size'
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
deleted file mode 100644
index 649aea1da1..0000000000
--- a/meta/classes/insane.bbclass
+++ /dev/null
@@ -1,1357 +0,0 @@
-# BB Class inspired by ebuild.sh
-#
-# This class will test files after installation for certain
-# security issues and other kinds of issues.
-#
-# Checks we do:
-# -Check the ownership and permissions
-# -Check the RUNTIME path for the $TMPDIR
-# -Check if .la files wrongly point to workdir
-# -Check if .pc files wrongly point to workdir
-#  -Check if packages contain .debug directories or .so files
-# where they should be in -dev or -dbg
-#  -Check if config.log contains traces of broken autoconf tests
-#  -Check for invalid characters (non-utf8) in some package metadata
-# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
-# into exec_prefix
-# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
-# files under exec_prefix
-# -Check if the package name is upper case
-
-QA_SANE = "True"
-
-# Select whether a given type of error is a warning or an error; they may
-# have been set by other files.
-WARN_QA ?= " libdir xorg-driver-abi \
- textrel incompatible-license files-invalid \
- infodir build-deps src-uri-bad symlink-to-sysroot multilib \
- invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
- mime mime-xdg unlisted-pkg-lics unhandled-features-check \
- "
-ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
- perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
- split-strip packages-list pkgv-undefined var-undefined \
- version-going-backwards expanded-d invalid-chars \
- license-checksum dev-elf file-rdeps configure-unsafe \
- configure-gettext perllocalpod shebang-size \
- already-stripped installed-vs-shipped ldflags compile-host-path \
- install-host-path pn-overrides unknown-configure-option \
- useless-rpaths rpaths staticdev \
- "
-# Add usrmerge QA check based on distro feature
-ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
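For reference, individual checks can also be skipped per recipe rather than globally; a minimal recipe-level sketch, using INSANE_SKIP as handled further below:

    INSANE_SKIP_${PN} += "ldflags"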
-
-FAKEROOT_QA = "host-user-contaminated"
-FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
-enabled tests are listed here, the do_package_qa task will run under fakeroot."
-
-ALL_QA = "${WARN_QA} ${ERROR_QA}"
-
-UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
-
-def package_qa_clean_path(path, d, pkg=None):
- """
- Remove redundant paths from the path for display. If pkg isn't set then
- TMPDIR is stripped, otherwise PKGDEST/pkg is stripped.
- """
- if pkg:
- path = path.replace(os.path.join(d.getVar("PKGDEST"), pkg), "/")
- return path.replace(d.getVar("TMPDIR"), "/").replace("//", "/")
-
-def package_qa_write_error(type, error, d):
- logfile = d.getVar('QA_LOGFILE')
- if logfile:
- p = d.getVar('P')
- with open(logfile, "a+") as f:
- f.write("%s: %s [%s]\n" % (p, error, type))
-
-def package_qa_handle_error(error_class, error_msg, d):
- if error_class in (d.getVar("ERROR_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
- d.setVar("QA_SANE", False)
- return False
- elif error_class in (d.getVar("WARN_QA") or "").split():
- package_qa_write_error(error_class, error_msg, d)
- bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
- else:
- bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
- return True
-
-def package_qa_add_message(messages, section, new_msg):
- if section not in messages:
- messages[section] = new_msg
- else:
- messages[section] = messages[section] + "\n" + new_msg
-
-QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
-def package_qa_check_shebang_size(path, name, d, elf, messages):
- if os.path.islink(path) or elf:
- return
-
- try:
- with open(path, 'rb') as f:
- stanza = f.readline(130)
- except IOError:
- return
-
- if stanza.startswith(b'#!'):
-        # A shebang line is present; check that it is text and not too long
- try:
- stanza = stanza.decode("utf-8")
- except UnicodeDecodeError:
- #If it is not a text file, it is not a script
- return
-
- if len(stanza) > 129:
- package_qa_add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
- return
-
-QAPATHTEST[libexec] = "package_qa_check_libexec"
-def package_qa_check_libexec(path,name, d, elf, messages):
-
- # Skip the case where the default is explicitly /usr/libexec
- libexec = d.getVar('libexecdir')
- if libexec == "/usr/libexec":
- return True
-
- if 'libexec' in path.split(os.path.sep):
-        package_qa_add_message(messages, "libexec", "%s: %s is using libexec, please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
- return False
-
- return True
-
-QAPATHTEST[rpaths] = "package_qa_check_rpath"
-def package_qa_check_rpath(file,name, d, elf, messages):
- """
- Check for dangerous RPATHs
- """
- if not elf:
- return
-
- if os.path.islink(file):
- return
-
- bad_dirs = [d.getVar('BASE_WORKDIR'), d.getVar('STAGING_DIR_TARGET')]
-
- phdrs = elf.run_objdump("-p", d)
-
- import re
- rpath_re = re.compile(r"\s+RPATH\s+(.*)")
- for line in phdrs.split("\n"):
- m = rpath_re.match(line)
- if m:
- rpath = m.group(1)
- for dir in bad_dirs:
- if dir in rpath:
- package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
-
-QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
-def package_qa_check_useless_rpaths(file, name, d, elf, messages):
- """
- Check for RPATHs that are useless but not dangerous
- """
- def rpath_eq(a, b):
- return os.path.normpath(a) == os.path.normpath(b)
-
- if not elf:
- return
-
- if os.path.islink(file):
- return
-
- libdir = d.getVar("libdir")
- base_libdir = d.getVar("base_libdir")
-
- phdrs = elf.run_objdump("-p", d)
-
- import re
- rpath_re = re.compile(r"\s+RPATH\s+(.*)")
- for line in phdrs.split("\n"):
- m = rpath_re.match(line)
- if m:
- rpath = m.group(1)
- if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
- # The dynamic linker searches both these places anyway. There is no point in
- # looking there again.
- package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
-
-QAPATHTEST[dev-so] = "package_qa_check_dev"
-def package_qa_check_dev(path, name, d, elf, messages):
- """
- Check for ".so" library symlinks in non-dev packages
- """
-
- if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
- package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
-
-QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
-def package_qa_check_dev_elf(path, name, d, elf, messages):
- """
- Check that -dev doesn't contain real shared libraries. The test has to
- check that the file is not a link and is an ELF object as some recipes
- install link-time .so files that are linker scripts.
- """
- if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
- package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
-
-QAPATHTEST[staticdev] = "package_qa_check_staticdev"
-def package_qa_check_staticdev(path, name, d, elf, messages):
- """
- Check for ".a" library in non-staticdev packages
- There are a number of exceptions to this rule, -pic packages can contain
- static libraries, the _nonshared.a belong with their -dev packages and
- libgcc.a, libgcov.a will be skipped in their packages
- """
-
- if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
- package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
-
-QAPATHTEST[mime] = "package_qa_check_mime"
-def package_qa_check_mime(path, name, d, elf, messages):
- """
- Check if package installs mime types to /usr/share/mime/packages
-    without inheriting mime.bbclass
- """
-
- if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
- package_qa_add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
-
-QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
-def package_qa_check_mime_xdg(path, name, d, elf, messages):
- """
-    Check if the package installs a desktop file containing MimeType and requires
-    mime-xdg.bbclass to create /usr/share/applications/mimeinfo.cache
- """
-
- if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
- mime_type_found = False
- try:
- with open(path, 'r') as f:
- for line in f.read().split('\n'):
- if 'MimeType' in line:
- mime_type_found = True
-                        break
- except:
- # At least libreoffice installs symlinks with absolute paths that are dangling here.
-            # We could implement some magic, but for the few (one) recipes affected it is not worth the effort, so just warn:
- wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
- wstr += "Please check if (linked) file contains key 'MimeType'.\n"
- pkgname = name
- if name == d.getVar('PN'):
- pkgname = '${PN}'
-                wstr += "If yes: add \'inherit mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP_%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
- package_qa_add_message(messages, "mime-xdg", wstr)
- if mime_type_found:
-            package_qa_add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inherit mime-xdg: %s path '%s'" % \
- (name, package_qa_clean_path(path,d)))
-
-def package_qa_check_libdir(d):
- """
- Check for wrong library installation paths. For instance, catch
- recipes installing /lib/bar.so when ${base_libdir}="lib32" or
- installing in /usr/lib64 when ${libdir}="/usr/lib"
- """
- import re
-
- pkgdest = d.getVar('PKGDEST')
- base_libdir = d.getVar("base_libdir") + os.sep
- libdir = d.getVar("libdir") + os.sep
- libexecdir = d.getVar("libexecdir") + os.sep
- exec_prefix = d.getVar("exec_prefix") + os.sep
-
- messages = []
-
-    # The re's are purposely fuzzy, as there are some .so.x.y.z files
-    # that don't follow the standard naming convention. We check later
-    # that they are actual ELF files
- lib_re = re.compile(r"^/lib.+\.so(\..+)?$")
- exec_re = re.compile(r"^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
-
- for root, dirs, files in os.walk(pkgdest):
- if root == pkgdest:
- # Skip subdirectories for any packages with libdir in INSANE_SKIP
- skippackages = []
- for package in dirs:
- if 'libdir' in (d.getVar('INSANE_SKIP_' + package) or "").split():
- bb.note("Package %s skipping libdir QA test" % (package))
- skippackages.append(package)
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory' and package.endswith("-dbg"):
- bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
- skippackages.append(package)
- for package in skippackages:
- dirs.remove(package)
- for file in files:
- full_path = os.path.join(root, file)
- rel_path = os.path.relpath(full_path, pkgdest)
- if os.sep in rel_path:
- package, rel_path = rel_path.split(os.sep, 1)
- rel_path = os.sep + rel_path
- if lib_re.match(rel_path):
- if base_libdir not in rel_path:
- # make sure it's an actual ELF file
- elf = oe.qa.ELFFile(full_path)
- try:
- elf.open()
- messages.append("%s: found library in wrong location: %s" % (package, rel_path))
- except (oe.qa.NotELFFileError):
- pass
- if exec_re.match(rel_path):
- if libdir not in rel_path and libexecdir not in rel_path:
- # make sure it's an actual ELF file
- elf = oe.qa.ELFFile(full_path)
- try:
- elf.open()
- messages.append("%s: found library in wrong location: %s" % (package, rel_path))
- except (oe.qa.NotELFFileError):
- pass
-
- if messages:
- package_qa_handle_error("libdir", "\n".join(messages), d)
-
-QAPATHTEST[debug-files] = "package_qa_check_dbg"
-def package_qa_check_dbg(path, name, d, elf, messages):
- """
- Check for ".debug" files or directories outside of the dbg package
- """
-
- if not "-dbg" in name and not "-ptest" in name:
- if '.debug' in path.split(os.path.sep):
- package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
- (name, package_qa_clean_path(path,d)))
-
-QAPATHTEST[arch] = "package_qa_check_arch"
-def package_qa_check_arch(path,name,d, elf, messages):
- """
- Check if archs are compatible
- """
- import re, oe.elf
-
- if not elf:
- return
-
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
- provides = d.getVar('PROVIDES')
- bpn = d.getVar('BPN')
-
- if target_arch == "allarch":
- pn = d.getVar('PN')
- package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
- return
-
-    # FIXME: Cross packages confuse this check, so just skip them
- for s in ['cross', 'nativesdk', 'cross-canadian']:
- if bb.data.inherits_class(s, d):
- return
-
- # avoid following links to /usr/bin (e.g. on udev builds)
- # we will check the files pointed to anyway...
- if os.path.islink(path):
- return
-
-    # If this throws an exception, fix the dict above
- (machine, osabi, abiversion, littleendian, bits) \
- = oe.elf.machine_dict(d)[target_os][target_arch]
-
-    # Check the architecture and endianness of the binary
- is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
- (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
- target_os == "linux-gnu_ilp32" or re.match(r'mips64.*32', d.getVar('DEFAULTTUNE')))
- is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
- if not ((machine == elf.machine()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
- (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
- elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
- package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
- (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
- elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
-        package_qa_add_message(messages, "arch", "Endianness did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
-
-QAPATHTEST[desktop] = "package_qa_check_desktop"
-def package_qa_check_desktop(path, name, d, elf, messages):
- """
- Run all desktop files through desktop-file-validate.
- """
- if path.endswith(".desktop"):
- desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE'),'desktop-file-validate')
- output = os.popen("%s %s" % (desktop_file_validate, path))
- # This only produces output on errors
- for l in output:
- package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
-
-QAPATHTEST[textrel] = "package_qa_textrel"
-def package_qa_textrel(path, name, d, elf, messages):
- """
- Check if the binary contains relocations in .text
- """
-
- if not elf:
- return
-
- if os.path.islink(path):
- return
-
- phdrs = elf.run_objdump("-p", d)
- sane = True
-
- import re
- textrel_re = re.compile(r"\s+TEXTREL\s+")
- for line in phdrs.split("\n"):
- if textrel_re.match(line):
- sane = False
- break
-
- if not sane:
- path = package_qa_clean_path(path, d, name)
- package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
-
-QAPATHTEST[ldflags] = "package_qa_hash_style"
-def package_qa_hash_style(path, name, d, elf, messages):
- """
- Check if the binary has the right hash style...
- """
-
- if not elf:
- return
-
- if os.path.islink(path):
- return
-
- gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS')
- if not gnu_hash:
- gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS')
- if not gnu_hash:
- return
-
- sane = False
- has_syms = False
-
- phdrs = elf.run_objdump("-p", d)
-
- # If this binary has symbols, we expect it to have GNU_HASH too.
- for line in phdrs.split("\n"):
- if "SYMTAB" in line:
- has_syms = True
-        if "GNU_HASH" in line or "DT_MIPS_XHASH" in line:
- sane = True
- if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
- sane = True
- if has_syms and not sane:
- package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
-
-
-QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
-def package_qa_check_buildpaths(path, name, d, elf, messages):
- """
-    Check for build paths inside target files and flag any that are found
- """
- # Ignore .debug files, not interesting
- if path.find(".debug") != -1:
- return
-
- # Ignore symlinks
- if os.path.islink(path):
- return
-
- # Ignore ipk and deb's CONTROL dir
- if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
- return
-
- tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
- with open(path, 'rb') as f:
- file_content = f.read()
- if tmpdir in file_content:
- trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
-
-
-QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
-def package_qa_check_xorg_driver_abi(path, name, d, elf, messages):
- """
- Check that all packages containing Xorg drivers have ABI dependencies
- """
-
- # Skip dev, dbg or nativesdk packages
- if name.endswith("-dev") or name.endswith("-dbg") or name.startswith("nativesdk-"):
- return
-
- driverdir = d.expand("${libdir}/xorg/modules/drivers/")
- if driverdir in path and path.endswith(".so"):
- mlprefix = d.getVar('MLPREFIX') or ''
- for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name) or ""):
- if rdep.startswith("%sxorg-abi-" % mlprefix):
- return
- package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
-
-QAPATHTEST[infodir] = "package_qa_check_infodir"
-def package_qa_check_infodir(path, name, d, elf, messages):
- """
- Check that /usr/share/info/dir isn't shipped in a particular package
- """
- infodir = d.expand("${infodir}/dir")
-
- if infodir in path:
- package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
-
-QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
-def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
- """
- Check that the package doesn't contain any absolute symlinks to the sysroot.
- """
- if os.path.islink(path):
- target = os.readlink(path)
- if os.path.isabs(target):
- tmpdir = d.getVar('TMPDIR')
- if target.startswith(tmpdir):
- trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
- package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
-
-# Check license variables
-do_populate_lic[postfuncs] += "populate_lic_qa_checksum"
-python populate_lic_qa_checksum() {
- """
- Check for changes in the license files.
- """
- sane = True
-
- lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
- lic = d.getVar('LICENSE')
- pn = d.getVar('PN')
-
- if lic == "CLOSED":
- return
-
- if not lic_files and d.getVar('SRC_URI'):
- sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
-
- srcdir = d.getVar('S')
- corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
- for url in lic_files.split():
- try:
- (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
- except bb.fetch.MalformedUrl:
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
- continue
- srclicfile = os.path.join(srcdir, path)
- if not os.path.isfile(srclicfile):
- sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
- continue
-
- if (srclicfile == corebase_licensefile):
- bb.warn("${COREBASE}/LICENSE is not a valid license file, please use '${COMMON_LICENSE_DIR}/MIT' for a MIT License file in LIC_FILES_CHKSUM. This will become an error in the future")
-
- recipemd5 = parm.get('md5', '')
- beginline, endline = 0, 0
- if 'beginline' in parm:
- beginline = int(parm['beginline'])
- if 'endline' in parm:
- endline = int(parm['endline'])
-
- if (not beginline) and (not endline):
- md5chksum = bb.utils.md5_file(srclicfile)
- with open(srclicfile, 'r', errors='replace') as f:
- license = f.read().splitlines()
- else:
- with open(srclicfile, 'rb') as f:
- import hashlib
- lineno = 0
- license = []
- m = hashlib.md5()
- for line in f:
- lineno += 1
- if (lineno >= beginline):
- if ((lineno <= endline) or not endline):
- m.update(line)
- license.append(line.decode('utf-8', errors='replace').rstrip())
- else:
- break
- md5chksum = m.hexdigest()
- if recipemd5 == md5chksum:
-            bb.note(pn + ": md5 checksum matched for " + url)
- else:
- if recipemd5:
- msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
- msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
- max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
- if not license or license[-1] != '':
- # Ensure that our license text ends with a line break
- # (will be added with join() below).
- license.append('')
- remove = len(license) - max_lines
- if remove > 0:
- start = max_lines // 2
- end = start + remove - 1
- del license[start:end]
- license.insert(start, '...')
- msg = msg + "\n" + pn + ": Here is the selected license text:" + \
- "\n" + \
- "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
- "\n" + "\n".join(license) + \
- "{:^^70}".format(" endline=%d " % endline if endline else "")
- if beginline:
- if endline:
- srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
- else:
- srcfiledesc = "%s (beginning on line %d)" % (srclicfile, beginline)
- elif endline:
- srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
- else:
- srcfiledesc = srclicfile
- msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
-
- else:
- msg = pn + ": LIC_FILES_CHKSUM is not specified for " + url
- msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
- sane &= package_qa_handle_error("license-checksum", msg, d)
-
- if not sane:
- bb.fatal("Fatal QA errors found, failing task.")
-}
-
-def qa_check_staged(path,d):
- """
- Check staged la and pc files for common problems like references to the work
- directory.
-
- As this is run after every stage we should be able to find the one
- responsible for the errors easily even if we look at every .pc and .la file.
- """
-
- sane = True
- tmpdir = d.getVar('TMPDIR')
- workdir = os.path.join(tmpdir, "work")
- recipesysroot = d.getVar("RECIPE_SYSROOT")
-
- if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
- pkgconfigcheck = workdir
- else:
- pkgconfigcheck = tmpdir
-
- skip = (d.getVar('INSANE_SKIP') or "").split()
- skip_la = False
- if 'la' in skip:
- bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
- skip_la = True
-
- skip_pkgconfig = False
- if 'pkgconfig' in skip:
- bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
- skip_pkgconfig = True
-
- # find all .la and .pc files
- # read the content
- # and check for stuff that looks wrong
- for root, dirs, files in os.walk(path):
- for file in files:
- path = os.path.join(root,file)
- if file.endswith(".la") and not skip_la:
- with open(path) as f:
- file_content = f.read()
- file_content = file_content.replace(recipesysroot, "")
- if workdir in file_content:
- error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("la", error_msg, d)
- elif file.endswith(".pc") and not skip_pkgconfig:
- with open(path) as f:
- file_content = f.read()
- file_content = file_content.replace(recipesysroot, "")
- if pkgconfigcheck in file_content:
- error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane &= package_qa_handle_error("pkgconfig", error_msg, d)
-
- return sane
-
-# Run all package-wide warnfuncs and errorfuncs
-def package_qa_package(warnfuncs, errorfuncs, package, d):
- warnings = {}
- errors = {}
-
- for func in warnfuncs:
- func(package, d, warnings)
- for func in errorfuncs:
- func(package, d, errors)
-
- for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
- for e in errors:
- package_qa_handle_error(e, errors[e], d)
-
- return len(errors) == 0
-
-# Run all recipe-wide warnfuncs and errorfuncs
-def package_qa_recipe(warnfuncs, errorfuncs, pn, d):
- warnings = {}
- errors = {}
-
- for func in warnfuncs:
- func(pn, d, warnings)
- for func in errorfuncs:
- func(pn, d, errors)
-
- for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
- for e in errors:
- package_qa_handle_error(e, errors[e], d)
-
- return len(errors) == 0
-
-# Walk over all files in a directory and call func
-def package_qa_walk(warnfuncs, errorfuncs, package, d):
- import oe.qa
-
-    # If this throws an exception, fix the dict above
- target_os = d.getVar('TARGET_OS')
- target_arch = d.getVar('TARGET_ARCH')
-
- warnings = {}
- errors = {}
- for path in pkgfiles[package]:
- elf = oe.qa.ELFFile(path)
- try:
- elf.open()
- except (IOError, oe.qa.NotELFFileError):
-            # IOError can happen if the packaging control files disappear.
- elf = None
- for func in warnfuncs:
- func(path, package, d, elf, warnings)
- for func in errorfuncs:
- func(path, package, d, elf, errors)
-
- for w in warnings:
- package_qa_handle_error(w, warnings[w], d)
- for e in errors:
- package_qa_handle_error(e, errors[e], d)
-
-def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
-    # Don't do this check for kernel/module recipes; there aren't too many debug/development
-    # packages and you can get false positives, e.g. on kernel-module-lirc-dev
- if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
- return
-
- if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
- localdata = bb.data.createCopy(d)
- localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES') + ':' + pkg)
-
- # Now check the RDEPENDS
- rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS') or "")
-
- # Now do the sanity check!!!
- if "build-deps" not in skip:
- for rdepend in rdepends:
- if "-dbg" in rdepend and "debug-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg,rdepend)
- package_qa_handle_error("debug-deps", error_msg, d)
- if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
- error_msg = "%s rdepends on %s" % (pkg, rdepend)
- package_qa_handle_error("dev-deps", error_msg, d)
- if rdepend not in packages:
- rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- continue
- if not rdep_data or not 'PN' in rdep_data:
- pkgdata_dir = d.getVar("PKGDATA_DIR")
- try:
- possibles = os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdepend))
- except OSError:
- possibles = []
- for p in possibles:
- rdep_data = oe.packagedata.read_subpkgdata(p, d)
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- break
- if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
- continue
- if rdep_data and 'PN' in rdep_data:
- error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
- else:
- error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
- package_qa_handle_error("build-deps", error_msg, d)
-
- if "file-rdeps" not in skip:
- ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
- if bb.data.inherits_class('nativesdk', d):
- ignored_file_rdeps |= set(['/bin/bash', '/usr/bin/perl', 'perl'])
- # For Saving the FILERDEPENDS
- filerdepends = {}
- rdep_data = oe.packagedata.read_subpkgdata(pkg, d)
- for key in rdep_data:
- if key.startswith("FILERDEPENDS_"):
- for subkey in bb.utils.explode_deps(rdep_data[key]):
- if subkey not in ignored_file_rdeps and \
- not subkey.startswith('perl('):
- # We already know it starts with FILERDEPENDS_
- filerdepends[subkey] = key[13:]
-
- if filerdepends:
- done = rdepends[:]
- # Add the rprovides of itself
- if pkg not in done:
- done.insert(0, pkg)
-
-                # python itself is not a package, but python-core provides it, so
- # skip checking /usr/bin/python if python is in the rdeps, in
- # case there is a RDEPENDS_pkg = "python" in the recipe.
- for py in [ d.getVar('MLPREFIX') + "python", "python" ]:
- if py in done:
- filerdepends.pop("/usr/bin/python",None)
- done.remove(py)
- for rdep in done:
- # The file dependencies may contain package names, e.g.,
- # perl
- filerdepends.pop(rdep,None)
-
- # For Saving the FILERPROVIDES, RPROVIDES and FILES_INFO
- rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
- for key in rdep_data:
- if key.startswith("FILERPROVIDES_") or key.startswith("RPROVIDES_"):
- for subkey in bb.utils.explode_deps(rdep_data[key]):
- filerdepends.pop(subkey,None)
- # Add the files list to the rprovides
- if key == "FILES_INFO":
- # Use eval() to make it as a dict
- for subkey in eval(rdep_data[key]):
- filerdepends.pop(subkey,None)
- if not filerdepends:
- # Break if all the file rdepends are met
- break
- if filerdepends:
- for key in filerdepends:
- error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
- (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
- package_qa_handle_error("file-rdeps", error_msg, d)
-package_qa_check_rdepends[vardepsexclude] = "OVERRIDES"
-
-def package_qa_check_deps(pkg, pkgdest, d):
-
- localdata = bb.data.createCopy(d)
- localdata.setVar('OVERRIDES', pkg)
-
- def check_valid_deps(var):
- try:
- rvar = bb.utils.explode_dep_versions2(localdata.getVar(var) or "")
- except ValueError as e:
- bb.fatal("%s_%s: %s" % (var, pkg, e))
- for dep in rvar:
- for v in rvar[dep]:
- if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
- error_msg = "%s_%s is invalid: %s (%s) only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
- package_qa_handle_error("dep-cmp", error_msg, d)
-
- check_valid_deps('RDEPENDS')
- check_valid_deps('RRECOMMENDS')
- check_valid_deps('RSUGGESTS')
- check_valid_deps('RPROVIDES')
- check_valid_deps('RREPLACES')
- check_valid_deps('RCONFLICTS')
-
-QAPKGTEST[usrmerge] = "package_qa_check_usrmerge"
-def package_qa_check_usrmerge(pkg, d, messages):
- pkgdest = d.getVar('PKGDEST')
- pkg_dir = pkgdest + os.sep + pkg + os.sep
- merged_dirs = ['bin', 'sbin', 'lib'] + d.getVar('MULTILIB_VARIANTS').split()
- for f in merged_dirs:
- if os.path.exists(pkg_dir + f) and not os.path.islink(pkg_dir + f):
- msg = "%s package is not obeying usrmerge distro feature. /%s should be relocated to /usr." % (pkg, f)
- package_qa_add_message(messages, "usrmerge", msg)
- return False
- return True
-
-QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
-def package_qa_check_perllocalpod(pkg, d, messages):
- """
-    Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
- installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
- handle this for most recipes.
- """
- import glob
- pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
- podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
-
- matches = glob.glob(podpath)
- if matches:
- matches = [package_qa_clean_path(path, d, pkg) for path in matches]
- msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
- package_qa_add_message(messages, "perllocalpod", msg)
-
-QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
-def package_qa_check_expanded_d(package, d, messages):
- """
- Check for the expanded D (${D}) value in pkg_* and FILES
-    variables and warn the user to use them correctly.
- """
- sane = True
- expanded_d = d.getVar('D')
-
- for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm':
- bbvar = d.getVar(var + "_" + package) or ""
- if expanded_d in bbvar:
- if var == 'FILES':
- package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package)
- sane = False
- else:
- package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package))
- sane = False
- return sane
-
-QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
-def package_qa_check_unlisted_pkg_lics(package, d, messages):
- """
- Check that all licenses for a package are among the licenses for the recipe.
- """
- pkg_lics = d.getVar('LICENSE_' + package)
- if not pkg_lics:
- return True
-
- recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
- unlisted = oe.license.list_licenses(pkg_lics) - recipe_lics_set
- if not unlisted:
- return True
-
- package_qa_add_message(messages, "unlisted-pkg-lics",
- "LICENSE_%s includes licenses (%s) that are not "
- "listed in LICENSE" % (package, ' '.join(unlisted)))
- return False
-
-def package_qa_check_encoding(keys, encode, d):
- def check_encoding(key, enc):
- sane = True
- value = d.getVar(key)
- if value:
- try:
- s = value.encode(enc)
-            except UnicodeEncodeError as e:
-                error_msg = "%s has non-%s characters" % (key,enc)
- sane = False
- package_qa_handle_error("invalid-chars", error_msg, d)
- return sane
-
- for key in keys:
- sane = check_encoding(key, encode)
- if not sane:
- break
-
-HOST_USER_UID := "${@os.getuid()}"
-HOST_USER_GID := "${@os.getgid()}"
-
-QAPATHTEST[host-user-contaminated] = "package_qa_check_host_user"
-def package_qa_check_host_user(path, name, d, elf, messages):
- """Check for paths outside of /home which are owned by the user running bitbake."""
-
- if not os.path.lexists(path):
- return
-
- dest = d.getVar('PKGDEST')
- pn = d.getVar('PN')
- home = os.path.join(dest, 'home')
- if path == home or path.startswith(home + os.sep):
- return
-
- try:
- stat = os.lstat(path)
- except OSError as exc:
- import errno
- if exc.errno != errno.ENOENT:
- raise
- else:
- check_uid = int(d.getVar('HOST_USER_UID'))
- if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
- return False
-
- check_gid = int(d.getVar('HOST_USER_GID'))
- if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
- return False
- return True
-
-QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
-def package_qa_check_src_uri(pn, d, messages):
- import re
-
- if "${PN}" in d.getVar("SRC_URI", False):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
-
- for url in d.getVar("SRC_URI").split():
- if re.search(r"github\.com/.+/.+/archive/.+", url):
- package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub archives" % pn, d)
-
-QARECIPETEST[unhandled-features-check] = "package_qa_check_unhandled_features_check"
-def package_qa_check_unhandled_features_check(pn, d, messages):
- if not bb.data.inherits_class('features_check', d):
- var_set = False
- for kind in ['DISTRO', 'MACHINE', 'COMBINED']:
- for var in ['ANY_OF_' + kind + '_FEATURES', 'REQUIRED_' + kind + '_FEATURES', 'CONFLICT_' + kind + '_FEATURES']:
- if d.getVar(var) is not None or d.overridedata.get(var) is not None:
- var_set = True
- if var_set:
- package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d)
-
-# The PACKAGE FUNC to scan each package
-python do_package_qa () {
- import subprocess
- import oe.packagedata
-
- bb.note("DO PACKAGE QA")
-
- bb.build.exec_func("read_subpackage_metadata", d)
-
-    # Check for non-UTF-8 characters in the recipe's metadata
- package_qa_check_encoding(['DESCRIPTION', 'SUMMARY', 'LICENSE', 'SECTION'], 'utf-8', d)
-
- logdir = d.getVar('T')
- pn = d.getVar('PN')
-
- # Check the compile log for host contamination
- compilelog = os.path.join(logdir,"log.do_compile")
-
- if os.path.exists(compilelog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The compile log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, compilelog)
- package_qa_handle_error("compile-host-path", msg, d)
-
- # Check the install log for host contamination
- installlog = os.path.join(logdir,"log.do_install")
-
- if os.path.exists(installlog):
- statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
- if subprocess.call(statement, shell=True) == 0:
- msg = "%s: The install log indicates that host include and/or library paths were used.\n \
- Please check the log '%s' for more information." % (pn, installlog)
- package_qa_handle_error("install-host-path", msg, d)
-
- # Scan the packages...
- pkgdest = d.getVar('PKGDEST')
- packages = set((d.getVar('PACKAGES') or '').split())
-
- global pkgfiles
- pkgfiles = {}
- for pkg in packages:
- pkgfiles[pkg] = []
- for walkroot, dirs, files in os.walk(os.path.join(pkgdest, pkg)):
- for file in files:
- pkgfiles[pkg].append(os.path.join(walkroot, file))
-
-    # nothing to scan if there are no packages
- if not packages:
- return
-
- import re
-    # Package names must match the [a-z0-9.+-]+ regular expression
- pkgname_pattern = re.compile(r"^[a-z0-9.+-]+$")
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- taskdeps = set()
- for dep in taskdepdata:
- taskdeps.add(taskdepdata[dep][0])
-
- def parse_test_matrix(matrix_name):
- testmatrix = d.getVarFlags(matrix_name) or {}
- g = globals()
- warnchecks = []
- for w in (d.getVar("WARN_QA") or "").split():
- if w in skip:
- continue
- if w in testmatrix and testmatrix[w] in g:
- warnchecks.append(g[testmatrix[w]])
-
- errorchecks = []
- for e in (d.getVar("ERROR_QA") or "").split():
- if e in skip:
- continue
- if e in testmatrix and testmatrix[e] in g:
- errorchecks.append(g[testmatrix[e]])
- return warnchecks, errorchecks
-
- for package in packages:
- skip = set((d.getVar('INSANE_SKIP') or "").split() +
- (d.getVar('INSANE_SKIP_' + package) or "").split())
- if skip:
- bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
-
- bb.note("Checking Package: %s" % package)
- # Check package name
- if not pkgname_pattern.match(package):
- package_qa_handle_error("pkgname",
- "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
-
- warn_checks, error_checks = parse_test_matrix("QAPATHTEST")
- package_qa_walk(warn_checks, error_checks, package, d)
-
- warn_checks, error_checks = parse_test_matrix("QAPKGTEST")
- package_qa_package(warn_checks, error_checks, package, d)
-
- package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
- package_qa_check_deps(package, pkgdest, d)
-
- warn_checks, error_checks = parse_test_matrix("QARECIPETEST")
- package_qa_recipe(warn_checks, error_checks, pn, d)
-
- if 'libdir' in d.getVar("ALL_QA").split():
- package_qa_check_libdir(d)
-
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("QA run found fatal errors. Please consider fixing them.")
- bb.note("DONE with PACKAGE QA")
-}
-
-# binutils is used for most checks, so it needs to be set as a dependency.
-# POPULATESYSROOTDEPS is defined in staging class.
-do_package_qa[depends] += "${POPULATESYSROOTDEPS}"
-do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
-do_package_qa[rdeptask] = "do_packagedata"
-addtask do_package_qa after do_packagedata do_package before do_build
-
-# Add the package specific INSANE_SKIPs to the sstate dependencies
-python() {
- pkgs = (d.getVar('PACKAGES') or '').split()
- for pkg in pkgs:
- d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
-}
-
-SSTATETASKS += "do_package_qa"
-do_package_qa[sstate-inputdirs] = ""
-do_package_qa[sstate-outputdirs] = ""
-python do_package_qa_setscene () {
- sstate_setscene(d)
-}
-addtask do_package_qa_setscene
-
-python do_qa_staging() {
- bb.note("QA checking staging")
- if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
- bb.fatal("QA staging was broken by the package built above")
-}
-
-python do_qa_patch() {
- import subprocess
-
- ###########################################################################
- # Check patch.log for fuzz warnings
- #
- # Further information on why we check for patch fuzz warnings:
- # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
- # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
- ###########################################################################
-
- logdir = d.getVar('T')
- patchlog = os.path.join(logdir,"log.do_patch")
-
- if os.path.exists(patchlog):
- fuzzheader = '--- Patch fuzz start ---'
- fuzzfooter = '--- Patch fuzz end ---'
- statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
- if subprocess.call(statement, shell=True) == 0:
- msg = "Fuzz detected:\n\n"
- fuzzmsg = ""
- inFuzzInfo = False
- f = open(patchlog, "r")
- for line in f:
- if fuzzheader in line:
- inFuzzInfo = True
- fuzzmsg = ""
- elif fuzzfooter in line:
- fuzzmsg = fuzzmsg.replace('\n\n', '\n')
- msg += fuzzmsg
- msg += "\n"
- inFuzzInfo = False
- elif inFuzzInfo and not 'Now at patch' in line:
- fuzzmsg += line
- f.close()
- msg += "The context lines in the patches can be updated with devtool:\n"
- msg += "\n"
- msg += " devtool modify %s\n" % d.getVar('PN')
- msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
- msg += "Don't forget to review changes done by devtool!\n"
- if 'patch-fuzz' in d.getVar('ERROR_QA'):
- bb.error(msg)
- elif 'patch-fuzz' in d.getVar('WARN_QA'):
- bb.warn(msg)
- msg = "Patch log indicates that patches do not apply cleanly."
- package_qa_handle_error("patch-fuzz", msg, d)
-}
-
-python do_qa_configure() {
- import subprocess
-
- ###########################################################################
- # Check config.log for cross compile issues
- ###########################################################################
-
- configs = []
- workdir = d.getVar('WORKDIR')
-
- skip = (d.getVar('INSANE_SKIP') or "").split()
- skip_configure_unsafe = False
- if 'configure-unsafe' in skip:
- bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
- skip_configure_unsafe = True
-
- if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
- bb.note("Checking autotools environment for common misconfiguration")
- for root, dirs, files in os.walk(workdir):
- statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
- os.path.join(root,"config.log")
- if "config.log" in files:
- if subprocess.call(statement, shell=True) == 0:
- error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
-Rerun configure task after fixing this."""
- package_qa_handle_error("configure-unsafe", error_msg, d)
-
- if "configure.ac" in files:
- configs.append(os.path.join(root,"configure.ac"))
- if "configure.in" in files:
- configs.append(os.path.join(root, "configure.in"))
-
- ###########################################################################
- # Check gettext configuration and dependencies are correct
- ###########################################################################
-
- skip_configure_gettext = False
- if 'configure-gettext' in skip:
- bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
- skip_configure_gettext = True
-
- cnf = d.getVar('EXTRA_OECONF') or ""
- if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
- "--disable-nls" in cnf or skip_configure_gettext):
- ml = d.getVar("MLPREFIX") or ""
- if bb.data.inherits_class('cross-canadian', d):
- gt = "nativesdk-gettext"
- else:
- gt = "gettext-native"
- deps = bb.utils.explode_deps(d.getVar('DEPENDS') or "")
- if gt not in deps:
- for config in configs:
- gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
- if subprocess.call(gnu, shell=True) == 0:
- error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
- package_qa_handle_error("configure-gettext", error_msg, d)
-
- ###########################################################################
-    # Check unrecognised configure options (with a whitelist)
- ###########################################################################
- if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
- bb.note("Checking configure output for unrecognised options")
- try:
- if bb.data.inherits_class("autotools", d):
- flag = "WARNING: unrecognized options:"
- log = os.path.join(d.getVar('B'), 'config.log')
- if bb.data.inherits_class("meson", d):
- flag = "WARNING: Unknown options:"
- log = os.path.join(d.getVar('T'), 'log.do_configure')
- output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
- options = set()
- for line in output.splitlines():
- options |= set(line.partition(flag)[2].split())
- whitelist = set(d.getVar("UNKNOWN_CONFIGURE_WHITELIST").split())
- options -= whitelist
- if options:
- pn = d.getVar('PN')
- error_msg = pn + ": configure was passed unrecognised options: " + " ".join(options)
- package_qa_handle_error("unknown-configure-option", error_msg, d)
- except subprocess.CalledProcessError:
- pass
-
- # Check invalid PACKAGECONFIG
- pkgconfig = (d.getVar("PACKAGECONFIG") or "").split()
- if pkgconfig:
- pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
- for pconfig in pkgconfig:
- if pconfig not in pkgconfigflags:
- pn = d.getVar('PN')
- error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
- package_qa_handle_error("invalid-packageconfig", error_msg, d)
-
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
-}
-
-python do_qa_unpack() {
- src_uri = d.getVar('SRC_URI')
- s_dir = d.getVar('S')
- if src_uri and not os.path.exists(s_dir):
- bb.warn('%s: the directory %s (%s) pointed to by the S variable doesn\'t exist - please set S within the recipe to point to where the source has been unpacked to' % (d.getVar('PN'), d.getVar('S', False), s_dir))
-}
-
-# The staging function, to check all staging
-#addtask qa_staging after do_populate_sysroot before do_build
-do_populate_sysroot[postfuncs] += "do_qa_staging "
-
-# Check for patch fuzz
-do_patch[postfuncs] += "do_qa_patch "
-
-# Check for broken config.log files and for packages requiring gettext
-# which don't have it in DEPENDS.
-#addtask qa_configure after do_configure before do_compile
-do_configure[postfuncs] += "do_qa_configure "
-
-# Check whether S exists.
-do_unpack[postfuncs] += "do_qa_unpack"
-
-python () {
- import re
-
- tests = d.getVar('ALL_QA').split()
- if "desktop" in tests:
- d.appendVar("PACKAGE_DEPENDS", " desktop-file-utils-native")
-
- ###########################################################################
- # Check various variables
- ###########################################################################
-
- # Checking ${FILESEXTRAPATHS}
- extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
- if '__default' not in extrapaths.split(":"):
-        msg = "The FILESEXTRAPATHS variable must always use the _prepend (or _append)\n"
- msg += "type of assignment, and don't forget the colon.\n"
- msg += "Please assign it with the format of:\n"
- msg += " FILESEXTRAPATHS_append := \":${THISDIR}/Your_Files_Path\" or\n"
- msg += " FILESEXTRAPATHS_prepend := \"${THISDIR}/Your_Files_Path:\"\n"
- msg += "in your bbappend file\n\n"
- msg += "Your incorrect assignment is:\n"
- msg += "%s\n" % extrapaths
- bb.warn(msg)
-
- overrides = d.getVar('OVERRIDES').split(':')
- pn = d.getVar('PN')
- if pn in overrides:
- msg = 'Recipe %s has PN of "%s" which is in OVERRIDES, this can result in unexpected behaviour.' % (d.getVar("FILE"), pn)
- package_qa_handle_error("pn-overrides", msg, d)
- prog = re.compile(r'[A-Z]')
- if prog.search(pn):
- package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
-
- # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
- # why it doesn't work.
- if (d.getVar(d.expand('DEPENDS_${PN}'))):
- package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
-
- issues = []
- if (d.getVar('PACKAGES') or "").split():
- for dep in (d.getVar('QADEPENDS') or "").split():
- d.appendVarFlag('do_package_qa', 'depends', " %s:do_populate_sysroot" % dep)
- for var in 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RCONFLICTS', 'RPROVIDES', 'RREPLACES', 'FILES', 'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm', 'ALLOW_EMPTY':
- if d.getVar(var, False):
- issues.append(var)
-
- fakeroot_tests = d.getVar('FAKEROOT_QA').split()
- if set(tests) & set(fakeroot_tests):
- d.setVarFlag('do_package_qa', 'fakeroot', '1')
- d.appendVarFlag('do_package_qa', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
- else:
- d.setVarFlag('do_package_qa', 'rdeptask', '')
- for i in issues:
- package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE"), i), d)
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
-}
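
For reference, a minimal sketch of how a recipe interacts with the QA machinery removed above: individual checks can be waived per package via INSANE_SKIP, and checks can be promoted between WARN_QA and ERROR_QA. The package and check names below are illustrative only, not taken from any particular recipe.

    # hypothetical recipe / local.conf fragment (pre-3.4 override syntax, as used above)
    INSANE_SKIP_${PN} += "dev-deps file-rdeps"
    # treat host contamination as a hard error instead of a warning
    ERROR_QA_append = " host-user-contaminated"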
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
deleted file mode 100644
index 07ec242e63..0000000000
--- a/meta/classes/kernel-arch.bbclass
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Set the ARCH environment variable for kernel compilation (including
-# modules). The return value must match one of the architecture directories
-# in the kernel source "arch" directory
-#
-
-valid_archs = "alpha cris ia64 \
- i386 x86 \
- m68knommu m68k ppc powerpc powerpc64 ppc64 \
- sparc sparc64 \
- arm aarch64 \
- m32r mips \
- sh sh64 um h8300 \
- parisc s390 v850 \
- avr32 blackfin \
- microblaze \
- nios2 arc riscv xtensa"
-
-def map_kernel_arch(a, d):
- import re
-
- valid_archs = d.getVar('valid_archs').split()
-
- if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
- elif re.match('arceb$', a): return 'arc'
- elif re.match('armeb$', a): return 'arm'
- elif re.match('aarch64$', a): return 'arm64'
- elif re.match('aarch64_be$', a): return 'arm64'
- elif re.match('aarch64_ilp32$', a): return 'arm64'
- elif re.match('aarch64_be_ilp32$', a): return 'arm64'
- elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a): return 'mips'
- elif re.match('mcf', a): return 'm68k'
- elif re.match('riscv(32|64|)(eb|)$', a): return 'riscv'
- elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
- elif re.match('sh(3|4)$', a): return 'sh'
- elif re.match('bfin', a): return 'blackfin'
- elif re.match('microblazee[bl]', a): return 'microblaze'
- elif a in valid_archs: return a
- else:
- if not d.getVar("TARGET_OS").startswith("linux"):
- return a
- bb.error("cannot map '%s' to a linux kernel architecture" % a)
-
-export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH'), d)}"
-
-def map_uboot_arch(a, d):
- import re
-
- if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
- elif re.match('i.86$', a): return 'x86'
- return a
-
-export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH'), d)}"
-
-# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
-# specific options necessary for building the kernel and modules.
-TARGET_CC_KERNEL_ARCH ?= ""
-HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
-TARGET_LD_KERNEL_ARCH ?= ""
-HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
-TARGET_AR_KERNEL_ARCH ?= ""
-HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
-KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
-KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
-
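As the comment in this class suggests, a machine .conf can inject architecture-specific flags into the kernel tool invocations; a sketch, where the flag values are assumptions rather than values from any real machine:

    # hypothetical machine.conf fragment
    TARGET_CC_KERNEL_ARCH = "-marm -mno-thumb-interwork"
    TARGET_LD_KERNEL_ARCH = ""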
diff --git a/meta/classes/kernel-artifact-names.bbclass b/meta/classes/kernel-artifact-names.bbclass
deleted file mode 100644
index bbeecba7bd..0000000000
--- a/meta/classes/kernel-artifact-names.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
-KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
-KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
-
-KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
-KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
-
-KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
-KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
-
-KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
-KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
-
-MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
-MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
-MODULE_TARBALL_DEPLOY ?= "1"
-
-INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
-INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
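
These naming defaults are meant to be overridden where needed; for example, a build could trim the deployed kernel artifact names to just the package version and machine with something like the following sketch:

    # hypothetical local.conf fragment
    KERNEL_ARTIFACT_NAME = "${PKGV}-${MACHINE}"
    KERNEL_IMAGE_LINK_NAME = "${MACHINE}"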
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
deleted file mode 100644
index 522c46575d..0000000000
--- a/meta/classes/kernel-devicetree.bbclass
+++ /dev/null
@@ -1,95 +0,0 @@
-# Support for device tree generation
-PACKAGES_append = " \
- ${KERNEL_PACKAGE_NAME}-devicetree \
- ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
-"
-FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
-FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
-
-# Generate kernel+devicetree bundle
-KERNEL_DEVICETREE_BUNDLE ?= "0"
-
-normalize_dtb () {
- dtb="$1"
- if echo $dtb | grep -q '/dts/'; then
-		bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
- dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
- fi
- echo "$dtb"
-}
-
-get_real_dtb_path_in_kernel () {
- dtb="$1"
- dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
- if [ ! -e "$dtb_path" ]; then
- dtb_path="${B}/arch/${ARCH}/boot/$dtb"
- fi
- echo "$dtb_path"
-}
-
-do_configure_append() {
- if [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
- if echo ${KERNEL_IMAGETYPE_FOR_MAKE} | grep -q 'zImage'; then
- case "${ARCH}" in
- "arm")
- config="${B}/.config"
- if ! grep -q 'CONFIG_ARM_APPENDED_DTB=y' $config; then
- bbwarn 'CONFIG_ARM_APPENDED_DTB is NOT enabled in the kernel. Enabling it to allow the kernel to boot with the Device Tree appended!'
- sed -i "/CONFIG_ARM_APPENDED_DTB[ =]/d" $config
- echo "CONFIG_ARM_APPENDED_DTB=y" >> $config
- echo "# CONFIG_ARM_ATAG_DTB_COMPAT is not set" >> $config
- fi
- ;;
- *)
- bberror "KERNEL_DEVICETREE_BUNDLE is not supported for ${ARCH}. Currently it is only supported for 'ARM'."
- esac
- else
- bberror 'The KERNEL_DEVICETREE_BUNDLE requires the KERNEL_IMAGETYPE to contain zImage.'
- fi
- fi
-}
-
-do_compile_append() {
- for dtbf in ${KERNEL_DEVICETREE}; do
- dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb
- done
-}
-
-do_install_append() {
- for dtbf in ${KERNEL_DEVICETREE}; do
- dtb=`normalize_dtb "$dtbf"`
- dtb_ext=${dtb##*.}
- dtb_base_name=`basename $dtb .$dtb_ext`
- dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
- install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
- done
-}
-
-do_deploy_append() {
- for dtbf in ${KERNEL_DEVICETREE}; do
- dtb=`normalize_dtb "$dtbf"`
- dtb_ext=${dtb##*.}
- dtb_base_name=`basename $dtb .$dtb_ext`
- install -d $deployDir
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
- for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
- if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
- cat ${D}/${KERNEL_IMAGEDEST}/$type \
- $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
- if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
- cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
- $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
- ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
- fi
- fi
- done
- done
-}
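
A machine configuration exercising this class would typically name the dtb files to build and, optionally, request the zImage bundle handled in do_deploy_append above; the dtb names below are assumptions for illustration:

    # hypothetical machine.conf fragment
    KERNEL_IMAGETYPE = "zImage"
    KERNEL_DEVICETREE = "example-board.dtb example-overlay.dtbo"
    KERNEL_DEVICETREE_BUNDLE = "1"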
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
deleted file mode 100644
index 72b05ff8d1..0000000000
--- a/meta/classes/kernel-fitimage.bbclass
+++ /dev/null
@@ -1,529 +0,0 @@
-inherit kernel-uboot kernel-artifact-names uboot-sign
-
-python __anonymous () {
- kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
- if 'fitImage' in kerneltypes.split():
- depends = d.getVar("DEPENDS")
- depends = "%s u-boot-tools-native dtc-native" % depends
- d.setVar("DEPENDS", depends)
-
- uarch = d.getVar("UBOOT_ARCH")
- if uarch == "arm64":
- replacementtype = "Image"
- elif uarch == "riscv":
- replacementtype = "Image"
- elif uarch == "mips":
- replacementtype = "vmlinuz.bin"
- elif uarch == "x86":
- replacementtype = "bzImage"
- elif uarch == "microblaze":
- replacementtype = "linux.bin"
- else:
- replacementtype = "zImage"
-
- # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
-    # to kernel.bbclass. We have to override it, since we pack zImage
-    # (at least for now) into the fitImage.
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
- if 'fitImage' in typeformake.split():
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
-
- image = d.getVar('INITRAMFS_IMAGE')
- if image:
- d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
-
-    # check if there are any dtb providers
- providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
- if providerdtb:
- d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
- d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
- d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
-
- # Verified boot will sign the fitImage and append the public key to
- # U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
- # the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
- d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
-}
-
-# Options for the device tree compiler passed to mkimage '-D' feature:
-UBOOT_MKIMAGE_DTCOPTS ??= ""
-
-# fitImage Hash Algo
-FIT_HASH_ALG ?= "sha256"
-
-# fitImage Signature Algo
-FIT_SIGN_ALG ?= "rsa2048"
-
-#
-# Emit the fitImage ITS header
-#
-# $1 ... .its filename
-fitimage_emit_fit_header() {
- cat << EOF >> ${1}
-/dts-v1/;
-
-/ {
- description = "U-Boot fitImage for ${DISTRO_NAME}/${PV}/${MACHINE}";
- #address-cells = <1>;
-EOF
-}
-
-#
-# Emit the fitImage section bits
-#
-# $1 ... .its filename
-# $2 ... Section bit type: imagestart - image section start
-# confstart - configuration section start
-# sectend - section end
-# fitend - fitimage end
-#
-fitimage_emit_section_maint() {
- case $2 in
- imagestart)
- cat << EOF >> ${1}
-
- images {
-EOF
- ;;
- confstart)
- cat << EOF >> ${1}
-
- configurations {
-EOF
- ;;
- sectend)
- cat << EOF >> ${1}
- };
-EOF
- ;;
- fitend)
- cat << EOF >> ${1}
-};
-EOF
- ;;
- esac
-}
-
-#
-# Emit the fitImage ITS kernel section
-#
-# $1 ... .its filename
-# $2 ... Image counter
-# $3 ... Path to kernel image
-# $4 ... Compression type
-fitimage_emit_section_kernel() {
-
- kernel_csum="${FIT_HASH_ALG}"
-
- ENTRYPOINT="${UBOOT_ENTRYPOINT}"
- if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
- ENTRYPOINT=`${HOST_PREFIX}nm vmlinux | \
- awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
- fi
-
- cat << EOF >> ${1}
- kernel@${2} {
- description = "Linux kernel";
- data = /incbin/("${3}");
- type = "kernel";
- arch = "${UBOOT_ARCH}";
- os = "linux";
- compression = "${4}";
- load = <${UBOOT_LOADADDRESS}>;
- entry = <${ENTRYPOINT}>;
- hash@1 {
- algo = "${kernel_csum}";
- };
- };
-EOF
-}
-
-#
-# Emit the fitImage ITS DTB section
-#
-# $1 ... .its filename
-# $2 ... Image counter
-# $3 ... Path to DTB image
-fitimage_emit_section_dtb() {
-
- dtb_csum="${FIT_HASH_ALG}"
-
- dtb_loadline=""
- dtb_ext=${DTB##*.}
- if [ "${dtb_ext}" = "dtbo" ]; then
- if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
- dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
- fi
- elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
- dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
- fi
- cat << EOF >> ${1}
- fdt@${2} {
- description = "Flattened Device Tree blob";
- data = /incbin/("${3}");
- type = "flat_dt";
- arch = "${UBOOT_ARCH}";
- compression = "none";
- ${dtb_loadline}
- hash@1 {
- algo = "${dtb_csum}";
- };
- };
-EOF
-}
-
-#
-# Emit the fitImage ITS setup section
-#
-# $1 ... .its filename
-# $2 ... Image counter
-# $3 ... Path to setup image
-fitimage_emit_section_setup() {
-
- setup_csum="${FIT_HASH_ALG}"
-
- cat << EOF >> ${1}
- setup@${2} {
- description = "Linux setup.bin";
- data = /incbin/("${3}");
- type = "x86_setup";
- arch = "${UBOOT_ARCH}";
- os = "linux";
- compression = "none";
- load = <0x00090000>;
- entry = <0x00090000>;
- hash@1 {
- algo = "${setup_csum}";
- };
- };
-EOF
-}
-
-#
-# Emit the fitImage ITS ramdisk section
-#
-# $1 ... .its filename
-# $2 ... Image counter
-# $3 ... Path to ramdisk image
-fitimage_emit_section_ramdisk() {
-
- ramdisk_csum="${FIT_HASH_ALG}"
- ramdisk_loadline=""
- ramdisk_entryline=""
-
- if [ -n "${UBOOT_RD_LOADADDRESS}" ]; then
- ramdisk_loadline="load = <${UBOOT_RD_LOADADDRESS}>;"
- fi
- if [ -n "${UBOOT_RD_ENTRYPOINT}" ]; then
- ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
- fi
-
- cat << EOF >> ${1}
- ramdisk@${2} {
- description = "${INITRAMFS_IMAGE}";
- data = /incbin/("${3}");
- type = "ramdisk";
- arch = "${UBOOT_ARCH}";
- os = "linux";
- compression = "none";
- ${ramdisk_loadline}
- ${ramdisk_entryline}
- hash@1 {
- algo = "${ramdisk_csum}";
- };
- };
-EOF
-}
-
-#
-# Emit the fitImage ITS configuration section
-#
-# $1 ... .its filename
-# $2 ... Linux kernel ID
-# $3 ... DTB image name
-# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
-fitimage_emit_section_config() {
-
- conf_csum="${FIT_HASH_ALG}"
- conf_sign_algo="${FIT_SIGN_ALG}"
- if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
- conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
- fi
-
- # Test if we have any DTBs at all
- sep=""
- conf_desc=""
- kernel_line=""
- fdt_line=""
- ramdisk_line=""
- setup_line=""
- default_line=""
-
- if [ -n "${2}" ]; then
- conf_desc="Linux kernel"
- sep=", "
- kernel_line="kernel = \"kernel@${2}\";"
- fi
-
- if [ -n "${3}" ]; then
- conf_desc="${conf_desc}${sep}FDT blob"
- sep=", "
- fdt_line="fdt = \"fdt@${3}\";"
- fi
-
- if [ -n "${4}" ]; then
- conf_desc="${conf_desc}${sep}ramdisk"
- sep=", "
- ramdisk_line="ramdisk = \"ramdisk@${4}\";"
- fi
-
- if [ -n "${5}" ]; then
- conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup@${5}\";"
- fi
-
- if [ "${6}" = "1" ]; then
- default_line="default = \"conf@${3}\";"
- fi
-
- cat << EOF >> ${1}
- ${default_line}
- conf@${3} {
- description = "${6} ${conf_desc}";
- ${kernel_line}
- ${fdt_line}
- ${ramdisk_line}
- ${setup_line}
- hash@1 {
- algo = "${conf_csum}";
- };
-EOF
-
- if [ ! -z "${conf_sign_keyname}" ] ; then
-
- sign_line="sign-images = "
- sep=""
-
- if [ -n "${2}" ]; then
- sign_line="${sign_line}${sep}\"kernel\""
- sep=", "
- fi
-
- if [ -n "${3}" ]; then
- sign_line="${sign_line}${sep}\"fdt\""
- sep=", "
- fi
-
- if [ -n "${4}" ]; then
- sign_line="${sign_line}${sep}\"ramdisk\""
- sep=", "
- fi
-
- if [ -n "${5}" ]; then
- sign_line="${sign_line}${sep}\"setup\""
- fi
-
- sign_line="${sign_line};"
-
- cat << EOF >> ${1}
- signature@1 {
- algo = "${conf_csum},${conf_sign_algo}";
- key-name-hint = "${conf_sign_keyname}";
- ${sign_line}
- };
-EOF
- fi
-
- cat << EOF >> ${1}
- };
-EOF
-}
-
-#
-# Assemble fitImage
-#
-# $1 ... .its filename
-# $2 ... fitImage name
-# $3 ... include ramdisk
-fitimage_assemble() {
- kernelcount=1
- dtbcount=""
- DTBS=""
- ramdiskcount=${3}
- setupcount=""
- rm -f ${1} arch/${ARCH}/boot/${2}
-
- fitimage_emit_fit_header ${1}
-
- #
- # Step 1: Prepare a kernel image section.
- #
- fitimage_emit_section_maint ${1} imagestart
-
- uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
-
- #
- # Step 2: Prepare a DTB image section
- #
-
- if [ -z "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -n "${KERNEL_DEVICETREE}" ]; then
- dtbcount=1
- for DTB in ${KERNEL_DEVICETREE}; do
- if echo ${DTB} | grep -q '/dts/'; then
-			bbwarn "${DTB} contains the full path to the dts file, but only the dtb name should be used."
- DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
- fi
- DTB_PATH="arch/${ARCH}/boot/dts/${DTB}"
- if [ ! -e "${DTB_PATH}" ]; then
- DTB_PATH="arch/${ARCH}/boot/${DTB}"
- fi
-
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} ${DTB_PATH}
- done
- fi
-
- if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
- dtbcount=1
- for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
- DTB=$(echo "${DTB}" | tr '/' '_')
- DTBS="${DTBS} ${DTB}"
- fitimage_emit_section_dtb ${1} ${DTB} "${EXTERNAL_KERNEL_DEVICETREE}/${DTB}"
- done
- fi
-
- #
- # Step 3: Prepare a setup section. (For x86)
- #
- if [ -e arch/${ARCH}/boot/setup.bin ]; then
- setupcount=1
- fitimage_emit_section_setup ${1} "${setupcount}" arch/${ARCH}/boot/setup.bin
- fi
-
- #
- # Step 4: Prepare a ramdisk section.
- #
- if [ "x${ramdiskcount}" = "x1" ] ; then
- # Find and use the first initramfs image archive type we find
- for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
- initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
-			if [ -e "${initramfs_path}" ]; then
-				echo "Using $initramfs_path"
- fitimage_emit_section_ramdisk ${1} "${ramdiskcount}" "${initramfs_path}"
- break
- fi
- done
- fi
-
- fitimage_emit_section_maint ${1} sectend
-
- # Force the first Kernel and DTB in the default config
- kernelcount=1
- if [ -n "${dtbcount}" ]; then
- dtbcount=1
- fi
-
- #
- # Step 5: Prepare a configurations section
- #
- fitimage_emit_section_maint ${1} confstart
-
- if [ -n "${DTBS}" ]; then
- i=1
- for DTB in ${DTBS}; do
- dtb_ext=${DTB##*.}
- if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
- else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
- fi
- i=`expr ${i} + 1`
- done
- fi
-
- fitimage_emit_section_maint ${1} sectend
-
- fitimage_emit_section_maint ${1} fitend
-
- #
- # Step 6: Assemble the image
- #
- uboot-mkimage \
- ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
- -f ${1} \
- arch/${ARCH}/boot/${2}
-
- #
- # Step 7: Sign the image and add public key to U-Boot dtb
- #
- if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
- add_key_to_u_boot=""
- if [ -n "${UBOOT_DTB_BINARY}" ]; then
-			# The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
- # both of them, and don't dereference the symlink.
- cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
- add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
- fi
- uboot-mkimage \
- ${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
- -F -k "${UBOOT_SIGN_KEYDIR}" \
- $add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
- fi
-}
-
-do_assemble_fitimage() {
- if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- cd ${B}
- fitimage_assemble fit-image.its fitImage
- fi
-}
-
-addtask assemble_fitimage before do_install after do_compile
-
-do_assemble_fitimage_initramfs() {
- if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
- test -n "${INITRAMFS_IMAGE}" ; then
- cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
- fi
-}
-
-addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
-
-
-kernel_do_deploy[vardepsexclude] = "DATETIME"
-kernel_do_deploy_append() {
- # Update deploy directory
- if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
-
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
-
- if [ -n "${INITRAMFS_IMAGE}" ]; then
- echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
-
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- fi
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
-			# UBOOT_DTB_IMAGE is a real file, but we can't use
-			# ${UBOOT_DTB_IMAGE} since it contains ${PV}, which is meant
-			# for u-boot, while we are in the kernel environment here.
- install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
- fi
- fi
-}
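
To exercise this class, a build selects fitImage as a kernel image type and, optionally, enables U-Boot signing. A sketch follows, assuming the usual KERNEL_CLASSES hook in kernel.bbclass; the load address, key directory and key name are illustrative assumptions rather than values from any real board:

    # hypothetical machine.conf / local.conf fragment
    KERNEL_IMAGETYPES += "fitImage"
    KERNEL_CLASSES += "kernel-fitimage"
    UBOOT_LOADADDRESS = "0x80008000"
    UBOOT_ENTRYPOINT = "0x80008000"
    # optional: sign configurations, consumed by fitimage_emit_section_config above
    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "/path/to/keys"
    UBOOT_SIGN_KEYNAME = "dev"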
diff --git a/meta/classes/kernel-grub.bbclass b/meta/classes/kernel-grub.bbclass
deleted file mode 100644
index 5d92f3b636..0000000000
--- a/meta/classes/kernel-grub.bbclass
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-# While installing an rpm to update the kernel on a deployed target, this class
-# updates the boot area and the boot menu so that the new kernel is the boot
-# priority, but still allows you to fall back to the original kernel.
-#
-# - In kernel-image's preinstall scriptlet, it backs up the original kernel to
-#   avoid a possible conflict with the new one.
-#
-# - In kernel-image's postinstall scriptlet, it modifies grub's config file to
-#   set the new kernel as the boot priority.
-#
-
-python __anonymous () {
- import re
-
- preinst = '''
-	# Check the grub config for conflicts with the new kernel name
- [ -f "$D/boot/grub/menu.list" ] && grubcfg="$D/boot/grub/menu.list"
- [ -f "$D/boot/grub/grub.cfg" ] && grubcfg="$D/boot/grub/grub.cfg"
- if [ -n "$grubcfg" ]; then
-		# Dereference symlink to avoid conflict with the new kernel name.
- if grep -q "/KERNEL_IMAGETYPE \+root=" $grubcfg; then
- if [ -L "$D/boot/KERNEL_IMAGETYPE" ]; then
- kimage=`realpath $D/boot/KERNEL_IMAGETYPE 2>/dev/null`
- if [ -f "$D$kimage" ]; then
- sed -i "s:KERNEL_IMAGETYPE \+root=:${kimage##*/} root=:" $grubcfg
- fi
- fi
- fi
-
- # Rename old kernel if it conflicts with new kernel name.
- if grep -q "/KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=" $grubcfg; then
- if [ -f "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" ]; then
- timestamp=`date +%s`
- kimage="$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}-$timestamp-back"
- sed -i "s:KERNEL_IMAGETYPE-${KERNEL_VERSION} \+root=:${kimage##*/} root=:" $grubcfg
- mv "$D/boot/KERNEL_IMAGETYPE-${KERNEL_VERSION}" "$kimage"
- fi
- fi
- fi
-'''
-
- postinst = '''
- get_new_grub_cfg() {
- grubcfg="$1"
- old_image="$2"
- title="Update KERNEL_IMAGETYPE-${KERNEL_VERSION}-${PV}"
- if [ "${grubcfg##*/}" = "grub.cfg" ]; then
- rootfs=`grep " *linux \+[^ ]\+ \+root=" $grubcfg -m 1 | \
- sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
-
- echo "menuentry \"$title\" {"
- echo " set root=(hd0,1)"
- echo "$rootfs"
- echo "}"
- elif [ "${grubcfg##*/}" = "menu.list" ]; then
- rootfs=`grep "kernel \+[^ ]\+ \+root=" $grubcfg -m 1 | \
- sed "s#${old_image}#${old_image%/*}/KERNEL_IMAGETYPE-${KERNEL_VERSION}#"`
-
- echo "default 0"
- echo "timeout 30"
- echo "title $title"
- echo "root (hd0,0)"
- echo "$rootfs"
- fi
- }
-
- get_old_grub_cfg() {
- grubcfg="$1"
- if [ "${grubcfg##*/}" = "grub.cfg" ]; then
- cat "$grubcfg"
- elif [ "${grubcfg##*/}" = "menu.list" ]; then
- sed -e '/^default/d' -e '/^timeout/d' "$grubcfg"
- fi
- }
-
- if [ -f "$D/boot/grub/grub.cfg" ]; then
- grubcfg="$D/boot/grub/grub.cfg"
- old_image=`grep ' *linux \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
- elif [ -f "$D/boot/grub/menu.list" ]; then
- grubcfg="$D/boot/grub/menu.list"
- old_image=`grep '^kernel \+[^ ]\+ \+root=' -m 1 "$grubcfg" | awk '{print $2}'`
- fi
-
- # Don't update grubcfg at first install while old bzImage doesn't exist.
- if [ -f "$D/boot/${old_image##*/}" ]; then
- grubcfgtmp="$grubcfg.tmp"
- get_new_grub_cfg "$grubcfg" "$old_image" > $grubcfgtmp
- get_old_grub_cfg "$grubcfg" >> $grubcfgtmp
- mv $grubcfgtmp $grubcfg
-		echo "Caution! Updating the kernel may affect kernel modules!"
- fi
-'''
-
- imagetypes = d.getVar('KERNEL_IMAGETYPES')
- imagetypes = re.sub(r'\.gz$', '', imagetypes)
-
- for type in imagetypes.split():
- typelower = type.lower()
- preinst_append = preinst.replace('KERNEL_IMAGETYPE', type)
- postinst_prepend = postinst.replace('KERNEL_IMAGETYPE', type)
- d.setVar('pkg_preinst_kernel-image-' + typelower + '_append', preinst_append)
- d.setVar('pkg_postinst_kernel-image-' + typelower + '_prepend', postinst_prepend)
-}
-
diff --git a/meta/classes/kernel-module-split.bbclass b/meta/classes/kernel-module-split.bbclass
deleted file mode 100644
index 221022b7bc..0000000000
--- a/meta/classes/kernel-module-split.bbclass
+++ /dev/null
@@ -1,175 +0,0 @@
-pkg_postinst_modules () {
-if [ -z "$D" ]; then
- depmod -a ${KERNEL_VERSION}
-else
- # image.bbclass will call depmodwrapper after everything is installed,
- # no need to do it here as well
- :
-fi
-}
-
-pkg_postrm_modules () {
-if [ -z "$D" ]; then
- depmod -a ${KERNEL_VERSION}
-else
- depmodwrapper -a -b $D ${KERNEL_VERSION}
-fi
-}
-
-autoload_postinst_fragment() {
-if [ x"$D" = "x" ]; then
- modprobe %s || true
-fi
-}
-
-PACKAGE_WRITE_DEPS += "kmod-native depmodwrapper-cross"
-
-do_install_append() {
- install -d ${D}${sysconfdir}/modules-load.d/ ${D}${sysconfdir}/modprobe.d/
-}
-
-PACKAGESPLITFUNCS_prepend = "split_kernel_module_packages "
-
-KERNEL_MODULES_META_PACKAGE ?= "${@ d.getVar("KERNEL_PACKAGE_NAME") or "kernel" }-modules"
-
-KERNEL_MODULE_PACKAGE_PREFIX ?= ""
-KERNEL_MODULE_PACKAGE_SUFFIX ?= "-${KERNEL_VERSION}"
-KERNEL_MODULE_PROVIDE_VIRTUAL ?= "1"
-
-python split_kernel_module_packages () {
- import re
-
- modinfoexp = re.compile("([^=]+)=(.*)")
-
- def extract_modinfo(file):
- import tempfile, subprocess
- tempfile.tempdir = d.getVar("WORKDIR")
- compressed = re.match( r'.*\.([xg])z$', file)
- tf = tempfile.mkstemp()
- tmpfile = tf[1]
- if compressed:
- tmpkofile = tmpfile + ".ko"
- if compressed.group(1) == 'g':
- cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
- subprocess.check_call(cmd, shell=True)
- elif compressed.group(1) == 'x':
- cmd = "xz -dc %s > %s" % (file, tmpkofile)
- subprocess.check_call(cmd, shell=True)
- else:
- msg = "Cannot decompress '%s'" % file
-                raise ValueError(msg)
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
- else:
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
- subprocess.check_call(cmd, shell=True)
- # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
- f = open(tmpfile, errors='replace')
- l = f.read().split("\000")
- f.close()
- os.close(tf[0])
- os.unlink(tmpfile)
- if compressed:
- os.unlink(tmpkofile)
- vals = {}
- for i in l:
- m = modinfoexp.match(i)
- if not m:
- continue
- vals[m.group(1)] = m.group(2)
- return vals
-
- def frob_metadata(file, pkg, pattern, format, basename):
- vals = extract_modinfo(file)
-
- dvar = d.getVar('PKGD')
-
- # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
- # appropriate modprobe commands to the postinst
- autoloadlist = (d.getVar("KERNEL_MODULE_AUTOLOAD") or "").split()
- autoload = d.getVar('module_autoload_%s' % basename)
- if autoload and autoload == basename:
- bb.warn("module_autoload_%s was replaced by KERNEL_MODULE_AUTOLOAD for cases where basename == module name, please drop it" % basename)
- if autoload and basename not in autoloadlist:
- bb.warn("module_autoload_%s is defined but '%s' isn't included in KERNEL_MODULE_AUTOLOAD, please add it there" % (basename, basename))
- if basename in autoloadlist:
- name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
- f = open(name, 'w')
- if autoload:
- for m in autoload.split():
- f.write('%s\n' % m)
- else:
- f.write('%s\n' % basename)
- f.close()
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- bb.fatal("pkg_postinst_%s not defined" % pkg)
- postinst += d.getVar('autoload_postinst_fragment') % (autoload or basename)
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- # Write out any modconf fragment
- modconflist = (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()
- modconf = d.getVar('module_conf_%s' % basename)
- if modconf and basename in modconflist:
- name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
- f = open(name, 'w')
- f.write("%s\n" % modconf)
- f.close()
- elif modconf:
- bb.error("Please ensure module %s is listed in KERNEL_MODULE_PROBECONF since module_conf_%s is set" % (basename, basename))
-
- files = d.getVar('FILES_%s' % pkg)
- files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
- d.setVar('FILES_%s' % pkg, files)
-
- if "description" in vals:
- old_desc = d.getVar('DESCRIPTION_' + pkg) or ""
- d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
-
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
- modinfo_deps = []
- if "depends" in vals and vals["depends"] != "":
- for dep in vals["depends"].split(","):
- on = legitimize_package_name(dep)
- dependency_pkg = format % on
- modinfo_deps.append(dependency_pkg)
- for dep in modinfo_deps:
- if not dep in rdepends:
- rdepends[dep] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
-
- # Avoid automatic -dev recommendations for modules ending with -dev.
- d.setVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', 1)
-
- # Provide virtual package without postfix
- providevirt = d.getVar('KERNEL_MODULE_PROVIDE_VIRTUAL')
- if providevirt == "1":
- postfix = format.split('%s')[1]
- d.setVar('RPROVIDES_' + pkg, pkg.replace(postfix, ''))
-
- kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
- kernel_version = d.getVar("KERNEL_VERSION")
-
- module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
-
- module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
- module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
- module_pattern = module_pattern_prefix + kernel_package_name + '-module-%s' + module_pattern_suffix
-
- postinst = d.getVar('pkg_postinst_modules')
- postrm = d.getVar('pkg_postrm_modules')
-
- modules = do_split_packages(d, root='${nonarch_base_libdir}/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='%s-%s' % (kernel_package_name, kernel_version))
- if modules:
- metapkg = d.getVar('KERNEL_MODULES_META_PACKAGE')
- d.appendVar('RDEPENDS_' + metapkg, ' '+' '.join(modules))
-
- # If modules-load.d and modprobe.d are empty at this point, remove them to
-    # avoid warnings. Only directories that are verified to be empty are
-    # removed, so os.rmdir() will not fail here.
- dvar = d.getVar('PKGD')
- for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
- if len(os.listdir(dir)) == 0:
- os.rmdir(dir)
-}
-
-do_package[vardeps] += '${@" ".join(map(lambda s: "module_conf_" + s, (d.getVar("KERNEL_MODULE_PROBECONF") or "").split()))}'
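
The autoload and modprobe.d handling in split_kernel_module_packages is driven from metadata along the lines of the following sketch; the module name and option are illustrative assumptions:

    # hypothetical machine.conf / recipe fragment
    KERNEL_MODULE_AUTOLOAD += "i2c-dev"
    KERNEL_MODULE_PROBECONF += "i2c-dev"
    module_conf_i2c-dev = "options i2c-dev example_param=1"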
diff --git a/meta/classes/kernel-uboot.bbclass b/meta/classes/kernel-uboot.bbclass
deleted file mode 100644
index 87f02654fa..0000000000
--- a/meta/classes/kernel-uboot.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
-uboot_prep_kimage() {
- if [ -e arch/${ARCH}/boot/compressed/vmlinux ]; then
- vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
- linux_suffix=""
- linux_comp="none"
- elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
- rm -f linux.bin
- cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
- vmlinux_path=""
- linux_suffix=""
- linux_comp="none"
- else
- vmlinux_path="vmlinux"
- linux_suffix=".gz"
- linux_comp="gzip"
- fi
-
- [ -n "${vmlinux_path}" ] && ${OBJCOPY} -O binary -R .note -R .comment -S "${vmlinux_path}" linux.bin
-
- if [ "${linux_comp}" != "none" ] ; then
- gzip -9 linux.bin
- mv -f "linux.bin${linux_suffix}" linux.bin
- fi
-
- echo "${linux_comp}"
-}
diff --git a/meta/classes/kernel-uimage.bbclass b/meta/classes/kernel-uimage.bbclass
deleted file mode 100644
index cedb4fa070..0000000000
--- a/meta/classes/kernel-uimage.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
-inherit kernel-uboot
-
-python __anonymous () {
- if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
- depends = d.getVar("DEPENDS")
- depends = "%s u-boot-tools-native" % depends
- d.setVar("DEPENDS", depends)
-
- # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
-    # to kernel.bbclass. We override the variable here, since we need
-    # to build uImage using the kernel build system if and only if
-    # KEEPUIMAGE == yes. Otherwise, we pack compressed vmlinux into
-    # the uImage.
- if d.getVar("KEEPUIMAGE") != 'yes':
- typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
- if "uImage" in typeformake.split():
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('uImage', 'vmlinux'))
-
- # Enable building of uImage with mkimage
- bb.build.addtask('do_uboot_mkimage', 'do_install', 'do_kernel_link_images', d)
-}
-
-do_uboot_mkimage[dirs] += "${B}"
-do_uboot_mkimage() {
- uboot_prep_kimage
-
- ENTRYPOINT=${UBOOT_ENTRYPOINT}
- if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
- ENTRYPOINT=`${HOST_PREFIX}nm ${B}/vmlinux | \
- awk '$3=="${UBOOT_ENTRYSYMBOL}" {print "0x"$1;exit}'`
- fi
-
- uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C "${linux_comp}" -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${B}/arch/${ARCH}/boot/uImage
- rm -f linux.bin
-}
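
A board producing uImage via this class normally provides the U-Boot entry point and load address consumed by do_uboot_mkimage; the addresses below are illustrative assumptions:

    # hypothetical machine.conf fragment
    KERNEL_IMAGETYPES += "uImage"
    UBOOT_ENTRYPOINT = "0x80008000"
    UBOOT_LOADADDRESS = "0x80008000"
    # set KEEPUIMAGE = "yes" to let the kernel build system build uImage itself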
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
deleted file mode 100644
index 41d8620e67..0000000000
--- a/meta/classes/kernel-yocto.bbclass
+++ /dev/null
@@ -1,512 +0,0 @@
-# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
-PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
-PATCH_GIT_USER_NAME ?= "OpenEmbedded"
-
-# The distro or local.conf should set this, but if nobody cares...
-LINUX_KERNEL_TYPE ??= "standard"
-
-# KMETA ?= ""
-KBRANCH ?= "master"
-KMACHINE ?= "${MACHINE}"
-SRCREV_FORMAT ?= "meta_machine"
-
-# LEVELS:
-# 0: no reporting
-# 1: report options that are specified, but not in the final config
-# 2: report options that are not hardware related, but set by a BSP
-KCONF_AUDIT_LEVEL ?= "1"
-KCONF_BSP_AUDIT_LEVEL ?= "0"
-KMETA_AUDIT ?= "yes"
-
-# returns local (absolute) path names for all valid patches in the
-# src_uri
-def find_patches(d,subdir):
- patches = src_patches(d)
- patch_list=[]
- for p in patches:
- _, _, local, _, _, parm = bb.fetch.decodeurl(p)
- # if patchdir has been passed, we won't be able to apply it so skip
- # the patch for now, and special processing happens later
- patchdir = ''
- if "patchdir" in parm:
- patchdir = parm["patchdir"]
- if subdir:
- if subdir == patchdir:
- patch_list.append(local)
- else:
- patch_list.append(local)
-
- return patch_list
-
-# returns all the elements from the src uri that are .scc files
-def find_sccs(d):
- sources=src_patches(d, True)
- sources_list=[]
- for s in sources:
- base, ext = os.path.splitext(os.path.basename(s))
- if ext and ext in [".scc", ".cfg"]:
- sources_list.append(s)
- elif base and 'defconfig' in base:
- sources_list.append(s)
-
- return sources_list
-
-# check the SRC_URI for "kmeta" typed git repositories. Return the name of
-# the repository as it will be found in WORKDIR
-def find_kernel_feature_dirs(d):
- feature_dirs=[]
- fetch = bb.fetch2.Fetch([], d)
- for url in fetch.urls:
- urldata = fetch.ud[url]
- parm = urldata.parm
- type=""
- if "type" in parm:
- type = parm["type"]
- if "destsuffix" in parm:
- destdir = parm["destsuffix"]
- if type == "kmeta":
- feature_dirs.append(destdir)
-
- return feature_dirs
-
-# find the master/machine source branch. In the same way that the fetcher processes
-# git repositories in the SRC_URI, we take the first repo found, first branch.
-def get_machine_branch(d, default):
- fetch = bb.fetch2.Fetch([], d)
- for url in fetch.urls:
- urldata = fetch.ud[url]
- parm = urldata.parm
- if "branch" in parm:
- branches = urldata.parm.get("branch").split(',')
- btype = urldata.parm.get("type")
- if btype != "kmeta":
- return branches[0]
-
- return default
-
-do_kernel_metadata() {
- set +e
- cd ${S}
- export KMETA=${KMETA}
-
- # if kernel tools are available in-tree, they are preferred
-	# and are placed on the path before any external tools, unless
-	# the external tools flag is set, in which case we do nothing.
- if [ -f "${S}/scripts/util/configme" ]; then
- if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
- PATH=${S}/scripts/util:${PATH}
- fi
- fi
-
- # In a similar manner to the kernel itself:
- #
- # defconfig: $(obj)/conf
- # ifeq ($(KBUILD_DEFCONFIG),)
- # $< --defconfig $(Kconfig)
- # else
- # @echo "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
- # $(Q)$< --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
- # endif
- #
- # If a defconfig is specified via the KBUILD_DEFCONFIG variable, we copy it
- # from the source tree, into a common location and normalized "defconfig" name,
-	# where the rest of the process will include and incorporate it into the build
- #
- # If the fetcher has already placed a defconfig in WORKDIR (from the SRC_URI),
- # we don't overwrite it, but instead warn the user that SRC_URI defconfigs take
-	# precedence.
- #
- if [ -n "${KBUILD_DEFCONFIG}" ]; then
- if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then
- if [ -f "${WORKDIR}/defconfig" ]; then
-				# If the two defconfigs are different, warn that we didn't overwrite the
- # one already placed in WORKDIR by the fetcher.
- cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}"
- if [ $? -ne 0 ]; then
- bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped"
- else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
- fi
- else
- cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
- fi
- in_tree_defconfig="${WORKDIR}/defconfig"
- else
- bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
- fi
- fi
-
-	# Was anyone trying to patch the kernel metadata? We need to do
- # this here, since the scc commands migrate the .cfg fragments to the
- # kernel source tree, where they'll be used later.
- check_git_config
- patches="${@" ".join(find_patches(d,'kernel-meta'))}"
- for p in $patches; do
- (
- cd ${WORKDIR}/kernel-meta
- git am -s $p
- )
- done
-
- sccs_from_src_uri="${@" ".join(find_sccs(d))}"
- patches="${@" ".join(find_patches(d,''))}"
- feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
-
-	# a quick check to make sure we don't have duplicate defconfigs. If
-	# there's a defconfig in the SRC_URI, did we also have one from the
-	# KBUILD_DEFCONFIG processing above?
- src_uri_defconfig=$(echo $sccs_from_src_uri | awk '{ if ($0=="defconfig") { print $0 } }' RS=' ')
-	# drop any defconfigs from the src_uri variable; we captured it just above if it existed
- sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
- if [ -n "$in_tree_defconfig" ]; then
- sccs_defconfig=$in_tree_defconfig
- if [ -n "$src_uri_defconfig" ]; then
- bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
- fi
- else
- # if we didn't have an in-tree one, make our defconfig the one
- # from the src_uri. Note: there may not have been one from the
- # src_uri, so this can be an empty variable.
- sccs_defconfig=$src_uri_defconfig
- fi
- sccs="$sccs_from_src_uri"
-
- # check for feature directories/repos/branches that were part of the
- # SRC_URI. If they were supplied, we convert them into include directives
- # for the update part of the process
- for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
- includes="$includes -I${WORKDIR}/$f/kernel-meta"
- elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
- includes="$includes -I${WORKDIR}/../oe-local-files/$f"
- elif [ -d "${WORKDIR}/$f" ]; then
- includes="$includes -I${WORKDIR}/$f"
- fi
- done
- for s in ${sccs} ${patches}; do
- sdir=$(dirname $s)
- includes="$includes -I${sdir}"
- # if a SRC_URI passed patch or .scc has a subdir of "kernel-meta",
- # then we add it to the search path
- if [ -d "${sdir}/kernel-meta" ]; then
- includes="$includes -I${sdir}/kernel-meta"
- fi
- done
-
- # expand kernel features into their full path equivalents
- bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
- if [ -z "$bsp_definition" ]; then
- if [ -z "$sccs_defconfig" ]; then
- bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
- fi
- else
- # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
- # then we need to set a flag that will instruct the next
- # steps to use the BSP as both configuration and patches.
- grep -q KMETA_EXTERNAL_BSP $bsp_definition
- if [ $? -eq 0 ]; then
- KMETA_EXTERNAL_BSPS="t"
- fi
- fi
- meta_dir=$(kgit --meta)
-
- # run1: pull all the configuration fragments, no matter where they come from
- elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
- scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
- fi
- fi
-
- # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
- # the bsp definition, then we inject the bsp_definition into the
-	# patch phase below. We'll piggyback on the sccs variable.
- if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
- sccs="${bsp_definition} ${sccs}"
- fi
-
- # run2: only generate patches for elements that have been passed on the SRC_URI
- elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
- if [ -n "${elements}" ]; then
- scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES}
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
- fi
- fi
-}
-
-do_patch() {
- set +e
- cd ${S}
-
- check_git_config
- meta_dir=$(kgit --meta)
- (cd ${meta_dir}; ln -sf patch.queue series)
- if [ -f "${meta_dir}/series" ]; then
- kgit-s2q --gen -v --patches .kernel-meta/
- if [ $? -ne 0 ]; then
- bberror "Could not apply patches for ${KMACHINE}."
- bbfatal_log "Patch failures can be resolved in the linux source directory ${S})"
- fi
- fi
-
- if [ -f "${meta_dir}/merge.queue" ]; then
- # we need to merge all these branches
- for b in $(cat ${meta_dir}/merge.queue); do
- git show-ref --verify --quiet refs/heads/${b}
- if [ $? -eq 0 ]; then
- bbnote "Merging branch ${b}"
- git merge -q --no-ff -m "Merge branch ${b}" ${b}
- else
- bbfatal "branch ${b} does not exist, cannot merge"
- fi
- done
- fi
-}
-
-do_kernel_checkout() {
- set +e
-
- source_dir=`echo ${S} | sed 's%/$%%'`
- source_workdir="${WORKDIR}/git"
- if [ -d "${WORKDIR}/git/" ]; then
- # case: git repository
- # if S is WORKDIR/git, then we shouldn't be moving or deleting the tree.
- if [ "${source_dir}" != "${source_workdir}" ]; then
- if [ -d "${source_workdir}/.git" ]; then
- # regular git repository with .git
- rm -rf ${S}
- mv ${WORKDIR}/git ${S}
- else
- # create source for bare cloned git repository
- git clone ${WORKDIR}/git ${S}
- rm -rf ${WORKDIR}/git
- fi
- fi
- cd ${S}
- else
- # case: we have no git repository at all.
- # To support low bandwidth options for building the kernel, we'll just
- # convert the tree to a git repo and let the rest of the process work unchanged
-
-		# if ${S} hasn't been set to the proper subdirectory, a default of "linux" is
- # used, but we can't initialize that empty directory. So check it and throw a
- # clear error
-
- cd ${S}
- if [ ! -f "Makefile" ]; then
- bberror "S is not set to the linux source directory. Check "
- bbfatal "the recipe and set S to the proper extracted subdirectory"
- fi
- rm -f .gitignore
- git init
- check_git_config
- git add .
- git commit -q -m "baseline commit: creating repo for ${PN}-${PV}"
- git clean -d -f
- fi
-
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
-}
-do_kernel_checkout[dirs] = "${S}"
-
-addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
-addtask kernel_metadata after do_validate_branches do_unpack before do_patch
-do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
-do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
-
-do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
-do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
-do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
-do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
-do_kernel_configme[dirs] += "${S} ${B}"
-do_kernel_configme() {
- # translate the kconfig_mode into something that merge_config.sh
- # understands
- case ${KCONFIG_MODE} in
- *allnoconfig)
- config_flags="-n"
- ;;
- *alldefconfig)
- config_flags=""
- ;;
- *)
- if [ -f ${WORKDIR}/defconfig ]; then
- config_flags="-n"
- fi
- ;;
- esac
-
- cd ${S}
-
- meta_dir=$(kgit --meta)
- configs="$(scc --configs -o ${meta_dir})"
- if [ $? -ne 0 ]; then
- bberror "${configs}"
- bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
- fi
-
- CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
- if [ $? -ne 0 -o ! -f ${B}/.config ]; then
- bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
- if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
- bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
- else
- bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
- fi
- fi
-
- if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
- echo "# Global settings from linux recipe" >> ${B}/.config
- echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
- fi
-}
-
-addtask kernel_configme before do_configure after do_patch
-
-python do_kernel_configcheck() {
- import re, string, sys, subprocess
-
- # if KMETA isn't set globally by a recipe using this routine, we need to
- # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
- # meta-series for processing
- kmeta = d.getVar("KMETA") or "meta"
- if not os.path.exists(kmeta):
- kmeta = "." + kmeta
-
- s = d.getVar('S')
-
- env = os.environ.copy()
- env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
- env['LD'] = "${KERNEL_LD}"
-
- try:
- configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
- except subprocess.CalledProcessError as e:
- bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
-
- try:
- subprocess.check_call(['kconf_check', '--report', '-o',
- '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
- except subprocess.CalledProcessError:
- # The configuration gathering can return different exit codes, but
- # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
- # everything here, and let the run continue.
- pass
-
- config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
- bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
-
- # if config check visibility is non-zero, report dropped configuration values
- mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta)
- if os.path.exists(mismatch_file):
- if config_check_visibility:
- with open (mismatch_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
-
- if bsp_check_visibility:
- invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
- if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
- with open (invalid_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
- errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
- if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
- with open (errors_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
-
-    # if the audit level is greater than two, we report if a fragment has overridden
- # a value from a base fragment. This is really only used for new kernel introduction
- if bsp_check_visibility > 2:
- redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
- if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
- with open (redefinition_file, "r") as myfile:
- results = myfile.read()
- bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
-}
-
-# Ensure that the branches (BSP and meta) are at the locations specified by
-# their SRCREV values. If they are NOT on the right commits, the branches
-# are corrected to the proper commit.
-do_validate_branches() {
- set +e
- cd ${S}
-
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
- machine_srcrev="${SRCREV_machine}"
-
-	# if SRCREV is AUTOREV it shows up as AUTOINC; there's nothing to
- # check and we can exit early
- if [ "${machine_srcrev}" = "AUTOINC" ]; then
- bbnote "SRCREV validation is not required for AUTOREV"
- elif [ "${machine_srcrev}" = "" ]; then
- if [ "${SRCREV}" != "AUTOINC" ] && [ "${SRCREV}" != "INVALID" ]; then
- # SRCREV_machine_<MACHINE> was not set. This means that a custom recipe
- # that doesn't use the SRCREV_FORMAT "machine_meta" is being built. In
-			# this case, we need to reset to the given SRCREV before heading to patching
- bbnote "custom recipe is being built, forcing SRCREV to ${SRCREV}"
- force_srcrev="${SRCREV}"
- fi
- else
- git cat-file -t ${machine_srcrev} > /dev/null
- if [ $? -ne 0 ]; then
- bberror "${machine_srcrev} is not a valid commit ID."
- bbfatal_log "The kernel source tree may be out of sync"
- fi
- force_srcrev=${machine_srcrev}
- fi
-
- git checkout -q -f ${machine_branch}
- if [ -n "${force_srcrev}" ]; then
- # see if the branch we are about to patch has been properly reset to the defined
-		# SRCREV; if not, we reset it.
- branch_head=`git rev-parse HEAD`
- if [ "${force_srcrev}" != "${branch_head}" ]; then
- current_branch=`git rev-parse --abbrev-ref HEAD`
- git branch "$current_branch-orig"
- git reset --hard ${force_srcrev}
-			# We've checked out HEAD; make sure we clean up the kgit-s2q fence-post check
-			# so the patches are applied as expected, otherwise no patching
-			# would be done in some corner cases.
- kgit-s2q --clean
- fi
- fi
-}
-
-OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
-KBUILD_OUTPUT = "${B}"
-
-python () {
- # If diffconfig is available, ensure it runs after kernel_configme
- if 'do_diffconfig' in d:
- bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
-
- externalsrc = d.getVar('EXTERNALSRC')
- if externalsrc:
- # If we deltask do_patch, do_kernel_configme is left without
- # dependencies and runs too early
- d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
-}
-
-# extra tasks
-addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
-addtask validate_branches before do_patch after do_kernel_checkout
-addtask kernel_configcheck after do_configure before do_compile
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
deleted file mode 100644
index c9044befbe..0000000000
--- a/meta/classes/kernel.bbclass
+++ /dev/null
@@ -1,740 +0,0 @@
-inherit linux-kernel-base kernel-module-split
-
-KERNEL_PACKAGE_NAME ??= "kernel"
-KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }"
-
-PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
-DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
-DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
-PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
-
-do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
-do_clean[depends] += "make-mod-scripts:do_clean"
-
-CVE_PRODUCT ?= "linux_kernel"
-
-S = "${STAGING_KERNEL_DIR}"
-B = "${WORKDIR}/build"
-KBUILD_OUTPUT = "${B}"
-OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
-
-# we include gcc above, so we don't need virtual/libc
-INHIBIT_DEFAULT_DEPS = "1"
-
-KERNEL_IMAGETYPE ?= "zImage"
-INITRAMFS_IMAGE ?= ""
-INITRAMFS_IMAGE_NAME ?= "${@['${INITRAMFS_IMAGE}-${MACHINE}', ''][d.getVar('INITRAMFS_IMAGE') == '']}"
-INITRAMFS_TASK ?= ""
-INITRAMFS_IMAGE_BUNDLE ?= ""
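-# e.g. a BSP bundling an initramfs into the kernel image might set
-# (illustrative values):
-#   INITRAMFS_IMAGE = "core-image-minimal-initramfs"
-#   INITRAMFS_IMAGE_BUNDLE = "1"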
-
-# KERNEL_VERSION is extracted from source code. It is evaluated as
-# None for the first parsing, since the code has not been fetched.
-# After the code is fetched, it will be evaluated as a real version
-# number and cause the kernel to be rebuilt. To avoid this, make
-# KERNEL_VERSION_NAME and KERNEL_VERSION_PKG_NAME depend on
-# LINUX_VERSION which is a constant.
-KERNEL_VERSION_NAME = "${@d.getVar('KERNEL_VERSION') or ""}"
-KERNEL_VERSION_NAME[vardepvalue] = "${LINUX_VERSION}"
-KERNEL_VERSION_PKG_NAME = "${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-KERNEL_VERSION_PKG_NAME[vardepvalue] = "${LINUX_VERSION}"
-
-python __anonymous () {
- pn = d.getVar("PN")
- kpn = d.getVar("KERNEL_PACKAGE_NAME")
-
- # XXX Remove this after bug 11905 is resolved
- # FILES_${KERNEL_PACKAGE_NAME}-dev doesn't expand correctly
- if kpn == pn:
- bb.warn("Some packages (E.g. *-dev) might be missing due to "
- "bug 11905 (variable KERNEL_PACKAGE_NAME == PN)")
-
- # The default kernel recipe builds in a shared location defined by
- # bitbake/distro confs: STAGING_KERNEL_DIR and STAGING_KERNEL_BUILDDIR.
- # Set these variables to directories under ${WORKDIR} in alternate
- # kernel recipes (I.e. where KERNEL_PACKAGE_NAME != kernel) so that they
- # may build in parallel with the default kernel without clobbering.
- if kpn != "kernel":
- workdir = d.getVar("WORKDIR")
- sourceDir = os.path.join(workdir, 'kernel-source')
- artifactsDir = os.path.join(workdir, 'kernel-build-artifacts')
- d.setVar("STAGING_KERNEL_DIR", sourceDir)
- d.setVar("STAGING_KERNEL_BUILDDIR", artifactsDir)
-
- # Merge KERNEL_IMAGETYPE and KERNEL_ALT_IMAGETYPE into KERNEL_IMAGETYPES
- type = d.getVar('KERNEL_IMAGETYPE') or ""
- alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
- types = d.getVar('KERNEL_IMAGETYPES') or ""
- if type not in types.split():
- types = (type + ' ' + types).strip()
- if alttype not in types.split():
- types = (alttype + ' ' + types).strip()
- d.setVar('KERNEL_IMAGETYPES', types)
-
- # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
- # by the kernel build system and types which are created by post-processing
- # the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
- # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
- # directly by the kernel build system.
- if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
- typeformake = set()
- for type in types.split():
- if type == 'vmlinux.gz':
- type = 'vmlinux'
- typeformake.add(type)
-
- d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
-
- kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
- imagedest = d.getVar('KERNEL_IMAGEDEST')
-
- for type in types.split():
- typelower = type.lower()
- d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
- d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
- d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
- d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
- d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
-
- image = d.getVar('INITRAMFS_IMAGE')
-    # If INITRAMFS_IMAGE is set but INITRAMFS_IMAGE_BUNDLE is set to 0,
-    # do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
- # standalone for use by wic and other tools.
- if image:
- d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
-
- # NOTE: setting INITRAMFS_TASK is for backward compatibility
- # The preferred method is to set INITRAMFS_IMAGE, because
- # this INITRAMFS_TASK has circular dependency problems
- # if the initramfs requires kernel modules
- image_task = d.getVar('INITRAMFS_TASK')
- if image_task:
- d.appendVarFlag('do_configure', 'depends', ' ${INITRAMFS_TASK}')
-}
-
-# Here we pull in all various kernel image types which we support.
-#
-# In case you're wondering why kernel.bbclass inherits the other image
-# types instead of the other way around, the reason for that is to
-# maintain compatibility with various currently existing meta-layers.
-# By pulling in the various kernel image types here, we retain the
-# original behavior of kernel.bbclass, so no meta-layers should get
-# broken.
-#
-# KERNEL_CLASSES by default pulls in kernel-uimage.bbclass, since this
-# used to be the default behavior when only uImage was supported. This
-# variable can be appended by users who implement support for new kernel
-# image types.
-
-KERNEL_CLASSES ?= " kernel-uimage "
-inherit ${KERNEL_CLASSES}
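-# A layer or machine adding support for another image type would typically
-# append here, e.g. (illustrative): KERNEL_CLASSES += " kernel-fitimage "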
-
-# Old style kernels may set ${S} = ${WORKDIR}/git for example
-# We need to move these over to STAGING_KERNEL_DIR. We can't just
-# create the symlink in advance as the git fetcher can't cope with
-# the symlink.
-do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
-do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
-python do_symlink_kernsrc () {
- s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
- kernsrc = d.getVar("STAGING_KERNEL_DIR")
- if s != kernsrc:
- bb.utils.mkdirhier(kernsrc)
- bb.utils.remove(kernsrc, recurse=True)
- if d.getVar("EXTERNALSRC"):
- # With EXTERNALSRC S will not be wiped so we can symlink to it
- os.symlink(s, kernsrc)
- else:
- import shutil
- shutil.move(s, kernsrc)
- os.symlink(kernsrc, s)
-}
-addtask symlink_kernsrc before do_configure after do_unpack
-
-inherit kernel-arch deploy
-
-PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-module-.*"
-PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-image-.*"
-PACKAGES_DYNAMIC += "^${KERNEL_PACKAGE_NAME}-firmware-.*"
-
-export OS = "${TARGET_OS}"
-export CROSS_COMPILE = "${TARGET_PREFIX}"
-export KBUILD_BUILD_VERSION = "1"
-export KBUILD_BUILD_USER ?= "oe-user"
-export KBUILD_BUILD_HOST ?= "oe-host"
-
-KERNEL_RELEASE ?= "${KERNEL_VERSION}"
-
-# The directory where the built kernel lies in the kernel tree
-KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
-KERNEL_IMAGEDEST ?= "boot"
-
-#
-# configuration
-#
-export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE") or "ttyS0"}"
-
-KERNEL_VERSION = "${@get_kernelversion_headers('${B}')}"
-
-KERNEL_LOCALVERSION ?= ""
-
-# kernels are generally machine specific
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-# U-Boot support
-UBOOT_ENTRYPOINT ?= "20008000"
-UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
-
-# Some Linux kernel configurations need additional parameters on the command line
-KERNEL_EXTRA_ARGS ?= ""
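-# e.g. a recipe could pass extra arguments to the kernel make invocation
-# (illustrative): KERNEL_EXTRA_ARGS += "DTC_FLAGS='-@'"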
-
-EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}""
-KERNEL_ALT_IMAGETYPE ??= ""
-
-copy_initramfs() {
- echo "Copying initramfs into ./usr ..."
- # In case the directory is not created yet from the first pass compile:
- mkdir -p ${B}/usr
- # Find and use the first initramfs image archive type we find
- rm -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do
- if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img" ]; then
- cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/.
- case $img in
- *gz)
- echo "gzip decompressing image"
- gunzip -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
- break
- ;;
- *lz4)
- echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- break
- ;;
- *lzo)
- echo "lzo decompressing image"
- lzop -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
- break
- ;;
- *lzma)
- echo "lzma decompressing image"
- lzma -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
- break
- ;;
- *xz)
- echo "xz decompressing image"
- xz -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
- break
- ;;
- esac
- break
- fi
- done
-	# Verify that the above loop found an initramfs, fail otherwise
-	[ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz} for bundling; INITRAMFS_IMAGE_NAME might be wrong."
-}
-
-do_bundle_initramfs () {
- if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- echo "Creating a kernel image with a bundled initramfs..."
- copy_initramfs
-		# Backing up the kernel image relies on its type (regular file or symbolic link)
- tmp_path=""
- for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
- linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
- realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
- mv -f $realpath $realpath.bak
- tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
- elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
- mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
- tmp_path=$tmp_path" "$imageType"##"
- fi
- done
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- kernel_do_compile
- # Restoring kernel image
- for tp in $tmp_path ; do
- imageType=`echo $tp|cut -d "#" -f 1`
- linkpath=`echo $tp|cut -d "#" -f 2`
- realpath=`echo $tp|cut -d "#" -f 3`
- if [ -n "$realpath" ]; then
- mv -f $realpath $realpath.initramfs
- mv -f $realpath.bak $realpath
- ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
- else
- mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
- mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
- fi
- done
- fi
-}
-do_bundle_initramfs[dirs] = "${B}"
-
-python do_devshell_prepend () {
- os.environ["LDFLAGS"] = ''
-}
-
-addtask bundle_initramfs after do_install before do_deploy
-
-get_cc_option () {
- # Check if KERNEL_CC supports the option "file-prefix-map".
- # This option allows us to build images with __FILE__ values that do not
- # contain the host build path.
- if ${KERNEL_CC} -Q --help=joined | grep -q "\-ffile-prefix-map=<old=new>"; then
- echo "-ffile-prefix-map=${S}=/kernel-source/"
- fi
-}
-
-kernel_do_compile() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
- # be set....
- if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
- # The source directory is not necessarily a git repository, so we
- # specify the git-dir to ensure that git does not query a
- # repository in any parent directory.
- SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
- fi
-
- ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
- export KBUILD_BUILD_TIMESTAMP="$ts"
- export KCONFIG_NOTIMESTAMP=1
- bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
- fi
- # The $use_alternate_initrd is only set from
-	# do_bundle_initramfs(). This variable is specifically for the
- # case where we are making a second pass at the kernel
- # compilation and we want to force the kernel build to use a
- # different initramfs image. The way to do that in the kernel
- # is to specify:
- # make ...args... CONFIG_INITRAMFS_SOURCE=some_other_initramfs.cpio
- if [ "$use_alternate_initrd" = "" ] && [ "${INITRAMFS_TASK}" != "" ] ; then
-		# The old style way of copying a prebuilt image and building it
-		# is turned on via INITRAMFS_TASK != ""
- copy_initramfs
- use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
- fi
- cc_extra=$(get_cc_option)
- for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
- oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
- done
-	# vmlinux.gz is not built by the kernel build system
- if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
- mkdir -p "${KERNEL_OUTPUT_DIR}"
- gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
- fi
-}
-
-do_compile_kernelmodules() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
- # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
- # be set....
- if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
- # The source directory is not necessarily a git repository, so we
- # specify the git-dir to ensure that git does not query a
- # repository in any parent directory.
- SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
- fi
-
- ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
- export KBUILD_BUILD_TIMESTAMP="$ts"
- export KCONFIG_NOTIMESTAMP=1
- bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
- fi
- if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
- cc_extra=$(get_cc_option)
- oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
-
- # Module.symvers gets updated during the
- # building of the kernel modules. We need to
- # update this in the shared workdir since some
-		# external kernel modules have a dependency on
- # other kernel modules and will look at this
- # file to do symbol lookups
- cp ${B}/Module.symvers ${STAGING_KERNEL_BUILDDIR}/
- else
- bbnote "no modules to compile"
- fi
-}
-addtask compile_kernelmodules after do_compile before do_strip
-
-kernel_do_install() {
- #
- # First install the modules
- #
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
- if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
- # If the kernel/ directory is empty remove it to prevent QA issues
- rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
- else
- bbnote "no modules to install"
- fi
-
- #
- # Install various kernel output (zImage, map file, config, module support files)
- #
- install -d ${D}/${KERNEL_IMAGEDEST}
- install -d ${D}/boot
- for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
- fi
- done
- install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
- install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
- install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
- [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
- install -d ${D}${sysconfdir}/modules-load.d
- install -d ${D}${sysconfdir}/modprobe.d
-}
-do_install[prefuncs] += "package_get_auto_pr"
-
-# Must be run no earlier than after do_kernel_checkout or else the Makefile won't be at ${S}/Makefile
-do_kernel_version_sanity_check() {
- if [ "x${KERNEL_VERSION_SANITY_SKIP}" = "x1" ]; then
- exit 0
- fi
-
- # The Makefile determines the kernel version shown at runtime
- # Don't use KERNEL_VERSION because the headers it grabs the version from aren't generated until do_compile
- VERSION=$(grep "^VERSION =" ${S}/Makefile | sed s/.*=\ *//)
- PATCHLEVEL=$(grep "^PATCHLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
- SUBLEVEL=$(grep "^SUBLEVEL =" ${S}/Makefile | sed s/.*=\ *//)
- EXTRAVERSION=$(grep "^EXTRAVERSION =" ${S}/Makefile | sed s/.*=\ *//)
-
- # Build a string for regex and a plain version string
- reg="^${VERSION}\.${PATCHLEVEL}"
- vers="${VERSION}.${PATCHLEVEL}"
- if [ -n "${SUBLEVEL}" ]; then
- # Ignoring a SUBLEVEL of zero is fine
- if [ "${SUBLEVEL}" = "0" ]; then
- reg="${reg}(\.${SUBLEVEL})?"
- else
- reg="${reg}\.${SUBLEVEL}"
- vers="${vers}.${SUBLEVEL}"
- fi
- fi
- vers="${vers}${EXTRAVERSION}"
- reg="${reg}${EXTRAVERSION}"
-
- if [ -z `echo ${PV} | grep -E "${reg}"` ]; then
-		bbfatal "Package Version (${PV}) does not match that of the kernel being built (${vers}). Please update the PV variable to match the kernel source or set KERNEL_VERSION_SANITY_SKIP=\"1\" in your recipe."
- fi
- exit 0
-}
-
-addtask shared_workdir after do_compile before do_compile_kernelmodules
-addtask shared_workdir_setscene
-
-do_shared_workdir_setscene () {
- exit 1
-}
-
-emit_depmod_pkgdata() {
- # Stash data for depmod
- install -d ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/
- echo "${KERNEL_VERSION}" > ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/${KERNEL_PACKAGE_NAME}-abiversion
- cp ${B}/System.map ${PKGDESTWORK}/${KERNEL_PACKAGE_NAME}-depmod/System.map-${KERNEL_VERSION}
-}
-
-PACKAGEFUNCS += "emit_depmod_pkgdata"
-
-do_shared_workdir[cleandirs] += " ${STAGING_KERNEL_BUILDDIR}"
-do_shared_workdir () {
- cd ${B}
-
- kerneldir=${STAGING_KERNEL_BUILDDIR}
- install -d $kerneldir
-
- #
- # Store the kernel version in sysroots for module-base.bbclass
- #
-
- echo "${KERNEL_VERSION}" > $kerneldir/${KERNEL_PACKAGE_NAME}-abiversion
-
- # Copy files required for module builds
- cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- [ -e Module.symvers ] && cp Module.symvers $kerneldir/
- cp .config $kerneldir/
- mkdir -p $kerneldir/include/config
- cp include/config/kernel.release $kerneldir/include/config/kernel.release
- if [ -e certs/signing_key.x509 ]; then
- # The signing_key.* files are stored in the certs/ dir in
- # newer Linux kernels
- mkdir -p $kerneldir/certs
- cp certs/signing_key.* $kerneldir/certs/
- elif [ -e signing_key.priv ]; then
- cp signing_key.* $kerneldir/
- fi
-
- # We can also copy over all the generated files and avoid special cases
- # like version.h, but we've opted to keep this small until file creep starts
- # to happen
- if [ -e include/linux/version.h ]; then
- mkdir -p $kerneldir/include/linux
- cp include/linux/version.h $kerneldir/include/linux/version.h
- fi
-
- # As of Linux kernel version 3.0.1, the clean target removes
- # arch/powerpc/lib/crtsavres.o which is present in
- # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
- if [ ${ARCH} = "powerpc" ]; then
- if [ -e arch/powerpc/lib/crtsavres.o ]; then
- mkdir -p $kerneldir/arch/powerpc/lib/
- cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
- fi
- fi
-
- if [ -d include/generated ]; then
- mkdir -p $kerneldir/include/generated/
- cp -fR include/generated/* $kerneldir/include/generated/
- fi
-
- if [ -d arch/${ARCH}/include/generated ]; then
- mkdir -p $kerneldir/arch/${ARCH}/include/generated/
- cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
- fi
-
- if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
- # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
- # out-of-tree modules to be able to generate object files.
- if [ -x tools/objtool/objtool ]; then
- mkdir -p ${kerneldir}/tools/objtool
- cp tools/objtool/objtool ${kerneldir}/tools/objtool/
- fi
- fi
-}
-
-# We don't need to stage anything; not even the modules/firmware, since those would clash with linux-firmware
-sysroot_stage_all () {
- :
-}
-
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
-
-python check_oldest_kernel() {
- oldest_kernel = d.getVar('OLDEST_KERNEL')
- kernel_version = d.getVar('KERNEL_VERSION')
- tclibc = d.getVar('TCLIBC')
- if tclibc == 'glibc':
- kernel_version = kernel_version.split('-', 1)[0]
- if oldest_kernel and kernel_version:
- if bb.utils.vercmp_string(kernel_version, oldest_kernel) < 0:
- bb.warn('%s: OLDEST_KERNEL is "%s" but the version of the kernel you are building is "%s" - therefore %s as built may not be compatible with this kernel. Either set OLDEST_KERNEL to an older version, or build a newer kernel.' % (d.getVar('PN'), oldest_kernel, kernel_version, tclibc))
-}
-
-check_oldest_kernel[vardepsexclude] += "OLDEST_KERNEL KERNEL_VERSION"
-do_configure[prefuncs] += "check_oldest_kernel"
-
-kernel_do_configure() {
- # fixes extra + in /lib/modules/2.6.37+
- # $ scripts/setlocalversion . => +
- # $ make kernelversion => 2.6.37
- # $ make kernelrelease => 2.6.37+
- touch ${B}/.scmversion ${S}/.scmversion
-
- if [ "${S}" != "${B}" ] && [ -f "${S}/.config" ] && [ ! -f "${B}/.config" ]; then
- mv "${S}/.config" "${B}/.config"
- fi
-
- # Copy defconfig to .config if .config does not exist. This allows
- # recipes to manage the .config themselves in do_configure_prepend().
- if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
- cp "${WORKDIR}/defconfig" "${B}/.config"
- fi
-
- ${KERNEL_CONFIG_COMMAND}
-}
-
-do_savedefconfig() {
- bbplain "Saving defconfig to:\n${B}/defconfig"
- oe_runmake -C ${B} savedefconfig
-}
-do_savedefconfig[nostamp] = "1"
-addtask savedefconfig after do_configure
-
-inherit cml1
-
-KCONFIG_CONFIG_COMMAND_append = " HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-
-EXPORT_FUNCTIONS do_compile do_install do_configure
-
-# kernel-base becomes kernel-${KERNEL_VERSION}
-# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
-FILES_${PN} = ""
-FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
-FILES_${KERNEL_PACKAGE_NAME}-image = ""
-FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
-FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
-FILES_${KERNEL_PACKAGE_NAME}-modules = ""
-RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
-# Allow machines to override this dependency if kernel image files are
-# not wanted in images as standard
-RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
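-# e.g. a machine conf not wanting the kernel image in the rootfs could set
-# (illustrative): RDEPENDS_kernel-base = ""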
-PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
-PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
-RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-image = "1"
-ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-modules = "1"
-DESCRIPTION_${KERNEL_PACKAGE_NAME}-modules = "Kernel modules meta package"
-
-pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
- if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
- mkdir -p $D/lib/modules/${KERNEL_VERSION}
- fi
- if [ -n "$D" ]; then
- depmodwrapper -a -b $D ${KERNEL_VERSION}
- else
- depmod -a ${KERNEL_VERSION}
- fi
-}
-
-PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
-
-python split_kernel_packages () {
- do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
-}
-
-# Many scripts want to look in arch/$arch/boot for the bootable
-# image. This poses a problem for vmlinux- and vmlinuz-based
-# booting. This task arranges to have vmlinux and vmlinuz appear
-# in the normalized directory location.
-do_kernel_link_images() {
- if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
- mkdir ${B}/arch/${ARCH}/boot
- fi
- cd ${B}/arch/${ARCH}/boot
- ln -sf ../../../vmlinux
- if [ -f ../../../vmlinuz ]; then
- ln -sf ../../../vmlinuz
- fi
- if [ -f ../../../vmlinuz.bin ]; then
- ln -sf ../../../vmlinuz.bin
- fi
- if [ -f ../../../vmlinux.64 ]; then
- ln -sf ../../../vmlinux.64
- fi
-}
-addtask kernel_link_images after do_compile before do_strip
-
-do_strip() {
- if [ -n "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}" ]; then
- if ! (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux"); then
- bbwarn "image type(s) will not be stripped (not supported): ${KERNEL_IMAGETYPES}"
- return
- fi
-
- cd ${B}
- headers=`"$CROSS_COMPILE"readelf -S ${KERNEL_OUTPUT_DIR}/vmlinux | \
- grep "^ \{1,\}\[[0-9 ]\{1,\}\] [^ ]" | \
- sed "s/^ \{1,\}\[[0-9 ]\{1,\}\] //" | \
- gawk '{print $1}'`
-
- for str in ${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}; do {
- if ! (echo "$headers" | grep -q "^$str$"); then
- bbwarn "Section not found: $str";
- fi
-
- "$CROSS_COMPILE"strip -s -R $str ${KERNEL_OUTPUT_DIR}/vmlinux
- }; done
-
- bbnote "KERNEL_IMAGE_STRIP_EXTRA_SECTIONS is set, stripping sections:" \
- "${KERNEL_IMAGE_STRIP_EXTRA_SECTIONS}"
- fi;
-}
-do_strip[dirs] = "${B}"
-
-addtask strip before do_sizecheck after do_kernel_link_images
-
-# Support checking the kernel size since some kernels need to reside in partitions
-# with a fixed length or there is a limit on transferring the kernel to memory.
-# If more than one image type is enabled, warn on any that don't fit but only fail
-# if none fit.
-do_sizecheck() {
- if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
- invalid=`echo ${KERNEL_IMAGE_MAXSIZE} | sed 's/[0-9]//g'`
- if [ -n "$invalid" ]; then
- die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
- fi
- at_least_one_fits=
- for imageType in ${KERNEL_IMAGETYPES} ; do
- size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
- bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
- else
- at_least_one_fits=y
- fi
- done
- if [ -z "$at_least_one_fits" ]; then
- die "All kernel images are too big for your device. Please reduce the size of the kernel by making more of it modular."
- fi
- fi
-}
-do_sizecheck[dirs] = "${B}"
-
-addtask sizecheck before do_install after do_strip
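-# e.g. a machine with a fixed-size kernel partition might set (illustrative,
-# value in kilobytes): KERNEL_IMAGE_MAXSIZE = "8192"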
-
-inherit kernel-artifact-names
-
-kernel_do_deploy() {
- deployDir="${DEPLOYDIR}"
- if [ -n "${KERNEL_DEPLOYSUBDIR}" ]; then
- deployDir="${DEPLOYDIR}/${KERNEL_DEPLOYSUBDIR}"
- mkdir "$deployDir"
- fi
-
- for imageType in ${KERNEL_IMAGETYPES} ; do
- base_name=${imageType}-${KERNEL_IMAGE_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
- symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
- ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
- ln -sf ${base_name}.bin $deployDir/${imageType}
- done
-
- if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- mkdir -p ${D}${root_prefix}/lib
- if [ -n "${SOURCE_DATE_EPOCH}" ]; then
- TAR_ARGS="--sort=name --clamp-mtime --mtime=@${SOURCE_DATE_EPOCH}"
- else
- TAR_ARGS=""
- fi
- TAR_ARGS="$TAR_ARGS --owner=0 --group=0"
- tar $TAR_ARGS -cv -C ${D}${root_prefix} lib | gzip -9n > $deployDir/modules-${MODULE_TARBALL_NAME}.tgz
-
- ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
- fi
-
- if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- for imageType in ${KERNEL_IMAGETYPES} ; do
- if [ "$imageType" = "fitImage" ] ; then
- continue
- fi
- initramfs_base_name=${imageType}-${INITRAMFS_NAME}
- initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
- ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
- done
- fi
-}
-do_deploy[prefuncs] += "package_get_auto_pr"
-
-addtask deploy after do_populate_sysroot do_packagedata
-
-EXPORT_FUNCTIONS do_deploy
-
-# Add using Device Tree support
-inherit kernel-devicetree
diff --git a/meta/classes/kernelsrc.bbclass b/meta/classes/kernelsrc.bbclass
deleted file mode 100644
index a951ba3325..0000000000
--- a/meta/classes/kernelsrc.bbclass
+++ /dev/null
@@ -1,10 +0,0 @@
-S = "${STAGING_KERNEL_DIR}"
-deltask do_fetch
-deltask do_unpack
-do_patch[depends] += "virtual/kernel:do_shared_workdir"
-do_patch[noexec] = "1"
-do_package[depends] += "virtual/kernel:do_populate_sysroot"
-KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
-
-inherit linux-kernel-base
-
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
deleted file mode 100644
index 8849f59042..0000000000
--- a/meta/classes/lib_package.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# ${PN}-bin is defined in bitbake.conf
-#
-# We need to allow the other packages to be greedy with what they
-# want out of /usr/bin and /usr/sbin before ${PN}-bin gets greedy.
-#
-PACKAGE_BEFORE_PN = "${PN}-bin"
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
deleted file mode 100644
index de3b4250c7..0000000000
--- a/meta/classes/libc-package.bbclass
+++ /dev/null
@@ -1,384 +0,0 @@
-#
-# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
-# may need packaging and it's pointless to duplicate this code.
-#
-# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
-# "compile" - Use QEMU to generate the binary locale files
-# "precompiled" - The binary locale files are pregenerated and already present
-# "ondevice" - The device will build the locale files upon first boot through the postinst
-
-GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
-
-GLIBC_SPLIT_LC_PACKAGES ?= "0"
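-# e.g. a distro wanting QEMU-generated binary locales split per LC_* category
-# could set (illustrative values):
-#   GLIBC_INTERNAL_USE_BINARY_LOCALE = "compile"
-#   GLIBC_SPLIT_LC_PACKAGES = "1"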
-
-python __anonymous () {
- enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION")
-
- pn = d.getVar("PN")
- if pn.endswith("-initial"):
- enabled = False
-
- if enabled and int(enabled):
- import re
-
- target_arch = d.getVar("TARGET_ARCH")
- binary_arches = d.getVar("BINARY_LOCALE_ARCHES") or ""
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or ""
-
- for regexp in binary_arches.split(" "):
- r = re.compile(regexp)
-
- if r.match(target_arch):
- depends = d.getVar("DEPENDS")
- if use_cross_localedef == "1" :
- depends = "%s cross-localedef-native" % depends
- else:
- depends = "%s qemu-native" % depends
- d.setVar("DEPENDS", depends)
- d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
- break
-}
-
-# try to fix the compile failure when charsets/locales/locale-code are disabled
-PACKAGE_NO_GCONV ?= "0"
-
-OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
-
-locale_base_postinst_ontarget() {
-localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
-}
-
-locale_base_postrm() {
-#!/bin/sh
-localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s %s
-}
-
-LOCALETREESRC ?= "${PKGD}"
-
-do_prep_locale_tree() {
- treedir=${WORKDIR}/locale-tree
- rm -rf $treedir
- mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${localedir}
- tar -cf - -C ${LOCALETREESRC}${datadir} -p i18n | tar -xf - -C $treedir/${datadir}
-	# gunzip to avoid parsing errors
- for i in $treedir/${datadir}/i18n/charmaps/*gz; do
- gunzip $i
- done
- # The extract pattern "./l*.so*" is carefully selected so that it will
- # match ld*.so and lib*.so*, but not any files in the gconv directory
- # (if it exists). This makes sure we only unpack the files we need.
- # This is important in case usrmerge is set in DISTRO_FEATURES, which
- # means ${base_libdir} == ${libdir}.
- tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
- if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
- tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
- fi
- install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
-}
-
-do_collect_bins_from_locale_tree() {
- treedir=${WORKDIR}/locale-tree
-
- parent=$(dirname ${localedir})
- mkdir -p ${PKGD}/$parent
- tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
-
-	# Finalize tree by changing all duplicate files into hard links
- cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
-}
-
-inherit qemu
-
-python package_do_split_gconvs () {
- import re
- if (d.getVar('PACKAGE_NO_GCONV') == '1'):
- bb.note("package requested not splitting gconvs")
- return
-
- if not d.getVar('PACKAGES'):
- return
-
- mlprefix = d.getVar("MLPREFIX") or ""
-
- bpn = d.getVar('BPN')
- libdir = d.getVar('libdir')
- if not libdir:
- bb.error("libdir not defined")
- return
- datadir = d.getVar('datadir')
- if not datadir:
- bb.error("datadir not defined")
- return
-
- gconv_libdir = oe.path.join(libdir, "gconv")
- charmap_dir = oe.path.join(datadir, "i18n", "charmaps")
- locales_dir = oe.path.join(datadir, "i18n", "locales")
- binary_locales_dir = d.getVar('localedir')
-
- def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
- deps = []
- f = open(fn, "rb")
- c_re = re.compile(r'^copy "(.*)"')
- i_re = re.compile(r'^include "(\w+)".*')
- for l in f.readlines():
- l = l.decode("latin-1")
- m = c_re.match(l) or i_re.match(l)
- if m:
- dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
- if not dp in deps:
- deps.append(dp)
- f.close()
- if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
- if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
-
- do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
- description='gconv module for character set %s', hook=calc_gconv_deps, \
- extra_depends=bpn+'-gconv')
-
- def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
- deps = []
- f = open(fn, "rb")
- c_re = re.compile(r'^copy "(.*)"')
- i_re = re.compile(r'^include "(\w+)".*')
- for l in f.readlines():
- l = l.decode("latin-1")
- m = c_re.match(l) or i_re.match(l)
- if m:
- dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
- if not dp in deps:
- deps.append(dp)
- f.close()
- if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
- if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
-
- do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
- description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
-
- def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
- deps = []
- f = open(fn, "rb")
- c_re = re.compile(r'^copy "(.*)"')
- i_re = re.compile(r'^include "(\w+)".*')
- for l in f.readlines():
- l = l.decode("latin-1")
- m = c_re.match(l) or i_re.match(l)
- if m:
- dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
- if not dp in deps:
- deps.append(dp)
- f.close()
- if deps != []:
- d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
- if bpn != 'glibc':
- d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
-
- do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
- description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
- d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
-
- use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
-
- dot_re = re.compile(r"(.*)\.(.*)")
-
- # Read in supported locales and associated encodings
- supported = {}
- with open(oe.path.join(d.getVar('WORKDIR'), "SUPPORTED")) as f:
- for line in f.readlines():
- try:
- locale, charset = line.rstrip().split()
- except ValueError:
- continue
- supported[locale] = charset
-
-    # The GLIBC_GENERATE_LOCALES variable specifies which locales are to be generated; empty or "all" means all locales
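-    # e.g. a distro conf could restrict generation to a few locales
-    # (illustrative): GLIBC_GENERATE_LOCALES = "en_US.UTF-8 de_DE.UTF-8"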
- to_generate = d.getVar('GLIBC_GENERATE_LOCALES')
- if not to_generate or to_generate == 'all':
- to_generate = sorted(supported.keys())
- else:
- to_generate = to_generate.split()
- for locale in to_generate:
- if locale not in supported:
- if '.' in locale:
- charset = locale.split('.')[1]
- else:
- charset = 'UTF-8'
- bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
- supported[locale] = charset
-
- def output_locale_source(name, pkgname, locale, encoding):
- d.setVar('RDEPENDS_%s' % pkgname, '%slocaledef %s-localedata-%s %s-charmap-%s' % \
- (mlprefix, mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
- d.setVar('pkg_postinst_ontarget_%s' % pkgname, d.getVar('locale_base_postinst_ontarget') \
- % (locale, encoding, locale))
- d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm') % \
- (locale, encoding, locale))
-
- def output_locale_binary_rdepends(name, pkgname, locale, encoding):
- dep = legitimize_package_name('%s-binary-localedata-%s' % (bpn, name))
- lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
- if lcsplit and int(lcsplit):
- d.appendVar('PACKAGES', ' ' + dep)
- d.setVar('ALLOW_EMPTY_%s' % dep, '1')
- d.setVar('RDEPENDS_%s' % pkgname, mlprefix + dep)
-
- commands = {}
-
- def output_locale_binary(name, pkgname, locale, encoding):
- treedir = oe.path.join(d.getVar("WORKDIR"), "locale-tree")
- ldlibdir = oe.path.join(treedir, d.getVar("base_libdir"))
- path = d.getVar("PATH")
- i18npath = oe.path.join(treedir, datadir, "i18n")
- gconvpath = oe.path.join(treedir, "iconvdata")
- outputpath = oe.path.join(treedir, binary_locales_dir)
-
- use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF") or "0"
- if use_cross_localedef == "1":
- target_arch = d.getVar('TARGET_ARCH')
- locale_arch_options = { \
- "arc": " --uint32-align=4 --little-endian ", \
- "arceb": " --uint32-align=4 --big-endian ", \
- "arm": " --uint32-align=4 --little-endian ", \
- "armeb": " --uint32-align=4 --big-endian ", \
- "aarch64": " --uint32-align=4 --little-endian ", \
- "aarch64_be": " --uint32-align=4 --big-endian ", \
- "sh4": " --uint32-align=4 --big-endian ", \
- "powerpc": " --uint32-align=4 --big-endian ", \
- "powerpc64": " --uint32-align=4 --big-endian ", \
- "powerpc64le": " --uint32-align=4 --little-endian ", \
- "mips": " --uint32-align=4 --big-endian ", \
- "mipsisa32r6": " --uint32-align=4 --big-endian ", \
- "mips64": " --uint32-align=4 --big-endian ", \
- "mipsisa64r6": " --uint32-align=4 --big-endian ", \
- "mipsel": " --uint32-align=4 --little-endian ", \
- "mipsisa32r6el": " --uint32-align=4 --little-endian ", \
- "mips64el":" --uint32-align=4 --little-endian ", \
- "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
- "riscv64": " --uint32-align=4 --little-endian ", \
- "riscv32": " --uint32-align=4 --little-endian ", \
- "i586": " --uint32-align=4 --little-endian ", \
- "i686": " --uint32-align=4 --little-endian ", \
- "x86_64": " --uint32-align=4 --little-endian " }
-
- if target_arch in locale_arch_options:
- localedef_opts = locale_arch_options[target_arch]
- else:
- bb.error("locale_arch_options not found for target_arch=" + target_arch)
- bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
-
- localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
- --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
- % (treedir, treedir, datadir, locale, encoding, outputpath, name)
-
- cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
- (path, i18npath, gconvpath, localedef_opts)
- else: # earlier slower qemu way
- qemu = qemu_target_binary(d)
- localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
- --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
- % (treedir, datadir, locale, encoding, name)
-
- qemu_options = d.getVar('QEMU_OPTIONS')
-
- cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
- -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
- (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
-
- commands["%s/%s" % (outputpath, name)] = cmd
-
- bb.note("generating locale %s (%s)" % (locale, encoding))
-
- def output_locale(name, locale, encoding):
- pkgname = d.getVar('MLPREFIX', False) + 'locale-base-' + legitimize_package_name(name)
- d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
- d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
- rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
- m = re.match(r"(.*)_(.*)", name)
- if m:
- rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
- d.setVar('RPROVIDES_%s' % pkgname, rprovides)
-
- if use_bin == "compile":
- output_locale_binary_rdepends(name, pkgname, locale, encoding)
- output_locale_binary(name, pkgname, locale, encoding)
- elif use_bin == "precompiled":
- output_locale_binary_rdepends(name, pkgname, locale, encoding)
- else:
- output_locale_source(name, pkgname, locale, encoding)
-
- if use_bin == "compile":
- bb.note("preparing tree for binary locale generation")
- bb.build.exec_func("do_prep_locale_tree", d)
-
- utf8_only = int(d.getVar('LOCALE_UTF8_ONLY') or 0)
- utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT') or 0)
-
- encodings = {}
- for locale in to_generate:
- charset = supported[locale]
- if utf8_only and charset != 'UTF-8':
- continue
-
- m = dot_re.match(locale)
- if m:
- base = m.group(1)
- else:
- base = locale
-
- # Non-precompiled locales may be renamed so that the default
- # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
- # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
- # contradicts SUPPORTED.
- if use_bin == "precompiled" or not utf8_is_default:
- output_locale(locale, base, charset)
- else:
- if charset == 'UTF-8':
- output_locale(base, base, charset)
- else:
- output_locale('%s.%s' % (base, charset), base, charset)
-
- def metapkg_hook(file, pkg, pattern, format, basename):
- name = basename.split('/', 1)[0]
- metapkg = legitimize_package_name('%s-binary-localedata-%s' % (mlprefix+bpn, name))
- d.appendVar('RDEPENDS_%s' % metapkg, ' ' + pkg)
-
- if use_bin == "compile":
- makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
- with open(makefile, "w") as m:
- m.write("all: %s\n\n" % " ".join(commands.keys()))
- total = len(commands)
- for i, (maketarget, makerecipe) in enumerate(commands.items()):
- m.write(maketarget + ":\n")
- m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
- m.write("\t" + makerecipe + "\n\n")
- d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
-        d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
- bb.note("Executing binary locale generation makefile")
- bb.build.exec_func("oe_runmake", d)
- bb.note("collecting binary locales from locale tree")
- bb.build.exec_func("do_collect_bins_from_locale_tree", d)
-
- if use_bin in ('compile', 'precompiled'):
- lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
- if lcsplit and int(lcsplit):
- do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', recursive=True,
- hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
- else:
- do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
- output_pattern=bpn+'-binary-localedata-%s', \
- description='binary locale definition for %s', extra_depends='', allow_dirs=True)
- else:
- bb.note("generation of binary locales disabled. this may break i18n!")
-
-}
-
-# We want to do this indirection so that we can safely 'return'
-# from the called function even though we're prepending
-python populate_packages_prepend () {
- bb.build.exec_func('package_do_split_gconvs', d)
-}
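The indirection commented on above is a general BitBake pattern: a bare 'return' cannot be used directly inside a _prepend python block, so the real work lives in a named function executed via exec_func. A minimal sketch of the same pattern, with hypothetical function and variable names:

    python populate_packages_prepend () {
        # exec_func lets the helper below use 'return' for early exits,
        # which is not possible directly in this _prepend block.
        bb.build.exec_func('my_split_helper', d)
    }

    python my_split_helper () {
        if d.getVar('MY_FEATURE_FLAG') != '1':
            return    # safe early exit
        # ... the real packaging work goes here ...
    }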
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
deleted file mode 100644
index f90176d6c0..0000000000
--- a/meta/classes/license.bbclass
+++ /dev/null
@@ -1,436 +0,0 @@
-# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
-# LIC_FILES_CHKSUM.
-# TODO:
-# - There is a real issue revolving around license naming standards.
-
-LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
-LICSSTATEDIR = "${WORKDIR}/license-destdir/"
-
-# Create extra package with license texts and add it to RRECOMMENDS_${PN}
-LICENSE_CREATE_PACKAGE[type] = "boolean"
-LICENSE_CREATE_PACKAGE ??= "0"
-LICENSE_PACKAGE_SUFFIX ??= "-lic"
-LICENSE_FILES_DIRECTORY ??= "${datadir}/licenses/"
-
-addtask populate_lic after do_patch before do_build
-do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
-do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
-
-python do_populate_lic() {
- """
- Populate LICENSE_DIRECTORY with licenses.
- """
- lic_files_paths = find_license_files(d)
-
- # The base directory we wrangle licenses to
- destdir = os.path.join(d.getVar('LICSSTATEDIR'), d.getVar('PN'))
- copy_license_files(lic_files_paths, destdir)
- info = get_recipe_info(d)
- with open(os.path.join(destdir, "recipeinfo"), "w") as f:
- for key in sorted(info.keys()):
- f.write("%s: %s\n" % (key, info[key]))
-}
-
-# It would be better to copy them in do_install_append, but find_license_files is python
-python perform_packagecopy_prepend () {
- enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
- if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
- lic_files_paths = find_license_files(d)
-
- # LICENSE_FILES_DIRECTORY starts with '/' so os.path.join cannot be used to join D and LICENSE_FILES_DIRECTORY
- destdir = d.getVar('D') + os.path.join(d.getVar('LICENSE_FILES_DIRECTORY'), d.getVar('PN'))
- copy_license_files(lic_files_paths, destdir)
- add_package_and_files(d)
-}
-perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
-
-def get_recipe_info(d):
- info = {}
- info["PV"] = d.getVar("PV")
- info["PR"] = d.getVar("PR")
- info["LICENSE"] = d.getVar("LICENSE")
- return info
-
-def add_package_and_files(d):
- packages = d.getVar('PACKAGES')
- files = d.getVar('LICENSE_FILES_DIRECTORY')
- pn = d.getVar('PN')
- pn_lic = "%s%s" % (pn, d.getVar('LICENSE_PACKAGE_SUFFIX', False))
- if pn_lic in packages.split():
- bb.warn("%s package already existed in %s." % (pn_lic, pn))
- else:
- # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
- d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
- d.setVar('FILES_' + pn_lic, files)
- for pn in packages.split():
- if pn == pn_lic:
- continue
- rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
- if rrecommends_pn:
- d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
- else:
- d.setVar('RRECOMMENDS_' + pn, "%s" % (pn_lic))
-
-def copy_license_files(lic_files_paths, destdir):
- import shutil
- import errno
-
- bb.utils.mkdirhier(destdir)
- for (basename, path, beginline, endline) in lic_files_paths:
- try:
- src = path
- dst = os.path.join(destdir, basename)
- if os.path.exists(dst):
- os.remove(dst)
- if os.path.islink(src):
- src = os.path.realpath(src)
- canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) and beginline is None and endline is None
- if canlink:
- try:
- os.link(src, dst)
- except OSError as err:
- if err.errno == errno.EXDEV:
- # Copy license files if hard-link is not possible even if st_dev is the
- # same on source and destination (docker container with device-mapper?)
- canlink = False
- else:
- raise
-            # Only chown if we did hardlink, and we're running under pseudo
- if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
- os.chown(dst,0,0)
- if not canlink:
- begin_idx = int(beginline)-1 if beginline is not None else None
- end_idx = int(endline) if endline is not None else None
- if begin_idx is None and end_idx is None:
- shutil.copyfile(src, dst)
- else:
- with open(src, 'rb') as src_f:
- with open(dst, 'wb') as dst_f:
- dst_f.write(b''.join(src_f.readlines()[begin_idx:end_idx]))
-
- except Exception as e:
- bb.warn("Could not copy license file %s to %s: %s" % (src, dst, e))
-
-def find_license_files(d):
- """
- Creates list of files used in LIC_FILES_CHKSUM and generic LICENSE files.
- """
- import shutil
- import oe.license
- from collections import defaultdict, OrderedDict
-
- # All the license files for the package
- lic_files = d.getVar('LIC_FILES_CHKSUM') or ""
- pn = d.getVar('PN')
-    # The license files are located in S, at the paths given in LIC_FILES_CHKSUM.
- srcdir = d.getVar('S')
- # Directory we store the generic licenses as set in the distro configuration
- generic_directory = d.getVar('COMMON_LICENSE_DIR')
- # List of basename, path tuples
- lic_files_paths = []
-    # hash to keep track of generic license mappings
- non_generic_lics = {}
- # Entries from LIC_FILES_CHKSUM
- lic_chksums = {}
- license_source_dirs = []
- license_source_dirs.append(generic_directory)
- try:
- additional_lic_dirs = d.getVar('LICENSE_PATH').split()
- for lic_dir in additional_lic_dirs:
- license_source_dirs.append(lic_dir)
- except:
- pass
-
- class FindVisitor(oe.license.LicenseVisitor):
- def visit_Str(self, node):
-            #
-            # Until we figure out what to do with the two modifiers we
-            # support ("or later" = "+" and "with exceptions" = "*"),
-            # we'll just strip out the modifier and use the base license.
- find_license(node.s.replace("+", "").replace("*", ""))
- self.generic_visit(node)
-
- def find_license(license_type):
- try:
- bb.utils.mkdirhier(gen_lic_dest)
- except:
- pass
- spdx_generic = None
- license_source = None
- # If the generic does not exist we need to check to see if there is an SPDX mapping to it,
- # unless NO_GENERIC_LICENSE is set.
- for lic_dir in license_source_dirs:
- if not os.path.isfile(os.path.join(lic_dir, license_type)):
- if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
- # Great, there is an SPDXLICENSEMAP. We can copy!
- bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
- spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
- license_source = lic_dir
- break
- elif os.path.isfile(os.path.join(lic_dir, license_type)):
- spdx_generic = license_type
- license_source = lic_dir
- break
-
- non_generic_lic = d.getVarFlag('NO_GENERIC_LICENSE', license_type)
- if spdx_generic and license_source:
-            # We really should copy to generic_ + spdx_generic; however, that ends up messing up
-            # the manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes).
-
- lic_files_paths.append(("generic_" + license_type, os.path.join(license_source, spdx_generic),
- None, None))
-
-            # The user may attempt to use NO_GENERIC_LICENSE for a generic license, which doesn't make
-            # sense and should not be allowed; warn the user in this case.
- if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
- bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
-
- elif non_generic_lic and non_generic_lic in lic_chksums:
- # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
- # of the package rather than the license_source_dirs.
- lic_files_paths.append(("generic_" + license_type,
- os.path.join(srcdir, non_generic_lic), None, None))
- non_generic_lics[non_generic_lic] = license_type
- else:
-            # Explicitly skip the CLOSED license because it isn't generic
- if license_type != 'CLOSED':
- # And here is where we warn people that their licenses are lousy
- bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
- pass
-
- if not generic_directory:
- bb.fatal("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
-
- for url in lic_files.split():
- try:
- (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
- if method != "file" or not path:
- raise bb.fetch.MalformedUrl()
- except bb.fetch.MalformedUrl:
- bb.fatal("%s: LIC_FILES_CHKSUM contains an invalid URL: %s" % (d.getVar('PF'), url))
- # We want the license filename and path
- chksum = parm.get('md5', None)
- beginline = parm.get('beginline')
- endline = parm.get('endline')
- lic_chksums[path] = (chksum, beginline, endline)
-
- v = FindVisitor()
- try:
- v.visit_string(d.getVar('LICENSE'))
- except oe.license.InvalidLicense as exc:
- bb.fatal('%s: %s' % (d.getVar('PF'), exc))
- except SyntaxError:
-        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF')))
- # Add files from LIC_FILES_CHKSUM to list of license files
- lic_chksum_paths = defaultdict(OrderedDict)
- for path, data in sorted(lic_chksums.items()):
- lic_chksum_paths[os.path.basename(path)][data] = (os.path.join(srcdir, path), data[1], data[2])
- for basename, files in lic_chksum_paths.items():
- if len(files) == 1:
- # Don't copy again a LICENSE already handled as non-generic
- if basename in non_generic_lics:
- continue
- data = list(files.values())[0]
- lic_files_paths.append(tuple([basename] + list(data)))
- else:
- # If there are multiple different license files with identical
- # basenames we rename them to <file>.0, <file>.1, ...
- for i, data in enumerate(files.values()):
- lic_files_paths.append(tuple(["%s.%d" % (basename, i)] + list(data)))
-
- return lic_files_paths
-
-def return_spdx(d, license):
- """
- This function returns the spdx mapping of a license if it exists.
- """
- return d.getVarFlag('SPDXLICENSEMAP', license)
-
-def canonical_license(d, license):
- """
-    Return the canonical (SPDX) form of the license if available (so GPLv3
-    becomes GPL-3.0); for a license named 'X+', return the canonical form of
-    'X' if available plus the trailing '+' (so GPLv3+ becomes GPL-3.0+);
-    otherwise return the passed license unchanged.
- """
- lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
- if not lic and license.endswith('+'):
- lic = d.getVarFlag('SPDXLICENSEMAP', license.rstrip('+'))
- if lic:
- lic += '+'
- return lic or license
-
-def available_licenses(d):
- """
- Return the available licenses by searching the directories specified by
- COMMON_LICENSE_DIR and LICENSE_PATH.
- """
- lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
- (d.getVar('LICENSE_PATH') or '')).split()
-
- licenses = []
- for lic_dir in lic_dirs:
- licenses += os.listdir(lic_dir)
-
- licenses = sorted(licenses)
- return licenses
-
-# Only determine the list of all available licenses once. This assumes that any
-# additions to LICENSE_PATH have been done before this file is parsed.
-AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
-
-def expand_wildcard_licenses(d, wildcard_licenses):
- """
- Return actual spdx format license names if wildcards are used. We expand
- wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
- """
- import fnmatch
- licenses = wildcard_licenses[:]
- spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
- for wld_lic in wildcard_licenses:
- spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
- licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
-
- spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
- for wld_lic in wildcard_licenses:
- licenses += fnmatch.filter(spdx_lics, wld_lic)
-
- licenses = list(set(licenses))
- return licenses
-
-def incompatible_license_contains(license, truevalue, falsevalue, d):
- license = canonical_license(d, license)
- bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()
- bad_licenses = expand_wildcard_licenses(d, bad_licenses)
- return truevalue if license in bad_licenses else falsevalue
-
-def incompatible_pkg_license(d, dont_want_licenses, license):
-    # Handles an "or" of two license sets provided by
-    # flattened_licenses(); pick one that works if possible.
- def choose_lic_set(a, b):
- return a if all(oe.license.license_ok(canonical_license(d, lic),
- dont_want_licenses) for lic in a) else b
-
- try:
- licenses = oe.license.flattened_licenses(license, choose_lic_set)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
-
- incompatible_lic = []
- for l in licenses:
- license = canonical_license(d, l)
- if not oe.license.license_ok(license, dont_want_licenses):
- incompatible_lic.append(license)
-
- return sorted(incompatible_lic)
-
-def incompatible_license(d, dont_want_licenses, package=None):
- """
-    This function checks if a recipe has only incompatible licenses. It also
-    takes the 'or' operand into consideration. dont_want_licenses should be
-    passed as canonical (SPDX) names.
- """
- import oe.license
- license = d.getVar("LICENSE_%s" % package) if package else None
- if not license:
- license = d.getVar('LICENSE')
-
- return incompatible_pkg_license(d, dont_want_licenses, license)
-
-def check_license_flags(d):
- """
- This function checks if a recipe has any LICENSE_FLAGS that
- aren't whitelisted.
-
-    If it does, it returns all LICENSE_FLAGS missing from the whitelist, or
-    all of the LICENSE_FLAGS if there is no whitelist.
-
-    If everything is properly whitelisted, it returns None.
- """
-
- def license_flag_matches(flag, whitelist, pn):
- """
-        Return True if flag matches something in whitelist, False if not.
-
- Before we test a flag against the whitelist, we append _${PN}
- to it. We then try to match that string against the
- whitelist. This covers the normal case, where we expect
- LICENSE_FLAGS to be a simple string like 'commercial', which
- the user typically matches exactly in the whitelist by
-        explicitly appending the package name e.g. 'commercial_foo'.
- If we fail the match however, we then split the flag across
- '_' and append each fragment and test until we either match or
- run out of fragments.
- """
- flag_pn = ("%s_%s" % (flag, pn))
- for candidate in whitelist:
- if flag_pn == candidate:
- return True
-
- flag_cur = ""
- flagments = flag_pn.split("_")
- flagments.pop() # we've already tested the full string
- for flagment in flagments:
- if flag_cur:
- flag_cur += "_"
- flag_cur += flagment
- for candidate in whitelist:
- if flag_cur == candidate:
- return True
- return False
-
- def all_license_flags_match(license_flags, whitelist):
- """ Return all unmatched flags, None if all flags match """
- pn = d.getVar('PN')
- split_whitelist = whitelist.split()
- flags = []
- for flag in license_flags.split():
- if not license_flag_matches(flag, split_whitelist, pn):
- flags.append(flag)
- return flags if flags else None
-
- license_flags = d.getVar('LICENSE_FLAGS')
- if license_flags:
- whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
- if not whitelist:
- return license_flags.split()
- unmatched_flags = all_license_flags_match(license_flags, whitelist)
- if unmatched_flags:
- return unmatched_flags
- return None
-
-def check_license_format(d):
- """
-    This function checks if LICENSE is well defined and validates the
-    operators used in it: license names must be separated by one of the
-    valid operators rather than by plain spaces.
- """
- pn = d.getVar('PN')
- licenses = d.getVar('LICENSE')
- from oe.license import license_operator, license_operator_chars, license_pattern
-
- elements = list(filter(lambda x: x.strip(), license_operator.split(licenses)))
- for pos, element in enumerate(elements):
- if license_pattern.match(element):
- if pos > 0 and license_pattern.match(elements[pos - 1]):
- bb.warn('%s: LICENSE value "%s" has an invalid format - license names ' \
- 'must be separated by the following characters to indicate ' \
- 'the license selection: %s' %
- (pn, licenses, license_operator_chars))
- elif not license_operator.match(element):
- bb.warn('%s: LICENSE value "%s" has an invalid separator "%s" that is not ' \
- 'in the valid list of separators (%s)' %
- (pn, licenses, element, license_operator_chars))
-
-SSTATETASKS += "do_populate_lic"
-do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
-do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
-
-IMAGE_CLASSES_append = " license_image"
-
-python do_populate_lic_setscene () {
- sstate_setscene(d)
-}
-addtask do_populate_lic_setscene
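For context, a hedged sketch of the LIC_FILES_CHKSUM entries that find_license_files() above decodes; the file names and md5 values are placeholders, and beginline/endline restrict the checksummed region exactly as parsed from the URL parameters:

    LICENSE = "MIT"
    LIC_FILES_CHKSUM = "file://COPYING;md5=0123456789abcdef0123456789abcdef \
                        file://src/main.c;beginline=1;endline=20;md5=fedcba9876543210fedcba9876543210"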
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
deleted file mode 100644
index a8c72da3cb..0000000000
--- a/meta/classes/license_image.bbclass
+++ /dev/null
@@ -1,256 +0,0 @@
-python write_package_manifest() {
- # Get list of installed packages
- license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
- bb.utils.mkdirhier(license_image_dir)
- from oe.rootfs import image_list_installed_packages
- from oe.utils import format_pkg_list
-
- pkgs = image_list_installed_packages(d)
- output = format_pkg_list(pkgs)
- open(os.path.join(license_image_dir, 'package.manifest'),
- 'w+').write(output)
-}
-
-python license_create_manifest() {
- import oe.packagedata
- from oe.rootfs import image_list_installed_packages
-
- build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
- if build_images_from_feeds == "1":
- return 0
-
- pkg_dic = {}
- for pkg in sorted(image_list_installed_packages(d)):
- pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
- 'runtime-reverse', pkg)
- pkg_name = os.path.basename(os.readlink(pkg_info))
-
- pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
- if not "LICENSE" in pkg_dic[pkg_name].keys():
- pkg_lic_name = "LICENSE_" + pkg_name
- pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
-
- rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- d.getVar('IMAGE_NAME'), 'license.manifest')
- write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
-}
-
-def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
- import re
- import stat
-
- bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
- bad_licenses = [canonical_license(d, l) for l in bad_licenses]
- bad_licenses = expand_wildcard_licenses(d, bad_licenses)
-
- whitelist = []
- for lic in bad_licenses:
- whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
-
- with open(license_manifest, "w") as license_file:
- for pkg in sorted(pkg_dic):
- if bad_licenses and pkg not in whitelist:
- try:
- licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
- if licenses:
- bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
- (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
- oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
- bad_licenses, canonical_license, d)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
- else:
- pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
- pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
- pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
- if pkg in whitelist:
- bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
-
- if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
- # Rootfs manifest
- license_file.write("PACKAGE NAME: %s\n" % pkg)
- license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
- license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
- license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
-
-                # If the package doesn't contain any files (its size is 0), its license
-                # isn't relevant as far as the final image is concerned, so doing the
-                # license check doesn't make much sense; skip it.
- if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
- continue
- else:
- # Image manifest
- license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
- license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
- license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
- license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
-
- for lic in pkg_dic[pkg]["LICENSES"]:
- lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- pkg_dic[pkg]["PN"], "generic_%s" %
- re.sub(r'\+', '', lic))
-                # Explicitly skip the CLOSED license because it isn't generic
- if lic == "CLOSED":
- continue
-
- if not os.path.exists(lic_file):
- bb.warn("The license listed %s was not in the "\
- "licenses collected for recipe %s"
- % (lic, pkg_dic[pkg]["PN"]))
-
- # Two options here:
- # - Just copy the manifest
- # - Copy the manifest and the license directories
-    # With both options set we see a 0.5 MB increase in core-image-minimal
- copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
- copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
- if rootfs and copy_lic_manifest == "1":
- rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
- 'usr', 'share', 'common-licenses')
- bb.utils.mkdirhier(rootfs_license_dir)
- rootfs_license_manifest = os.path.join(rootfs_license_dir,
- os.path.split(license_manifest)[1])
- if not os.path.exists(rootfs_license_manifest):
- oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
-
- if copy_lic_dirs == "1":
- for pkg in sorted(pkg_dic):
- pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
- bb.utils.mkdirhier(pkg_rootfs_license_dir)
- pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- pkg_dic[pkg]["PN"])
-
- pkg_manifest_licenses = [canonical_license(d, lic) \
- for lic in pkg_dic[pkg]["LICENSES"]]
-
- licenses = os.listdir(pkg_license_dir)
- for lic in licenses:
- rootfs_license = os.path.join(rootfs_license_dir, lic)
- pkg_license = os.path.join(pkg_license_dir, lic)
- pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
-
- if re.match(r"^generic_.*$", lic):
- generic_lic = canonical_license(d,
- re.search(r"^generic_(.*)$", lic).group(1))
-
-                        # Do not copy the generic license into the package if it isn't
-                        # declared in the package's LICENSES.
- if not re.sub(r'\+$', '', generic_lic) in \
- [re.sub(r'\+', '', lic) for lic in \
- pkg_manifest_licenses]:
- continue
-
- if oe.license.license_ok(generic_lic,
- bad_licenses) == False:
- continue
-
- if not os.path.exists(rootfs_license):
- oe.path.copyhardlink(pkg_license, rootfs_license)
-
- if not os.path.exists(pkg_rootfs_license):
- os.symlink(os.path.join('..', lic), pkg_rootfs_license)
- else:
- if (oe.license.license_ok(canonical_license(d,
- lic), bad_licenses) == False or
- os.path.exists(pkg_rootfs_license)):
- continue
-
- oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
- # Fixup file ownership and permissions
- for walkroot, dirs, files in os.walk(rootfs_license_dir):
- for f in files:
- p = os.path.join(walkroot, f)
- os.lchown(p, 0, 0)
- if not os.path.islink(p):
- os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
- for dir in dirs:
- p = os.path.join(walkroot, dir)
- os.lchown(p, 0, 0)
- os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
-
-
-
-def license_deployed_manifest(d):
- """
- Write the license manifest for the deployed recipes.
-    The deployed recipes usually include the bootloader
- and extra files to boot the target.
- """
-
- dep_dic = {}
- man_dic = {}
- lic_dir = d.getVar("LICENSE_DIRECTORY")
-
- dep_dic = get_deployed_dependencies(d)
- for dep in dep_dic.keys():
- man_dic[dep] = {}
-        # It is necessary to mark that this will be used for the image manifest
- man_dic[dep]["IMAGE_MANIFEST"] = True
- man_dic[dep]["PN"] = dep
- man_dic[dep]["FILES"] = \
- " ".join(get_deployed_files(dep_dic[dep]))
- with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
- for line in f.readlines():
- key,val = line.split(": ", 1)
- man_dic[dep][key] = val[:-1]
-
- lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
- d.getVar('IMAGE_NAME'))
- bb.utils.mkdirhier(lic_manifest_dir)
- image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
- write_license_files(d, image_license_manifest, man_dic, rootfs=False)
-
-def get_deployed_dependencies(d):
- """
- Get all the deployed dependencies of an image
- """
-
- deploy = {}
- # Get all the dependencies for the current task (rootfs).
- taskdata = d.getVar("BB_TASKDEPDATA", False)
- depends = list(set([dep[0] for dep
- in list(taskdata.values())
- if not dep[0].endswith("-native")]))
-
-    # To verify what was deployed, check the rootfs dependencies against
-    # the SSTATE_MANIFESTS for the "deploy" task.
- # The manifest file name contains the arch. Because we are not running
- # in the recipe context it is necessary to check every arch used.
- sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
- archs = list(set(d.getVar("SSTATE_ARCHS").split()))
- for dep in depends:
- for arch in archs:
- sstate_manifest_file = os.path.join(sstate_manifest_dir,
- "manifest-%s-%s.deploy" % (arch, dep))
- if os.path.exists(sstate_manifest_file):
- deploy[dep] = sstate_manifest_file
- break
-
- return deploy
-get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
-
-def get_deployed_files(man_file):
- """
- Get the files deployed from the sstate manifest
- """
-
- dep_files = []
- excluded_files = []
- with open(man_file, "r") as manifest:
- all_files = manifest.read()
- for f in all_files.splitlines():
- if ((not (os.path.islink(f) or os.path.isdir(f))) and
- not os.path.basename(f) in excluded_files):
- dep_files.append(os.path.basename(f))
- return dep_files
-
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
-do_rootfs[recrdeptask] += "do_populate_lic"
-
-python do_populate_lic_deploy() {
- license_deployed_manifest(d)
-}
-
-addtask populate_lic_deploy before do_build after do_image_complete
-do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
-
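A short sketch of the configuration read by write_license_files() above, as it might appear in local.conf; the license and package names are assumptions, not defaults:

    # Copy the license manifest (and, with COPY_LIC_DIRS, the license texts)
    # into the image under /usr/share/common-licenses.
    COPY_LIC_MANIFEST = "1"
    COPY_LIC_DIRS = "1"
    # A package carrying an otherwise incompatible license can be whitelisted
    # per license via WHITELIST_<license>.
    INCOMPATIBLE_LICENSE = "GPL-3.0"
    WHITELIST_GPL-3.0 = "hypothetical-package"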
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
deleted file mode 100644
index ba59222c24..0000000000
--- a/meta/classes/linux-kernel-base.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
-# parse kernel ABI version out of <linux/version.h>
-def get_kernelversion_headers(p):
- import re
-
- fn = p + '/include/linux/utsrelease.h'
- if not os.path.isfile(fn):
- # after 2.6.33-rc1
- fn = p + '/include/generated/utsrelease.h'
- if not os.path.isfile(fn):
- fn = p + '/include/linux/version.h'
-
- try:
- f = open(fn, 'r')
- except IOError:
- return None
-
- l = f.readlines()
- f.close()
- r = re.compile("#define UTS_RELEASE \"(.*)\"")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
-
-
-def get_kernelversion_file(p):
- fn = p + '/kernel-abiversion'
-
- try:
- with open(fn, 'r') as f:
- return f.readlines()[0].strip()
- except IOError:
- return None
-
-def linux_module_packages(s, d):
- suffix = ""
- return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
-
-# that's all
-
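As a worked example of linux_module_packages() above (the input module names are hypothetical), each name is lowercased, '_' becomes '-' and '@' becomes '+':

    # linux_module_packages("SND_USB_AUDIO nf_tables", d)
    #   -> "kernel-module-snd-usb-audio kernel-module-nf-tables"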
diff --git a/meta/classes/linuxloader.bbclass b/meta/classes/linuxloader.bbclass
deleted file mode 100644
index ec0e0556dd..0000000000
--- a/meta/classes/linuxloader.bbclass
+++ /dev/null
@@ -1,70 +0,0 @@
-def get_musl_loader_arch(d):
- import re
- ldso_arch = None
-
- targetarch = d.getVar("TARGET_ARCH")
- if targetarch.startswith("microblaze"):
- ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}"
- elif targetarch.startswith("mips"):
- ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
- elif targetarch == "powerpc":
- ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
- elif targetarch == "powerpc64":
- ldso_arch = "powerpc64"
- elif targetarch == "x86_64":
- ldso_arch = "x86_64"
- elif re.search("i.86", targetarch):
- ldso_arch = "i386"
- elif targetarch.startswith("arm"):
- ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
- elif targetarch.startswith("aarch64"):
- ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
- elif targetarch.startswith("riscv64"):
- ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
- return ldso_arch
-
-def get_musl_loader(d):
- import re
- return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
-
-def get_glibc_loader(d):
- import re
-
- dynamic_loader = None
- targetarch = d.getVar("TARGET_ARCH")
- if targetarch in ["powerpc", "microblaze"]:
- dynamic_loader = "${base_libdir}/ld.so.1"
- elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
- dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
- elif targetarch.startswith("mips"):
- dynamic_loader = "${base_libdir}/ld.so.1"
- elif targetarch == "powerpc64":
- dynamic_loader = "${base_libdir}/ld64.so.1"
- elif targetarch == "x86_64":
- dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
- elif re.search("i.86", targetarch):
- dynamic_loader = "${base_libdir}/ld-linux.so.2"
- elif targetarch == "arm":
- dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
- elif targetarch.startswith("aarch64"):
- dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
- elif targetarch.startswith("riscv64"):
- dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
- return dynamic_loader
-
-def get_linuxloader(d):
- overrides = d.getVar("OVERRIDES").split(":")
-
- if "libc-baremetal" in overrides:
- return None
-
- if "libc-musl" in overrides:
- dynamic_loader = get_musl_loader(d)
- else:
- dynamic_loader = get_glibc_loader(d)
- return dynamic_loader
-
-get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
-get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
-get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
-get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
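A hedged sketch of how a recipe might consume the helper above; the variable name is hypothetical, and the returned value may still contain ${base_libdir}, which BitBake expands when the variable is referenced:

    # Loader path for the current TARGET_ARCH / libc selection, e.g.
    # ${base_libdir}/ld-linux-x86-64.so.2 for glibc or /lib/ld-musl-x86_64.so.1 for musl.
    EXAMPLE_LDSO = "${@get_linuxloader(d) or ''}"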
diff --git a/meta/classes/live-vm-common.bbclass b/meta/classes/live-vm-common.bbclass
deleted file mode 100644
index 74e7074a53..0000000000
--- a/meta/classes/live-vm-common.bbclass
+++ /dev/null
@@ -1,94 +0,0 @@
-# Some of the vars for the vm and live images conflict; this function
-# is used to fix the problem.
-def set_live_vm_vars(d, suffix):
- vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
- for var in vars:
- var_with_suffix = var + '_' + suffix
- if d.getVar(var):
-            bb.warn('Found potentially conflicting var %s, please use %s rather than %s' % \
- (var, var_with_suffix, var))
- elif d.getVar(var_with_suffix):
- d.setVar(var, d.getVar(var_with_suffix))
-
-
-EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
-EFI_PROVIDER ?= "grub-efi"
-EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
-
-MKDOSFS_EXTRAOPTS ??= "-S 512"
-
-# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
-# contain "efi". This way legacy is supported by default if neither is
-# specified, maintaining the original behavior.
-def pcbios(d):
- pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
- if pcbios == "0":
- pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
- return pcbios
-
-PCBIOS = "${@pcbios(d)}"
-PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
-
-# efi_populate_common DEST BOOTLOADER
-efi_populate_common() {
- # DEST must be the root of the image so that EFIDIR is not
- # nested under a top level directory.
- DEST=$1
-
- install -d ${DEST}${EFIDIR}
-
- install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
-}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
-    # Build an EFI directory to create efi.img
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
-
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
-
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
-
-inherit ${EFI_CLASS}
-inherit ${PCBIOS_CLASS}
-
-populate_kernel() {
- dest=$1
- install -d $dest
-
- # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
- bbnote "Trying to install ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} as $dest/${KERNEL_IMAGETYPE}"
- if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/${KERNEL_IMAGETYPE}
- else
- bbwarn "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} doesn't exist"
- fi
-
- # initrd is made of concatenation of multiple filesystem images
- if [ -n "${INITRD}" ]; then
- rm -f $dest/initrd
- for fs in ${INITRD}
- do
- if [ -s "$fs" ]; then
- cat $fs >> $dest/initrd
- else
- bbfatal "$fs is invalid. initrd image creation failed."
- fi
- done
- chmod 0644 $dest/initrd
- fi
-}
-
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
deleted file mode 100644
index a0c94e98c7..0000000000
--- a/meta/classes/logging.bbclass
+++ /dev/null
@@ -1,101 +0,0 @@
-# The following logging mechanisms are to be used in bash functions of recipes.
-# They are intended to map one to one in intention and output format with the
-# python recipe logging functions of a similar naming convention: bb.plain(),
-# bb.note(), etc.
-
-LOGFIFO = "${T}/fifo.${@os.getpid()}"
-
-# Print the output exactly as it is passed in. Typically used for output of
-# tasks that should be seen on the console. Use sparingly.
-# Output: logs console
-bbplain() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbplain $*" > ${LOGFIFO}
- else
- echo "$*"
- fi
-}
-
-# Notify the user of a noteworthy condition.
-# Output: logs
-bbnote() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbnote $*" > ${LOGFIFO}
- else
- echo "NOTE: $*"
- fi
-}
-
-# Print a warning to the log. Warnings are non-fatal, and do not
-# indicate a build failure.
-# Output: logs console
-bbwarn() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbwarn $*" > ${LOGFIFO}
- else
- echo "WARNING: $*"
- fi
-}
-
-# Print an error to the log. Errors are non-fatal in that the build can
-# continue, but they do indicate a build failure.
-# Output: logs console
-bberror() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bberror $*" > ${LOGFIFO}
- else
- echo "ERROR: $*"
- fi
-}
-
-# Print a fatal error to the log. Fatal errors indicate build failure
-# and halt the build, exiting with an error code.
-# Output: logs console
-bbfatal() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbfatal $*" > ${LOGFIFO}
- else
- echo "ERROR: $*"
- fi
- exit 1
-}
-
-# Like bbfatal, except prevents the suppression of the error log by
-# bitbake's UI.
-# Output: logs console
-bbfatal_log() {
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbfatal_log $*" > ${LOGFIFO}
- else
- echo "ERROR: $*"
- fi
- exit 1
-}
-
-# Print debug messages. These are appropriate for progress checkpoint
-# messages to the logs. Depending on the debug log level, they may also
-# go to the console.
-# Output: logs console
-# Usage: bbdebug 1 "first level debug message"
-# bbdebug 2 "second level debug message"
-bbdebug() {
- USAGE='Usage: bbdebug [123] "message"'
- if [ $# -lt 2 ]; then
- bbfatal "$USAGE"
- fi
-
- # Strip off the debug level and ensure it is an integer
- DBGLVL=$1; shift
- NONDIGITS=$(echo "$DBGLVL" | tr -d "[:digit:]")
- if [ "$NONDIGITS" ]; then
- bbfatal "$USAGE"
- fi
-
- # All debug output is printed to the logs
- if [ -p ${LOGFIFO} ] ; then
- printf "%b\0" "bbdebug $DBGLVL $*" > ${LOGFIFO}
- else
- echo "DEBUG: $*"
- fi
-}
-
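A minimal usage sketch of the shell helpers above inside a recipe task; the task body is purely illustrative:

    do_install_append () {
        bbnote "Installing optional configuration"
        if [ ! -e "${WORKDIR}/extra.conf" ]; then
            bbwarn "extra.conf not found, continuing without it"
        fi
        bbdebug 2 "optional configuration step finished"
    }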
diff --git a/meta/classes/manpages.bbclass b/meta/classes/manpages.bbclass
deleted file mode 100644
index 1e66780646..0000000000
--- a/meta/classes/manpages.bbclass
+++ /dev/null
@@ -1,44 +0,0 @@
-# Inherit this class to enable or disable building and installation of manpages
-# depending on whether 'api-documentation' is in DISTRO_FEATURES. Such building
-# tends to pull in the entire XML stack and other tools, so it's not enabled
-# by default.
-PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
-
-inherit qemu
-
-# Manual files are usually packaged in ${PN}-doc, except for man-pages
-MAN_PKG ?= "${PN}-doc"
-
-# only add man-db to RDEPENDS when manual files are built and installed
-RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
-
-pkg_postinst_append_${MAN_PKG} () {
- # only update manual page index caches when manual files are built and installed
- if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
- if test -n "$D"; then
- if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
- sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
- chown -R root:root $D${mandir}
- mkdir -p $D${localstatedir}/cache/man
- cd $D${mandir}
- find . -name index.db | while read index; do
- mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
- mv ${index} $D${localstatedir}/cache/man/${index}
- chown man:man $D${localstatedir}/cache/man/${index}
- done
- cd -
- else
- $INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
- fi
- else
- mandb -q
- fi
- fi
-}
-
-pkg_postrm_append_${MAN_PKG} () {
- # only update manual page index caches when manual files are built and installed
- if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
- mandb -q
- fi
-}
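A recipe opting in typically only inherits the class and defines what the 'manpages' PACKAGECONFIG means for its own build system; the configure options and dependencies below are illustrative, not prescribed by the class:

    inherit manpages
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native docbook-xsl-stylesheets-native"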
diff --git a/meta/classes/mcextend.bbclass b/meta/classes/mcextend.bbclass
index 0f8f962298..a489eeb3c7 100644
--- a/meta/classes/mcextend.bbclass
+++ b/meta/classes/mcextend.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
python mcextend_virtclass_handler () {
cls = e.data.getVar("BBEXTENDCURR")
variant = e.data.getVar("BBEXTENDVARIANT")
diff --git a/meta/classes/meson.bbclass b/meta/classes/meson.bbclass
deleted file mode 100644
index ff52d20e56..0000000000
--- a/meta/classes/meson.bbclass
+++ /dev/null
@@ -1,186 +0,0 @@
-inherit siteinfo python3native
-
-DEPENDS_append = " meson-native ninja-native"
-
-# As Meson enforces out-of-tree builds we can just use cleandirs
-B = "${WORKDIR}/build"
-do_configure[cleandirs] = "${B}"
-
-# Where the meson.build build configuration is
-MESON_SOURCEPATH = "${S}"
-
-def noprefix(var, d):
- return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
-
-MESON_BUILDTYPE ?= "plain"
-MESONOPTS = " --prefix ${prefix} \
- --buildtype ${MESON_BUILDTYPE} \
- --bindir ${@noprefix('bindir', d)} \
- --sbindir ${@noprefix('sbindir', d)} \
- --datadir ${@noprefix('datadir', d)} \
- --libdir ${@noprefix('libdir', d)} \
- --libexecdir ${@noprefix('libexecdir', d)} \
- --includedir ${@noprefix('includedir', d)} \
- --mandir ${@noprefix('mandir', d)} \
- --infodir ${@noprefix('infodir', d)} \
- --sysconfdir ${sysconfdir} \
- --localstatedir ${localstatedir} \
- --sharedstatedir ${sharedstatedir} \
- --wrap-mode nodownload"
-
-EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
-
-MESON_CROSS_FILE = ""
-MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
-MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
-
-def meson_array(var, d):
- items = d.getVar(var).split()
- return repr(items[0] if len(items) == 1 else items)
-
-# Map our ARCH values to what Meson expects:
-# http://mesonbuild.com/Reference-tables.html#cpu-families
-def meson_cpu_family(var, d):
- import re
- arch = d.getVar(var)
- if arch == 'powerpc':
- return 'ppc'
- elif arch == 'powerpc64' or arch == 'powerpc64le':
- return 'ppc64'
- elif arch == 'armeb':
- return 'arm'
- elif arch == 'aarch64_be':
- return 'aarch64'
- elif arch == 'mipsel':
- return 'mips'
- elif arch == 'mips64el':
- return 'mips64'
- elif re.match(r"i[3-6]86", arch):
- return "x86"
- elif arch == "microblazeel":
- return "microblaze"
- else:
- return arch
-
-# Map our OS values to what Meson expects:
-# https://mesonbuild.com/Reference-tables.html#operating-system-names
-def meson_operating_system(var, d):
- os = d.getVar(var)
- if "mingw" in os:
- return "windows"
-    # avoid e.g. 'linux-gnueabi'
- elif "linux" in os:
- return "linux"
- else:
- return os
-
-def meson_endian(prefix, d):
- arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
- sitedata = siteinfo_data_for_machine(arch, os, d)
- if "endian-little" in sitedata:
- return "little"
- elif "endian-big" in sitedata:
- return "big"
- else:
-        bb.fatal("Cannot determine endianness for %s-%s" % (arch, os))
-
-addtask write_config before do_configure
-do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
-do_write_config() {
-    # This needs to be Python to split the args into single-element lists
- cat >${WORKDIR}/meson.cross <<EOF
-[binaries]
-c = ${@meson_array('CC', d)}
-cpp = ${@meson_array('CXX', d)}
-ar = ${@meson_array('AR', d)}
-nm = ${@meson_array('NM', d)}
-strip = ${@meson_array('STRIP', d)}
-readelf = ${@meson_array('READELF', d)}
-pkgconfig = 'pkg-config'
-llvm-config = 'llvm-config${LLVMVERSION}'
-
-[properties]
-needs_exe_wrapper = true
-c_args = ${@meson_array('CFLAGS', d)}
-c_link_args = ${@meson_array('LDFLAGS', d)}
-cpp_args = ${@meson_array('CXXFLAGS', d)}
-cpp_link_args = ${@meson_array('LDFLAGS', d)}
-gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
-
-[host_machine]
-system = '${@meson_operating_system('HOST_OS', d)}'
-cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
-cpu = '${HOST_ARCH}'
-endian = '${@meson_endian('HOST', d)}'
-
-[target_machine]
-system = '${@meson_operating_system('TARGET_OS', d)}'
-cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
-cpu = '${TARGET_ARCH}'
-endian = '${@meson_endian('TARGET', d)}'
-EOF
-}
-
-CONFIGURE_FILES = "meson.build"
-
-meson_do_configure() {
-    # Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards
- # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
- unset LD
-
- # Work around "Meson fails if /tmp is mounted with noexec #2972"
- mkdir -p "${B}/meson-private/tmp"
- export TMPDIR="${B}/meson-private/tmp"
- bbnote Executing meson ${EXTRA_OEMESON}...
- if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
- bbfatal_log meson failed
- fi
-}
-
-override_native_tools() {
- # Set these so that meson uses the native tools for its build sanity tests,
- # which require executables to be runnable. The cross file will still
- # override these for the target build.
- export CC="${BUILD_CC}"
- export CXX="${BUILD_CXX}"
- export LD="${BUILD_LD}"
- export AR="${BUILD_AR}"
- export STRIP="${BUILD_STRIP}"
-    # These contain *target* flags but will be used as *native* flags. The
-    # correct native flags will be passed via -Dc_args and so on; unset them so
-    # they don't interfere with tools invoked by Meson (such as g-ir-scanner).
- unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
-}
-
-meson_do_configure_prepend_class-target() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-nativesdk() {
- override_native_tools
-}
-
-meson_do_configure_prepend_class-native() {
- export PKG_CONFIG="pkg-config-native"
-}
-
-python meson_do_qa_configure() {
- import re
- warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
- with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
- log = logfile.read()
- for (prop, value) in warn_re.findall(log):
- bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
-}
-do_configure[postfuncs] += "meson_do_qa_configure"
-
-do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
-meson_do_compile() {
- ninja -v ${PARALLEL_MAKE}
-}
-
-meson_do_install() {
- DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
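A hedged sketch of a recipe built with the class above; the option and buildtype values are placeholders that end up on the meson command line via EXTRA_OEMESON and MESONOPTS:

    inherit meson pkgconfig

    # Passed to 'meson' after MESONOPTS and the generated ${WORKDIR}/meson.cross.
    EXTRA_OEMESON = "-Dtests=false"
    MESON_BUILDTYPE = "release"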
diff --git a/meta/classes/meta.bbclass b/meta/classes/meta.bbclass
deleted file mode 100644
index 5e6890238b..0000000000
--- a/meta/classes/meta.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-
-PACKAGES = ""
-
-do_build[recrdeptask] = "do_build"
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 58bb4c555a..6842119b6b 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,42 +1,10 @@
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-
-def base_detect_revision(d):
- path = base_get_scmbasepath(d)
- return base_get_metadata_git_revision(path, d)
-
-def base_detect_branch(d):
- path = base_get_scmbasepath(d)
- return base_get_metadata_git_branch(path, d)
-
-def base_get_scmbasepath(d):
- return os.path.join(d.getVar('COREBASE'), 'meta')
-
-def base_get_metadata_svn_revision(path, d):
- # This only works with older subversion. For newer versions
- # this function will need to be fixed by someone interested
- revision = "<unknown>"
- try:
- with open("%s/.svn/entries" % path) as f:
- revision = f.readlines()[3].strip()
- except (IOError, IndexError):
- pass
- return revision
-
-def base_get_metadata_git_branch(path, d):
- import bb.process
-
- try:
- rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
- except bb.process.ExecutionError:
- rev = '<unknown>'
- return rev.strip()
-
-def base_get_metadata_git_revision(path, d):
- import bb.process
-
- try:
- rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
- except bb.process.ExecutionError:
- rev = '<unknown>'
- return rev.strip()
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+METADATA_BRANCH := "${@oe.buildcfg.detect_branch(d)}"
+METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
+METADATA_REVISION := "${@oe.buildcfg.detect_revision(d)}"
+METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
diff --git a/meta/classes/migrate_localcount.bbclass b/meta/classes/migrate_localcount.bbclass
index 810a541316..1d00c110e2 100644
--- a/meta/classes/migrate_localcount.bbclass
+++ b/meta/classes/migrate_localcount.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
LOCALCOUNT_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv-localcount-exports.inc"
diff --git a/meta/classes/mime-xdg.bbclass b/meta/classes/mime-xdg.bbclass
deleted file mode 100644
index 642a5b7595..0000000000
--- a/meta/classes/mime-xdg.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# This class creates mime <-> application associations based on entry
-# 'MimeType' in *.desktop files
-#
-
-DEPENDS += "desktop-file-utils"
-PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
-DESKTOPDIR = "${datadir}/applications"
-
-# There are recipes out there installing their .desktop files as absolute
-# symlinks. For us these are dangling and cannot be introspected for "MimeType"
-# easily. By adding package names to MIME_XDG_PACKAGES, the packager can force
-# proper update-desktop-database handling. Note that all introspection is
-# skipped when MIME_XDG_PACKAGES is not empty.
-MIME_XDG_PACKAGES ?= ""
-
-mime_xdg_postinst() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
- mlprefix=${MLPREFIX} \
- desktop_dir=${DESKTOPDIR}
-else
- update-desktop-database $D${DESKTOPDIR}
-fi
-}
-
-mime_xdg_postrm() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
- mlprefix=${MLPREFIX} \
- desktop_dir=${DESKTOPDIR}
-else
- update-desktop-database $D${DESKTOPDIR}
-fi
-}
-
-python populate_packages_append () {
- packages = d.getVar('PACKAGES').split()
- pkgdest = d.getVar('PKGDEST')
- desktop_base = d.getVar('DESKTOPDIR')
- forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
-
- for pkg in packages:
- desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
- if d.getVar('MIME_XDG_PACKAGES') == '':
- desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
- if os.path.exists(desktop_dir):
- for df in os.listdir(desktop_dir):
- if df.endswith('.desktop'):
- try:
- with open(desktop_dir + '/'+ df, 'r') as f:
- for line in f.read().split('\n'):
- if 'MimeType' in line:
- desktops_with_mime_found = True
- break;
- except:
- bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % desktop_dir + '/'+ df)
- if desktops_with_mime_found:
- break
- if desktops_with_mime_found:
- bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('mime_xdg_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('mime_xdg_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
- bb.note("adding desktop-file-utils dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
-}
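A sketch of a recipe using the class above; setting MIME_XDG_PACKAGES forces the postinst/postrm handling when a .desktop file cannot be introspected (the value shown is an assumption):

    inherit mime-xdg
    # Skip introspection and always register this package's desktop files.
    MIME_XDG_PACKAGES = "${PN}"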
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
deleted file mode 100644
index bb99bc35cb..0000000000
--- a/meta/classes/mime.bbclass
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# This class is used by recipes installing mime types
-#
-
-DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
-PACKAGE_WRITE_DEPS += "shared-mime-info-native"
-MIMEDIR = "${datadir}/mime"
-
-mime_postinst() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
- mlprefix=${MLPREFIX} \
- mimedir=${MIMEDIR}
-else
- echo "Updating MIME database... this may take a while."
- update-mime-database $D${MIMEDIR}
-fi
-}
-
-mime_postrm() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
- mlprefix=${MLPREFIX} \
- mimedir=${MIMEDIR}
-else
- echo "Updating MIME database... this may take a while."
-	# $D${MIMEDIR}/packages belongs to the package shared-mime-info-data;
-	# packages like libfm-mime depend on shared-mime-info-data.
-	# After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
-	# is removed, but update-mime-database needs this dir to update the
-	# database; as a workaround, create one and remove it later.
- if [ ! -d $D${MIMEDIR}/packages ]; then
- mkdir -p $D${MIMEDIR}/packages
- update-mime-database $D${MIMEDIR}
- rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
- else
- update-mime-database $D${MIMEDIR}
-fi
-fi
-}
-
-python populate_packages_append () {
- packages = d.getVar('PACKAGES').split()
- pkgdest = d.getVar('PKGDEST')
- mimedir = d.getVar('MIMEDIR')
-
- for pkg in packages:
- mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
- mimes_types_found = False
- if os.path.exists(mime_packages_dir):
- for f in os.listdir(mime_packages_dir):
- if f.endswith('.xml'):
- mimes_types_found = True
- break
- if mimes_types_found:
- bb.note("adding mime postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('mime_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('mime_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
- if pkg != 'shared-mime-info-data':
- bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
-}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
deleted file mode 100644
index 87bba41472..0000000000
--- a/meta/classes/mirrors.bbclass
+++ /dev/null
@@ -1,76 +0,0 @@
-MIRRORS += "\
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian/20180310T215105Z/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \
-${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \
-${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \
-${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \
-${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
-${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \
-${GNUPG_MIRROR} ftp://mirrors.dotsrc.org/gcrypt \n \
-ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
-ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
-ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
-ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
-http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
-http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
-${APACHE_MIRROR} http://www.us.apache.org/dist \n \
-${APACHE_MIRROR} http://archive.apache.org/dist \n \
-http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
-${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
-${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
-ftp://sourceware.org/pub http://mirrors.kernel.org/sourceware \n \
-ftp://sourceware.org/pub http://gd.tuwien.ac.at/gnu/sourceware \n \
-ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourceware \n \
-cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
-cvs://.*/.* http://sources.openembedded.org/ \n \
-svn://.*/.* http://sources.openembedded.org/ \n \
-git://.*/.* http://sources.openembedded.org/ \n \
-hg://.*/.* http://sources.openembedded.org/ \n \
-bzr://.*/.* http://sources.openembedded.org/ \n \
-p4://.*/.* http://sources.openembedded.org/ \n \
-osc://.*/.* http://sources.openembedded.org/ \n \
-https?$://.*/.* http://sources.openembedded.org/ \n \
-ftp://.*/.* http://sources.openembedded.org/ \n \
-npm://.*/?.* http://sources.openembedded.org/ \n \
-${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
-${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
-"
-
-# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
-# where git native protocol fetches may fail due to local firewall rules, etc.
-
-MIRRORS += "\
-git://salsa.debian.org/.* git://salsa.debian.org/PATH;protocol=https \n \
-git://git.gnome.org/.* git://gitlab.gnome.org/GNOME/PATH;protocol=https \n \
-git://git.savannah.gnu.org/.* git://git.savannah.gnu.org/git/PATH;protocol=https \n \
-git://git.yoctoproject.org/.* git://git.yoctoproject.org/git/PATH;protocol=https \n \
-git://.*/.* git://HOST/PATH;protocol=https \n \
-"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
deleted file mode 100644
index 27bd69ff33..0000000000
--- a/meta/classes/module-base.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
-inherit kernel-arch
-
-# We do the dependency this way because the output is not preserved
-# in sstate, so we must force do_compile to run (once).
-do_configure[depends] += "make-mod-scripts:do_compile"
-
-export OS = "${TARGET_OS}"
-export CROSS_COMPILE = "${TARGET_PREFIX}"
-
-# This points to the build artefacts from the main kernel build
-# such as .config and System.map
-# Confusingly it is not the module build output (which is ${B}) but
-# we didn't pick the name.
-export KBUILD_OUTPUT = "${STAGING_KERNEL_BUILDDIR}"
-
-export KERNEL_VERSION = "${@oe.utils.read_file('${STAGING_KERNEL_BUILDDIR}/kernel-abiversion')}"
-KERNEL_OBJECT_SUFFIX = ".ko"
-
-# kernel modules are generally machine specific
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
deleted file mode 100644
index c0dfa35061..0000000000
--- a/meta/classes/module.bbclass
+++ /dev/null
@@ -1,74 +0,0 @@
-inherit module-base kernel-module-split pkgconfig
-
-EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
-
-MODULES_INSTALL_TARGET ?= "modules_install"
-MODULES_MODULE_SYMVERS_LOCATION ?= ""
-
-python __anonymous () {
- depends = d.getVar('DEPENDS')
- extra_symbols = []
- for dep in depends.split():
- if dep.startswith("kernel-module-"):
- extra_symbols.append("${STAGING_INCDIR}/" + dep + "/Module.symvers")
- d.setVar('KBUILD_EXTRA_SYMBOLS', " ".join(extra_symbols))
-}
-
-python do_devshell_prepend () {
- os.environ['CFLAGS'] = ''
- os.environ['CPPFLAGS'] = ''
- os.environ['CXXFLAGS'] = ''
- os.environ['LDFLAGS'] = ''
-
- os.environ['KERNEL_PATH'] = d.getVar('STAGING_KERNEL_DIR')
- os.environ['KERNEL_SRC'] = d.getVar('STAGING_KERNEL_DIR')
- os.environ['KERNEL_VERSION'] = d.getVar('KERNEL_VERSION')
- os.environ['CC'] = d.getVar('KERNEL_CC')
- os.environ['LD'] = d.getVar('KERNEL_LD')
- os.environ['AR'] = d.getVar('KERNEL_AR')
- os.environ['O'] = d.getVar('STAGING_KERNEL_BUILDDIR')
- kbuild_extra_symbols = d.getVar('KBUILD_EXTRA_SYMBOLS')
- if kbuild_extra_symbols:
- os.environ['KBUILD_EXTRA_SYMBOLS'] = kbuild_extra_symbols
- else:
- os.environ['KBUILD_EXTRA_SYMBOLS'] = ''
-}
-
-module_do_compile() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
- KERNEL_VERSION=${KERNEL_VERSION} \
- CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
- AR="${KERNEL_AR}" \
- O=${STAGING_KERNEL_BUILDDIR} \
- KBUILD_EXTRA_SYMBOLS="${KBUILD_EXTRA_SYMBOLS}" \
- ${MAKE_TARGETS}
-}
-
-module_do_install() {
- unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
- INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
- CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
- O=${STAGING_KERNEL_BUILDDIR} \
- ${MODULES_INSTALL_TARGET}
-
- if [ ! -e "${B}/${MODULES_MODULE_SYMVERS_LOCATION}/Module.symvers" ] ; then
- bbwarn "Module.symvers not found in ${B}/${MODULES_MODULE_SYMVERS_LOCATION}"
- bbwarn "Please consider setting MODULES_MODULE_SYMVERS_LOCATION to a"
- bbwarn "directory below B to get correct inter-module dependencies"
- else
- install -Dm0644 "${B}/${MODULES_MODULE_SYMVERS_LOCATION}"/Module.symvers ${D}${includedir}/${BPN}/Module.symvers
- # Module.symvers contains absolute path to the build directory.
- # While it doesn't actually seem to matter which path is specified,
- # clear them out to avoid confusion
- sed -e 's:${B}/::g' -i ${D}${includedir}/${BPN}/Module.symvers
- fi
-}
-
-EXPORT_FUNCTIONS do_compile do_install
-
-# add all split modules to PN RDEPENDS, PN can be empty now
-KERNEL_MODULES_META_PACKAGE = "${PN}"
-FILES_${PN} = ""
-ALLOW_EMPTY_${PN} = "1"
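For reference, a recipe built with this class normally just inherits it and points
SRC_URI at the module sources; the exported module_do_compile/module_do_install above
then drive the kernel's makefiles. A minimal sketch (illustrative only, hypothetical
file names):

    SUMMARY = "Example out-of-tree kernel module"
    LICENSE = "GPL-2.0-only"

    inherit module

    SRC_URI = "file://Makefile \
               file://hello.c"
    S = "${WORKDIR}"

    # The class runs oe_runmake against ${STAGING_KERNEL_DIR}, so the Makefile only
    # needs the usual obj-m / modules_install targets.
    RPROVIDES:${PN} += "kernel-module-hello"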
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index 9f726e4537..b6c09969b1 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
python multilib_virtclass_handler () {
cls = e.data.getVar("BBEXTENDCURR")
variant = e.data.getVar("BBEXTENDVARIANT")
@@ -24,6 +30,9 @@ python multilib_virtclass_handler () {
if val:
e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
+ # We nearly don't need this but dependencies on NON_MULTILIB_RECIPES don't work without it
+ d.setVar("SSTATE_ARCHS_TUNEPKG", "${@all_multilib_tune_values(d, 'TUNE_PKGARCH')}")
+
overrides = e.data.getVar("OVERRIDES", False)
pn = e.data.getVar("PN", False)
overrides = overrides.replace("pn-${PN}", "pn-${PN}:pn-" + pn)
@@ -35,7 +44,7 @@ python multilib_virtclass_handler () {
e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
+ target_vendor = e.data.getVar("TARGET_VENDOR:" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
return
@@ -45,6 +54,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
@@ -65,24 +75,25 @@ python multilib_virtclass_handler () {
override = ":virtclass-multilib-" + variant
- blacklist = e.data.getVarFlag('PNBLACKLIST', e.data.getVar('PN'))
- if blacklist:
+ skip_msg = e.data.getVarFlag('SKIP_RECIPE', e.data.getVar('PN'))
+ if skip_msg:
pn_new = variant + "-" + e.data.getVar('PN')
- if not e.data.getVarFlag('PNBLACKLIST', pn_new):
- e.data.setVarFlag('PNBLACKLIST', pn_new, blacklist)
+ if not e.data.getVarFlag('SKIP_RECIPE', pn_new):
+ e.data.setVarFlag('SKIP_RECIPE', pn_new, skip_msg)
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
- # Expand WHITELIST_GPL-3.0 with multilib prefix
- pkgs = e.data.getVar("WHITELIST_GPL-3.0")
- for pkg in pkgs.split():
- pkgs += " " + variant + "-" + pkg
- e.data.setVar("WHITELIST_GPL-3.0", pkgs)
+ # Expand INCOMPATIBLE_LICENSE_EXCEPTIONS with multilib prefix
+ pkgs = e.data.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS")
+ if pkgs:
+ for pkg in pkgs.split():
+ pkgs += " " + variant + "-" + pkg
+ e.data.setVar("INCOMPATIBLE_LICENSE_EXCEPTIONS", pkgs)
# DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
- newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
+ newtune = e.data.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + variant, False)
if newtune:
e.data.setVar("DEFAULTTUNE", newtune)
}
@@ -92,6 +103,10 @@ multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
python __anonymous () {
if bb.data.inherits_class('image', d):
+ # set rpm preferred file color for 32-bit multilib image
+ if d.getVar("SITEINFO_BITS") == "32":
+ d.setVar("RPM_PREFER_ELF_ARCH", "1")
+
variant = d.getVar("BBEXTENDVARIANT")
import oe.classextend
@@ -105,7 +120,6 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- bb.build.deltask('do_populate_sdk', d)
bb.build.deltask('do_populate_sdk_ext', d)
return
}
@@ -126,6 +140,7 @@ python multilib_virtclass_handler_postkeyexp () {
return
clsextend.map_depends_variable("DEPENDS")
+ clsextend.map_depends_variable("PACKAGE_WRITE_DEPS")
clsextend.map_variable("PROVIDES")
if bb.data.inherits_class('cross-canadian', d):
@@ -177,7 +192,7 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ for alt_name in (d.getVar('ALTERNATIVE:%s' % pkg) or "").split():
# ALTERNATIVE_PRIORITY_pkg[tool] = priority
alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
# ALTERNATIVE_PRIORITY[tool] = priority
@@ -192,12 +207,12 @@ def reset_alternative_priority(d):
bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
-PACKAGEFUNCS_append = " do_package_qa_multilib"
+PACKAGEFUNCS:append = " do_package_qa_multilib"
python do_package_qa_multilib() {
def check_mlprefix(pkg, var, mlprefix):
- values = bb.utils.explode_deps(d.getVar('%s_%s' % (var, pkg)) or d.getVar(var) or "")
+ values = bb.utils.explode_deps(d.getVar('%s:%s' % (var, pkg)) or d.getVar(var) or "")
candidates = []
for i in values:
if i.startswith('virtual/'):
@@ -211,7 +226,7 @@ python do_package_qa_multilib() {
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
- package_qa_handle_error("multilib", msg, d)
+ oe.qa.handle_error("multilib", msg, d)
ml = d.getVar('MLPREFIX')
if not ml:
@@ -229,4 +244,5 @@ python do_package_qa_multilib() {
check_mlprefix(pkg, 'RSUGGESTS', ml)
check_mlprefix(pkg, 'RREPLACES', ml)
check_mlprefix(pkg, 'RCONFLICTS', ml)
+ oe.qa.exit_if_errors(d)
}
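As background for the handler above: multilib is enabled from the distro or local
configuration, and the virtclass-multilib-<variant> override set here is what routes
the per-variant DEFAULTTUNE and TARGET_VENDOR values. A typical sketch for a 64-bit
x86 machine with an additional 32-bit library variant:

    # local.conf sketch
    MACHINE = "qemux86-64"
    require conf/multilib.conf
    MULTILIBS = "multilib:lib32"
    DEFAULTTUNE:virtclass-multilib-lib32 = "x86"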
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
index 98f65c8aae..6095d278dd 100644
--- a/meta/classes/multilib_global.bbclass
+++ b/meta/classes/multilib_global.bbclass
@@ -1,6 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
def preferred_ml_updates(d):
- # If any PREFERRED_PROVIDER or PREFERRED_VERSION are set,
- # we need to mirror these variables in the multilib case;
+ # If any of PREFERRED_PROVIDER, PREFERRED_RPROVIDER, REQUIRED_VERSION
+ # or PREFERRED_VERSION are set, we need to mirror these variables in
+ # the multilib case;
multilibs = d.getVar('MULTILIBS') or ""
if not multilibs:
return
@@ -11,43 +18,54 @@ def preferred_ml_updates(d):
if len(eext) > 1 and eext[0] == 'multilib':
prefixes.append(eext[1])
- versions = []
+ required_versions = []
+ preferred_versions = []
providers = []
rproviders = []
for v in d.keys():
+ if v.startswith("REQUIRED_VERSION_"):
+ required_versions.append(v)
if v.startswith("PREFERRED_VERSION_"):
- versions.append(v)
+ preferred_versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
if v.startswith("PREFERRED_RPROVIDER_"):
rproviders.append(v)
- for v in versions:
- val = d.getVar(v, False)
- pkg = v.replace("PREFERRED_VERSION_", "")
- if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
- continue
- if '-cross-' in pkg and '${' in pkg:
+ def sort_versions(versions, keyword):
+ version_str = "_".join([keyword, "VERSION", ""])
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace(version_str, "")
+ if pkg.endswith("-native") or "-crosssdk-" in pkg or pkg.startswith(("nativesdk-", "virtual/nativesdk-")):
+ continue
+ if '-cross-' in pkg and '${' in pkg:
+ for p in prefixes:
+ localdata = bb.data.createCopy(d)
+ override = ":virtclass-multilib-" + p
+ localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
+ if "-canadian-" in pkg:
+ newtune = localdata.getVar("DEFAULTTUNE:" + "virtclass-multilib-" + p, False)
+ if newtune:
+ localdata.setVar("DEFAULTTUNE", newtune)
+ newname = localdata.expand(v)
+ else:
+ newname = localdata.expand(v).replace(version_str, version_str + p + '-')
+ if newname != v:
+ newval = localdata.expand(val)
+ d.setVar(newname, newval)
+ # Avoid future variable key expansion
+ vexp = d.expand(v)
+ if v != vexp and d.getVar(v, False):
+ d.renameVar(v, vexp)
+ continue
for p in prefixes:
- localdata = bb.data.createCopy(d)
- override = ":virtclass-multilib-" + p
- localdata.setVar("OVERRIDES", localdata.getVar("OVERRIDES", False) + override)
- if "-canadian-" in pkg:
- newname = localdata.expand(v)
- else:
- newname = localdata.expand(v).replace("PREFERRED_VERSION_", "PREFERRED_VERSION_" + p + '-')
- if newname != v:
- newval = localdata.expand(val)
- d.setVar(newname, newval)
- # Avoid future variable key expansion
- vexp = d.expand(v)
- if v != vexp and d.getVar(v, False):
- d.renameVar(v, vexp)
- continue
- for p in prefixes:
- newname = "PREFERRED_VERSION_" + p + "-" + pkg
- if not d.getVar(newname, False):
- d.setVar(newname, val)
+ newname = version_str + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
+
+ sort_versions(required_versions, "REQUIRED")
+ sort_versions(preferred_versions, "PREFERRED")
for prov in providers:
val = d.getVar(prov, False)
@@ -128,14 +146,14 @@ def preferred_ml_updates(d):
prov = prov.replace("virtual/", "")
return "virtual/" + prefix + "-" + prov
- mp = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
+ mp = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or "-crosssdk-" in p or p.startswith(("nativesdk-", "virtual/nativesdk-")) or 'cross-canadian' in p:
continue
for pref in prefixes:
extramp.append(translate_provide(pref, p))
- d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
+ d.setVar("BB_MULTI_PROVIDER_ALLOWED", " ".join(mp + extramp))
abisafe = (d.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
extras = []
@@ -155,8 +173,8 @@ def preferred_ml_updates(d):
python multilib_virtclass_handler_vendor () {
if isinstance(e, bb.event.ConfigParsed):
for v in e.data.getVar("MULTILIB_VARIANTS").split():
- if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + v, False) is None:
- e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
+ if e.data.getVar("TARGET_VENDOR:virtclass-multilib-" + v, False) is None:
+ e.data.setVar("TARGET_VENDOR:virtclass-multilib-" + v, e.data.getVar("TARGET_VENDOR", False) + "ml" + v)
preferred_ml_updates(e.data)
}
addhandler multilib_virtclass_handler_vendor
@@ -177,6 +195,7 @@ python multilib_virtclass_handler_global () {
# from a copy of the datastore
localdata = bb.data.createCopy(d)
localdata.delVar("KERNEL_VERSION")
+ localdata.delVar("KERNEL_VERSION_PKG_NAME")
variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
@@ -198,13 +217,13 @@ python multilib_virtclass_handler_global () {
if rprovs.strip():
e.data.setVar("RPROVIDES", rprovs)
- # Process RPROVIDES_${PN}...
+ # Process RPROVIDES:${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
- origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES:%s" % pkg) or ""
for clsextend in clsextends:
- rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES:%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
- e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+ e.data.setVar("RPROVIDES:%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
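Illustration: preferred_ml_updates() mirrors version and provider selections into each
multilib prefix so a single distro setting covers all variants. With the lib32
configuration sketched earlier, hypothetical settings such as

    PREFERRED_VERSION_openssl = "3.0%"
    REQUIRED_VERSION_python3 = "3.10%"

are also applied as PREFERRED_VERSION_lib32-openssl and REQUIRED_VERSION_lib32-python3,
unless those keys were already set explicitly.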
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
deleted file mode 100644
index e03f5b13b2..0000000000
--- a/meta/classes/multilib_header.bbclass
+++ /dev/null
@@ -1,52 +0,0 @@
-inherit siteinfo
-
-# If applicable on the architecture, this routine will rename the header and
-# add a unique identifier to the name for the ABI/bitsize that is being used.
-# A wrapper will be generated for the architecture that knows how to call
-# all of the ABI variants for that given architecture.
-#
-oe_multilib_header() {
-
- case ${HOST_OS} in
- *-musl*)
- return
- ;;
- *)
- esac
- # For MIPS: "n32" is a special case, which needs to be
- # distinct from both 64-bit and 32-bit.
- case ${TARGET_ARCH} in
- mips*) case "${MIPSPKGSFX_ABI}" in
- "-n32")
- ident=n32
- ;;
- *)
- ident=${SITEINFO_BITS}
- ;;
- esac
- ;;
- *) ident=${SITEINFO_BITS}
- esac
- for each_header in "$@" ; do
- if [ ! -f "${D}/${includedir}/$each_header" ]; then
- bberror "oe_multilib_header: Unable to find header $each_header."
- continue
- fi
- stem=$(echo $each_header | sed 's#\.h$##')
- # if mips64/n32 set ident to n32
- mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
-
- sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
- done
-}
-
-# Dependencies on arch variables like MIPSPKGSFX_ABI can be problematic.
-# We don't need multilib headers for native builds so brute force things.
-oe_multilib_header_class-native () {
- return
-}
-
-# Nor do we need multilib headers for nativesdk builds.
-oe_multilib_header_class-nativesdk () {
- return
-}
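Usage note: recipes that install an architecture-dependent header call
oe_multilib_header from do_install so the 32-bit and 64-bit variants can coexist in a
multilib sysroot. A minimal sketch (hypothetical header name):

    do_install:append() {
        # Renames the header to <name>-32.h / <name>-64.h (or -n32 on MIPS) and
        # installs a wrapper that includes the right variant at compile time.
        oe_multilib_header example/config.h
    }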
diff --git a/meta/classes/multilib_script.bbclass b/meta/classes/multilib_script.bbclass
deleted file mode 100644
index b11efc1ec5..0000000000
--- a/meta/classes/multilib_script.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
-# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
-# to indicate which script files to process from which packages.
-#
-
-inherit update-alternatives
-
-MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}"
-
-PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
-
-multilibscript_rename() {
- :
-}
-
-python () {
- # Do nothing if multilib isn't being used
- if not d.getVar("MULTILIB_VARIANTS"):
- return
- # Do nothing for native/cross
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
-
- for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
- pkg, script = entry.split(":")
- epkg = d.expand(pkg)
- scriptname = os.path.basename(script)
- d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
- d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
- d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
- d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
-}
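Usage note: a recipe that ships an architecture-specific script lists it in
MULTILIB_SCRIPTS so each multilib variant keeps its own renamed copy behind an
update-alternatives link. Sketch with a hypothetical script path:

    inherit multilib_script
    MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/example-config"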
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
deleted file mode 100644
index 08106e345c..0000000000
--- a/meta/classes/native.bbclass
+++ /dev/null
@@ -1,198 +0,0 @@
-# We want native packages to be relocatable
-inherit relocatable
-
-# Native packages are built indirectly via dependency,
-# no need for them to be a direct target of 'world'
-EXCLUDE_FROM_WORLD = "1"
-
-PACKAGES = ""
-PACKAGES_class-native = ""
-PACKAGES_DYNAMIC = ""
-PACKAGES_DYNAMIC_class-native = ""
-PACKAGE_ARCH = "${BUILD_ARCH}"
-
-# used by cmake class
-OECMAKE_RPATH = "${libdir}"
-OECMAKE_RPATH_class-native = "${libdir}"
-
-# When this class has packaging enabled, setting
-# RPROVIDES becomes unnecessary.
-RPROVIDES = "${PN}"
-
-TARGET_ARCH = "${BUILD_ARCH}"
-TARGET_OS = "${BUILD_OS}"
-TARGET_VENDOR = "${BUILD_VENDOR}"
-TARGET_PREFIX = "${BUILD_PREFIX}"
-TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
-TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
-TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
-TARGET_CPPFLAGS = "${BUILD_CPPFLAGS}"
-TARGET_CFLAGS = "${BUILD_CFLAGS}"
-TARGET_CXXFLAGS = "${BUILD_CXXFLAGS}"
-TARGET_LDFLAGS = "${BUILD_LDFLAGS}"
-TARGET_FPU = ""
-
-HOST_ARCH = "${BUILD_ARCH}"
-HOST_OS = "${BUILD_OS}"
-HOST_VENDOR = "${BUILD_VENDOR}"
-HOST_PREFIX = "${BUILD_PREFIX}"
-HOST_CC_ARCH = "${BUILD_CC_ARCH}"
-HOST_LD_ARCH = "${BUILD_LD_ARCH}"
-HOST_AS_ARCH = "${BUILD_AS_ARCH}"
-
-CPPFLAGS = "${BUILD_CPPFLAGS}"
-CFLAGS = "${BUILD_CFLAGS}"
-CXXFLAGS = "${BUILD_CXXFLAGS}"
-LDFLAGS = "${BUILD_LDFLAGS}"
-
-STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
-STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
-
-# native pkg doesn't need the TOOLCHAIN_OPTIONS.
-TOOLCHAIN_OPTIONS = ""
-
-# Don't build ptest natively
-PTEST_ENABLED = "0"
-
-# Don't use site files for native builds
-export CONFIG_SITE = "${COREBASE}/meta/site/native"
-
-# set the compiler as well. It could have been set to something else
-export CC = "${BUILD_CC}"
-export CXX = "${BUILD_CXX}"
-export FC = "${BUILD_FC}"
-export CPP = "${BUILD_CPP}"
-export LD = "${BUILD_LD}"
-export CCLD = "${BUILD_CCLD}"
-export AR = "${BUILD_AR}"
-export AS = "${BUILD_AS}"
-export RANLIB = "${BUILD_RANLIB}"
-export STRIP = "${BUILD_STRIP}"
-export NM = "${BUILD_NM}"
-
-# Path prefixes
-base_prefix = "${STAGING_DIR_NATIVE}"
-prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-
-bindir = "${STAGING_BINDIR_NATIVE}"
-sbindir = "${STAGING_SBINDIR_NATIVE}"
-base_libdir = "${STAGING_LIBDIR_NATIVE}"
-libdir = "${STAGING_LIBDIR_NATIVE}"
-includedir = "${STAGING_INCDIR_NATIVE}"
-sysconfdir = "${STAGING_ETCDIR_NATIVE}"
-datadir = "${STAGING_DATADIR_NATIVE}"
-
-baselib = "lib"
-
-export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
-
-NATIVE_PACKAGE_PATH_SUFFIX ?= ""
-bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-
-do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
-
-# Since we actually install these in situ there is no staging prefix
-STAGING_DIR_HOST = ""
-STAGING_DIR_TARGET = ""
-PKG_CONFIG_DIR = "${libdir}/pkgconfig"
-
-EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
-PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
-PKG_CONFIG_SYSROOT_DIR = ""
-PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
-PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
-
-# we don't want libc-*libc to kick in for native recipes
-LIBCOVERRIDE = ""
-CLASSOVERRIDE = "class-native"
-MACHINEOVERRIDES = ""
-MACHINE_FEATURES = ""
-
-PATH_prepend = "${COREBASE}/scripts/native-intercept:"
-
-# This class encodes staging paths into its scripts data so can only be
-# reused if we manipulate the paths.
-SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
-
-# No strip sysroot when DEBUG_BUILD is enabled
-INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
-
-python native_virtclass_handler () {
- pn = e.data.getVar("PN")
- if not pn.endswith("-native"):
- return
-
- # Set features here to prevent appends and distro features backfill
- # from modifying native distro features
- features = set(d.getVar("DISTRO_FEATURES_NATIVE").split())
- filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVE"), d).split())
- d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
-
- classextend = e.data.getVar('BBCLASSEXTEND') or ""
- if "native" not in classextend:
- return
-
- def map_dependencies(varname, d, suffix = ""):
- if suffix:
- varname = varname + "_" + suffix
- deps = d.getVar(varname)
- if not deps:
- return
- deps = bb.utils.explode_deps(deps)
- newdeps = []
- for dep in deps:
- if dep == pn:
- continue
- elif "-cross-" in dep:
- newdeps.append(dep.replace("-cross", "-native"))
- elif not dep.endswith("-native"):
- newdeps.append(dep + "-native")
- else:
- newdeps.append(dep)
- d.setVar(varname, " ".join(newdeps))
-
- map_dependencies("DEPENDS", e.data)
- for pkg in [e.data.getVar("PN"), "", "${PN}"]:
- map_dependencies("RDEPENDS", e.data, pkg)
- map_dependencies("RRECOMMENDS", e.data, pkg)
- map_dependencies("RSUGGESTS", e.data, pkg)
- map_dependencies("RPROVIDES", e.data, pkg)
- map_dependencies("RREPLACES", e.data, pkg)
-
- provides = e.data.getVar("PROVIDES")
- nprovides = []
- for prov in provides.split():
- if prov.find(pn) != -1:
- nprovides.append(prov)
- elif not prov.endswith("-native"):
- nprovides.append(prov.replace(prov, prov + "-native"))
- else:
- nprovides.append(prov)
- e.data.setVar("PROVIDES", ' '.join(nprovides))
-
-
-}
-
-addhandler native_virtclass_handler
-native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-
-python do_addto_recipe_sysroot () {
- bb.build.exec_func("extend_recipe_sysroot", d)
-}
-addtask addto_recipe_sysroot after do_populate_sysroot
-do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
-
-inherit nopackages
-
-do_packagedata[stamp-extra-info] = ""
-
-USE_NLS = "no"
-
-RECIPERDEPTASK = "do_populate_sysroot"
-do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
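For context: most recipes do not inherit this class directly; they advertise a native
variant through BBCLASSEXTEND, and native_virtclass_handler above then remaps PN,
PROVIDES and the runtime dependency variables. Sketch (hypothetical recipe names):

    # In a target recipe that should also be buildable for the build host:
    BBCLASSEXTEND = "native"

    # A consumer then depends on the native variant as a build-time tool:
    DEPENDS += "example-tool-native"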
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
deleted file mode 100644
index 7b75710726..0000000000
--- a/meta/classes/nativesdk.bbclass
+++ /dev/null
@@ -1,114 +0,0 @@
-# SDK packages are built either explicitly by the user,
-# or indirectly via dependency. No need to be in 'world'.
-EXCLUDE_FROM_WORLD = "1"
-
-STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
-
-# libc for the SDK can be different to that of the target
-NATIVESDKLIBC ?= "libc-glibc"
-LIBCOVERRIDE = ":${NATIVESDKLIBC}"
-CLASSOVERRIDE = "class-nativesdk"
-MACHINEOVERRIDES = ""
-
-MULTILIBS = ""
-
-# we need consistent staging dir whether or not multilib is enabled
-STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
-STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
-RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
-
-#
-# Update PACKAGE_ARCH and PACKAGE_ARCHS
-#
-PACKAGE_ARCH = "${SDK_ARCH}-${SDKPKGSUFFIX}"
-PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
-
-#
-# We need chrpath >= 0.14 to ensure we can deal with 32 and 64 bit
-# binaries
-#
-DEPENDS_append = " chrpath-replacement-native"
-EXTRANATIVEPATH += "chrpath-native"
-
-PKGDATA_DIR = "${TMPDIR}/pkgdata/${SDK_SYS}"
-
-HOST_ARCH = "${SDK_ARCH}"
-HOST_VENDOR = "${SDK_VENDOR}"
-HOST_OS = "${SDK_OS}"
-HOST_PREFIX = "${SDK_PREFIX}"
-HOST_CC_ARCH = "${SDK_CC_ARCH}"
-HOST_LD_ARCH = "${SDK_LD_ARCH}"
-HOST_AS_ARCH = "${SDK_AS_ARCH}"
-#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
-
-TARGET_ARCH = "${SDK_ARCH}"
-TARGET_VENDOR = "${SDK_VENDOR}"
-TARGET_OS = "${SDK_OS}"
-TARGET_PREFIX = "${SDK_PREFIX}"
-TARGET_CC_ARCH = "${SDK_CC_ARCH}"
-TARGET_LD_ARCH = "${SDK_LD_ARCH}"
-TARGET_AS_ARCH = "${SDK_AS_ARCH}"
-TARGET_CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
-TARGET_CFLAGS = "${BUILDSDK_CFLAGS}"
-TARGET_CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
-TARGET_LDFLAGS = "${BUILDSDK_LDFLAGS}"
-TARGET_FPU = ""
-EXTRA_OECONF_GCC_FLOAT = ""
-
-CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
-CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
-LDFLAGS = "${BUILDSDK_LDFLAGS}"
-
-# Change to place files in SDKPATH
-base_prefix = "${SDKPATHNATIVE}"
-prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
-baselib = "lib"
-sbindir = "${bindir}"
-
-export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
-export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
-
-python nativesdk_virtclass_handler () {
- pn = e.data.getVar("PN")
- if not (pn.endswith("-nativesdk") or pn.startswith("nativesdk-")):
- return
-
- # Set features here to prevent appends and distro features backfill
- # from modifying nativesdk distro features
- features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
- filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
- d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
-
- e.data.setVar("MLPREFIX", "nativesdk-")
- e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
-}
-
-python () {
- pn = d.getVar("PN")
- if not pn.startswith("nativesdk-"):
- return
-
- import oe.classextend
-
- clsextend = oe.classextend.NativesdkClassExtender("nativesdk", d)
- clsextend.rename_packages()
- clsextend.rename_package_variables((d.getVar("PACKAGEVARS") or "").split())
-
- clsextend.map_depends_variable("DEPENDS")
- clsextend.map_packagevars()
- clsextend.map_variable("PROVIDES")
- clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
- d.setVar("LIBCEXTENSION", "")
- d.setVar("ABIEXTENSION", "")
-}
-
-addhandler nativesdk_virtclass_handler
-nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-
-do_packagedata[stamp-extra-info] = ""
-
-USE_NLS = "${SDKUSE_NLS}"
-
-OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
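Likewise, SDK host packages are normally produced via BBCLASSEXTEND rather than by
inheriting this class directly; nativesdk_virtclass_handler above prefixes PN with
nativesdk- and remaps the packaging variables. Sketch (hypothetical package name):

    BBCLASSEXTEND = "native nativesdk"

    # Pulled into the SDK host side with, for example:
    TOOLCHAIN_HOST_TASK:append = " nativesdk-example-tool"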
diff --git a/meta/classes/nopackages.bbclass b/meta/classes/nopackages.bbclass
deleted file mode 100644
index 559f5078bd..0000000000
--- a/meta/classes/nopackages.bbclass
+++ /dev/null
@@ -1,12 +0,0 @@
-deltask do_package
-deltask do_package_write_rpm
-deltask do_package_write_ipk
-deltask do_package_write_deb
-deltask do_package_qa
-deltask do_packagedata
-deltask do_package_setscene
-deltask do_package_write_rpm_setscene
-deltask do_package_write_ipk_setscene
-deltask do_package_write_deb_setscene
-deltask do_package_qa_setscene
-deltask do_packagedata_setscene
diff --git a/meta/classes/npm.bbclass b/meta/classes/npm.bbclass
deleted file mode 100644
index 068032a1e5..0000000000
--- a/meta/classes/npm.bbclass
+++ /dev/null
@@ -1,307 +0,0 @@
-# Copyright (C) 2020 Savoir-Faire Linux
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# This bbclass builds and installs an npm package to the target. The package
-# source files should be fetched in the calling recipe by using the SRC_URI
-# variable. The ${S} variable should be updated depending on your fetcher.
-#
-# Usage:
-# SRC_URI = "..."
-# inherit npm
-#
-# Optional variables:
-# NPM_ARCH:
-# Override the auto generated npm architecture.
-#
-# NPM_INSTALL_DEV:
-# Set to 1 to also install devDependencies.
-
-DEPENDS_prepend = "nodejs-native "
-RDEPENDS_${PN}_prepend = "nodejs "
-
-NPM_INSTALL_DEV ?= "0"
-
-def npm_target_arch_map(target_arch):
- """Maps arch names to npm arch names"""
- import re
- if re.match("p(pc|owerpc)(|64)", target_arch):
- return "ppc"
- elif re.match("i.86$", target_arch):
- return "ia32"
- elif re.match("x86_64$", target_arch):
- return "x64"
- elif re.match("arm64$", target_arch):
- return "arm"
- return target_arch
-
-NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
-
-NPM_PACKAGE = "${WORKDIR}/npm-package"
-NPM_CACHE = "${WORKDIR}/npm-cache"
-NPM_BUILD = "${WORKDIR}/npm-build"
-
-def npm_global_configs(d):
- """Get the npm global configuration"""
- configs = []
- # Ensure no network access is done
- configs.append(("offline", "true"))
- configs.append(("proxy", "http://invalid"))
- # Configure the cache directory
- configs.append(("cache", d.getVar("NPM_CACHE")))
- return configs
-
-def npm_pack(env, srcdir, workdir):
- """Run 'npm pack' on a specified directory"""
- import shlex
- cmd = "npm pack %s" % shlex.quote(srcdir)
- configs = [("ignore-scripts", "true")]
- tarball = env.run(cmd, configs=configs, workdir=workdir).strip("\n")
- return os.path.join(workdir, tarball)
-
-python npm_do_configure() {
- """
- Step one: configure the npm cache and the main npm package
-
- All dependencies have been fetched and patched in the source directory.
- They have to be packed (this removes unneeded files) and added to the npm
- cache to be available for the next step.
-
- The main package and its associated manifest file and shrinkwrap file have
- to be configured to take into account these cached dependencies.
- """
- import base64
- import copy
- import json
- import re
- import shlex
- import tempfile
- from bb.fetch2.npm import NpmEnvironment
- from bb.fetch2.npm import npm_unpack
- from bb.fetch2.npmsw import foreach_dependencies
- from bb.progress import OutOfProgressHandler
-
- bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
- bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
-
- env = NpmEnvironment(d, configs=npm_global_configs(d))
-
- def _npm_cache_add(tarball):
- """Run 'npm cache add' for a specified tarball"""
- cmd = "npm cache add %s" % shlex.quote(tarball)
- env.run(cmd)
-
- def _npm_integrity(tarball):
- """Return the npm integrity of a specified tarball"""
- sha512 = bb.utils.sha512_file(tarball)
- return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
-
- def _npm_version(tarball):
- """Return the version of a specified tarball"""
- regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
- return re.search(regex, tarball).group(1)
-
- def _npmsw_dependency_dict(orig, deptree):
- """
- Return the sub dictionary in the 'orig' dictionary corresponding to the
- 'deptree' dependency tree. This function follows the shrinkwrap file
- format.
- """
- ptr = orig
- for dep in deptree:
- if "dependencies" not in ptr:
- ptr["dependencies"] = {}
- ptr = ptr["dependencies"]
- if dep not in ptr:
- ptr[dep] = {}
- ptr = ptr[dep]
- return ptr
-
- # Manage the manifest file and shrinkwrap files
- orig_manifest_file = d.expand("${S}/package.json")
- orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
- cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
- cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
-
- with open(orig_manifest_file, "r") as f:
- orig_manifest = json.load(f)
-
- cached_manifest = copy.deepcopy(orig_manifest)
- cached_manifest.pop("dependencies", None)
- cached_manifest.pop("devDependencies", None)
-
- with open(orig_shrinkwrap_file, "r") as f:
- orig_shrinkwrap = json.load(f)
-
- cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
- cached_shrinkwrap.pop("dependencies", None)
-
- # Manage the dependencies
- progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
- progress_total = 1 # also count the main package
- progress_done = 0
-
- def _count_dependency(name, params, deptree):
- nonlocal progress_total
- progress_total += 1
-
- def _cache_dependency(name, params, deptree):
- destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
- destsuffix = os.path.join(*destsubdirs)
- with tempfile.TemporaryDirectory() as tmpdir:
- # Add the dependency to the npm cache
- destdir = os.path.join(d.getVar("S"), destsuffix)
- tarball = npm_pack(env, destdir, tmpdir)
- _npm_cache_add(tarball)
- # Add its signature to the cached shrinkwrap
- dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
- dep["version"] = _npm_version(tarball)
- dep["integrity"] = _npm_integrity(tarball)
- if params.get("dev", False):
- dep["dev"] = True
- # Display progress
- nonlocal progress_done
- progress_done += 1
- progress.write("%d/%d" % (progress_done, progress_total))
-
- dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
- foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
- foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
-
- # Configure the main package
- with tempfile.TemporaryDirectory() as tmpdir:
- tarball = npm_pack(env, d.getVar("S"), tmpdir)
- npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
-
- # Configure the cached manifest file and cached shrinkwrap file
- def _update_manifest(depkey):
- for name in orig_manifest.get(depkey, {}):
- version = cached_shrinkwrap["dependencies"][name]["version"]
- if depkey not in cached_manifest:
- cached_manifest[depkey] = {}
- cached_manifest[depkey][name] = version
-
- _update_manifest("dependencies")
-
- if dev:
- _update_manifest("devDependencies")
-
- with open(cached_manifest_file, "w") as f:
- json.dump(cached_manifest, f, indent=2)
-
- with open(cached_shrinkwrap_file, "w") as f:
- json.dump(cached_shrinkwrap, f, indent=2)
-}
-
-python npm_do_compile() {
- """
- Step two: install the npm package
-
- Use the configured main package and the cached dependencies to run the
- installation process. The installation is done in a directory which is
- not the destination directory yet.
-
- A combination of 'npm pack' and 'npm install' is used to ensure that the
- installed files are actual copies instead of symbolic links (which is the
- default npm behavior).
- """
- import shlex
- import tempfile
- from bb.fetch2.npm import NpmEnvironment
-
- bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
-
- env = NpmEnvironment(d, configs=npm_global_configs(d))
-
- dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
-
- with tempfile.TemporaryDirectory() as tmpdir:
- args = []
- configs = []
-
- if dev:
- configs.append(("also", "development"))
- else:
- configs.append(("only", "production"))
-
- # Report as many logs as possible for debugging purpose
- configs.append(("loglevel", "silly"))
-
- # Configure the installation to be done globally in the build directory
- configs.append(("global", "true"))
- configs.append(("prefix", d.getVar("NPM_BUILD")))
-
- # Add node-gyp configuration
- configs.append(("arch", d.getVar("NPM_ARCH")))
- configs.append(("release", "true"))
- sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
- nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
- configs.append(("nodedir", nodedir))
- bindir = os.path.join(sysroot, d.getVar("bindir_native").strip("/"))
- pythondir = os.path.join(bindir, "python-native", "python")
- configs.append(("python", pythondir))
-
- # Add node-pre-gyp configuration
- args.append(("target_arch", d.getVar("NPM_ARCH")))
- args.append(("build-from-source", "true"))
-
- # Pack and install the main package
- tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
- env.run("npm install %s" % shlex.quote(tarball), args=args, configs=configs)
-}
-
-npm_do_install() {
- # Step three: final install
- #
- # The previous installation has to be filtered to remove some extra files.
-
- rm -rf ${D}
-
- # Copy the entire lib and bin directories
- install -d ${D}/${nonarch_libdir}
- cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
-
- if [ -d "${NPM_BUILD}/bin" ]
- then
- install -d ${D}/${bindir}
- cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
- fi
-
- # If the package (or its dependencies) uses node-gyp to build native addons,
- # object files, static libraries or other temporary files can be hidden in
- # the lib directory. To reduce the package size and to avoid QA issues
- # (staticdev with static library files) these files must be removed.
- local GYP_REGEX=".*/build/Release/[^/]*.node"
-
- # Remove any node-gyp directory in ${D} to remove temporary build files
- for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
- do
- local GYP_D_DIR=${GYP_D_FILE%/Release/*}
-
- rm --recursive --force ${GYP_D_DIR}
- done
-
- # Copy only the node-gyp release files
- for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
- do
- local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
-
- install -d ${GYP_D_FILE%/*}
- install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
- done
-
- # Remove the shrinkwrap file which does not need to be packed
- rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
- rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
-
- # node(1) is using /usr/lib/node as default include directory and npm(1) is
- # using /usr/lib/node_modules as install directory. Let's make both happy.
- ln -fs node_modules ${D}/${nonarch_libdir}/node
-}
-
-FILES_${PN} += " \
- ${bindir} \
- ${nonarch_libdir} \
-"
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
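Illustration: a recipe built with this class fetches the package plus its
shrinkwrap-locked dependencies and then inherits npm so the configure/compile/install
steps above run. Minimal sketch (hypothetical package name and version):

    SUMMARY = "Example npm package"
    LICENSE = "MIT"

    SRC_URI = "npm://registry.npmjs.org/;package=example-pkg;version=1.0.0 \
               npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"

    inherit npm

    # Set to "1" to also install the devDependencies recorded in the shrinkwrap file.
    NPM_INSTALL_DEV = "0"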
diff --git a/meta/classes/oelint.bbclass b/meta/classes/oelint.bbclass
index 2589d34059..458a25ecc3 100644
--- a/meta/classes/oelint.bbclass
+++ b/meta/classes/oelint.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
addtask lint before do_build
do_lint[nostamp] = "1"
python do_lint() {
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
index a777835138..36c7f8e3f3 100644
--- a/meta/classes/own-mirrors.bbclass
+++ b/meta/classes/own-mirrors.bbclass
@@ -1,13 +1,22 @@
-PREMIRRORS_prepend = " \
-cvs://.*/.* ${SOURCE_MIRROR_URL} \n \
-svn://.*/.* ${SOURCE_MIRROR_URL} \n \
-git://.*/.* ${SOURCE_MIRROR_URL} \n \
-gitsm://.*/.* ${SOURCE_MIRROR_URL} \n \
-hg://.*/.* ${SOURCE_MIRROR_URL} \n \
-bzr://.*/.* ${SOURCE_MIRROR_URL} \n \
-p4://.*/.* ${SOURCE_MIRROR_URL} \n \
-osc://.*/.* ${SOURCE_MIRROR_URL} \n \
-https?$://.*/.* ${SOURCE_MIRROR_URL} \n \
-ftp://.*/.* ${SOURCE_MIRROR_URL} \n \
-npm://.*/?.* ${SOURCE_MIRROR_URL} \n \
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+PREMIRRORS:prepend = " \
+cvs://.*/.* ${SOURCE_MIRROR_URL} \
+svn://.*/.* ${SOURCE_MIRROR_URL} \
+git://.*/.* ${SOURCE_MIRROR_URL} \
+gitsm://.*/.* ${SOURCE_MIRROR_URL} \
+hg://.*/.* ${SOURCE_MIRROR_URL} \
+bzr://.*/.* ${SOURCE_MIRROR_URL} \
+p4://.*/.* ${SOURCE_MIRROR_URL} \
+osc://.*/.* ${SOURCE_MIRROR_URL} \
+https?://.*/.* ${SOURCE_MIRROR_URL} \
+ftp://.*/.* ${SOURCE_MIRROR_URL} \
+npm://.*/?.* ${SOURCE_MIRROR_URL} \
+s3://.*/.* ${SOURCE_MIRROR_URL} \
+crate://.*/.* ${SOURCE_MIRROR_URL} \
+gs://.*/.* ${SOURCE_MIRROR_URL} \
"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
deleted file mode 100644
index 0af5f66733..0000000000
--- a/meta/classes/package.bbclass
+++ /dev/null
@@ -1,2439 +0,0 @@
-#
-# Packaging process
-#
-# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS
-# Taking D and splitting it up into the packages listed in PACKAGES, placing the
-# resulting output in PKGDEST.
-#
-# There are the following default steps but PACKAGEFUNCS can be extended:
-#
-# a) package_get_auto_pr - get PRAUTO from remote PR service
-#
-# b) perform_packagecopy - Copy D into PKGD
-#
-# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
-#
-# d) split_and_strip_files - split the files into runtime and debug and strip them.
-# Debug files include debug info split, and associated sources that end up in -dbg packages
-#
-# e) fixup_perms - Fix up permissions in the package before we split it.
-#
-# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
-# Also triggers the binary stripping code to put files in -dbg packages.
-#
-# g) package_do_filedeps - Collect perfile run-time dependency metadata
-# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
-# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
-#
-# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
-# dependencies found. Also stores the package name so anyone else using this library
-# knows which package to depend on.
-#
-# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
-#
-# j) read_shlibdeps - Reads the stored shlibs information into the metadata
-#
-# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
-#
-# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
-# packaging steps
-
-inherit packagedata
-inherit chrpath
-inherit package_pkgdata
-
-# Need the package_qa_handle_error() in insane.bbclass
-inherit insane
-
-PKGD = "${WORKDIR}/package"
-PKGDEST = "${WORKDIR}/packages-split"
-
-LOCALE_SECTION ?= ''
-
-ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
-
-# rpm is used for the per-file dependency identification
-# dwarfsrcfiles is used to determine the list of debug source files
-PACKAGE_DEPENDS += "rpm-native dwarfsrcfiles-native"
-
-
-# If your postinstall can execute at rootfs creation time rather than on
-# target but depends on a native/cross tool in order to execute, you need to
-# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
-# in the package dependencies as normal, this is just for native/cross support
-# tools at rootfs build time.
-PACKAGE_WRITE_DEPS ??= ""
-
-def legitimize_package_name(s):
- """
- Make sure package names are legitimate strings
- """
- import re
-
- def fixutf(m):
- cp = m.group(1)
- if cp:
- return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
-
- # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
- s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
-
- # Remaining package name validity fixes
- return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
-
-def do_split_packages(d, root, file_regex, output_pattern, description, postinst=None, recursive=False, hook=None, extra_depends=None, aux_files_pattern=None, postrm=None, allow_dirs=False, prepend=False, match_path=False, aux_files_pattern_verbatim=None, allow_links=False, summary=None):
- """
- Used in .bb files to split up dynamically generated subpackages of a
- given package, usually plugins or modules.
-
- Arguments:
- root -- the path in which to search
- file_regex -- regular expression to match searched files. Use
- parentheses () to mark the part of this expression
- that should be used to derive the module name (to be
- substituted where %s is used in other function
- arguments as noted below)
- output_pattern -- pattern to use for the package names. Must include %s.
- description -- description to set for each package. Must include %s.
- postinst -- postinstall script to use for all packages (as a
- string)
- recursive -- True to perform a recursive search - default False
- hook -- a hook function to be called for every match. The
- function will be called with the following arguments
- (in the order listed):
- f: full path to the file/directory match
- pkg: the package name
- file_regex: as above
- output_pattern: as above
- modulename: the module name derived using file_regex
- extra_depends -- extra runtime dependencies (RDEPENDS) to be set for
- all packages. The default value of None causes a
- dependency on the main package (${PN}) - if you do
- not want this, pass '' for this parameter.
- aux_files_pattern -- extra item(s) to be added to FILES for each
- package. Can be a single string item or a list of
- strings for multiple items. Must include %s.
- postrm -- postrm script to use for all packages (as a string)
- allow_dirs -- True allow directories to be matched - default False
- prepend -- if True, prepend created packages to PACKAGES instead
- of the default False which appends them
- match_path -- match file_regex on the whole relative path to the
- root rather than just the file name
- aux_files_pattern_verbatim -- extra item(s) to be added to FILES for
- each package, using the actual derived module name
- rather than converting it to something legal for a
- package name. Can be a single string item or a list
- of strings for multiple items. Must include %s.
- allow_links -- True to allow symlinks to be matched - default False
- summary -- Summary to set for each package. Must include %s;
- defaults to description if not set.
-
- """
-
- dvar = d.getVar('PKGD')
- root = d.expand(root)
- output_pattern = d.expand(output_pattern)
- extra_depends = d.expand(extra_depends)
-
- # If the root directory doesn't exist, don't error out later but silently do
- # no splitting.
- if not os.path.exists(dvar + root):
- return []
-
- ml = d.getVar("MLPREFIX")
- if ml:
- if not output_pattern.startswith(ml):
- output_pattern = ml + output_pattern
-
- newdeps = []
- for dep in (extra_depends or "").split():
- if dep.startswith(ml):
- newdeps.append(dep)
- else:
- newdeps.append(ml + dep)
- if newdeps:
- extra_depends = " ".join(newdeps)
-
-
- packages = d.getVar('PACKAGES').split()
- split_packages = set()
-
- if postinst:
- postinst = '#!/bin/sh\n' + postinst + '\n'
- if postrm:
- postrm = '#!/bin/sh\n' + postrm + '\n'
- if not recursive:
- objs = os.listdir(dvar + root)
- else:
- objs = []
- for walkroot, dirs, files in os.walk(dvar + root):
- for file in files:
- relpath = os.path.join(walkroot, file).replace(dvar + root + '/', '', 1)
- if relpath:
- objs.append(relpath)
-
- if extra_depends == None:
- extra_depends = d.getVar("PN")
-
- if not summary:
- summary = description
-
- for o in sorted(objs):
- import re, stat
- if match_path:
- m = re.match(file_regex, o)
- else:
- m = re.match(file_regex, os.path.basename(o))
-
- if not m:
- continue
- f = os.path.join(dvar + root, o)
- mode = os.lstat(f).st_mode
- if not (stat.S_ISREG(mode) or (allow_links and stat.S_ISLNK(mode)) or (allow_dirs and stat.S_ISDIR(mode))):
- continue
- on = legitimize_package_name(m.group(1))
- pkg = output_pattern % on
- split_packages.add(pkg)
- if not pkg in packages:
- if prepend:
- packages = [pkg] + packages
- else:
- packages.append(pkg)
- oldfiles = d.getVar('FILES_' + pkg)
- newfile = os.path.join(root, o)
- # These names will be passed through glob() so if the filename actually
- # contains * or ? (rare, but possible) we need to handle that specially
- newfile = newfile.replace('*', '[*]')
- newfile = newfile.replace('?', '[?]')
- if not oldfiles:
- the_files = [newfile]
- if aux_files_pattern:
- if type(aux_files_pattern) is list:
- for fp in aux_files_pattern:
- the_files.append(fp % on)
- else:
- the_files.append(aux_files_pattern % on)
- if aux_files_pattern_verbatim:
- if type(aux_files_pattern_verbatim) is list:
- for fp in aux_files_pattern_verbatim:
- the_files.append(fp % m.group(1))
- else:
- the_files.append(aux_files_pattern_verbatim % m.group(1))
- d.setVar('FILES_' + pkg, " ".join(the_files))
- else:
- d.setVar('FILES_' + pkg, oldfiles + " " + newfile)
- if extra_depends != '':
- d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
- if not d.getVar('DESCRIPTION_' + pkg):
- d.setVar('DESCRIPTION_' + pkg, description % on)
- if not d.getVar('SUMMARY_' + pkg):
- d.setVar('SUMMARY_' + pkg, summary % on)
- if postinst:
- d.setVar('pkg_postinst_' + pkg, postinst)
- if postrm:
- d.setVar('pkg_postrm_' + pkg, postrm)
- if callable(hook):
- hook(f, pkg, file_regex, output_pattern, m.group(1))
-
- d.setVar('PACKAGES', ' '.join(packages))
- return list(split_packages)
-
-PACKAGE_DEPENDS += "file-native"
-
-python () {
- if d.getVar('PACKAGES') != '':
- deps = ""
- for dep in (d.getVar('PACKAGE_DEPENDS') or "").split():
- deps += " %s:do_populate_sysroot" % dep
- if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
- deps += ' xz-native:do_populate_sysroot'
- d.appendVarFlag('do_package', 'depends', deps)
-
- # shlibs requires any DEPENDS to have already packaged for the *.list files
- d.appendVarFlag('do_package', 'deptask', " do_packagedata")
-}
-
-# Get a list of files from file vars by searching files under current working directory
-# The list contains symlinks, directories and normal files.
-def files_from_filevars(filevars):
- import os,glob
- cpath = oe.cachedpath.CachedPath()
- files = []
- for f in filevars:
- if os.path.isabs(f):
- f = '.' + f
- if not f.startswith("./"):
- f = './' + f
- globbed = glob.glob(f)
- if globbed:
- if [ f ] != globbed:
- files += globbed
- continue
- files.append(f)
-
- symlink_paths = []
- for ind, f in enumerate(files):
- # Handle directory symlinks. Truncate path to the lowest level symlink
- parent = ''
- for dirname in f.split('/')[:-1]:
- parent = os.path.join(parent, dirname)
- if dirname == '.':
- continue
- if cpath.islink(parent):
- bb.warn("FILES contains file '%s' which resides under a "
- "directory symlink. Please fix the recipe and use the "
- "real path for the file." % f[1:])
- symlink_paths.append(f)
- files[ind] = parent
- f = parent
- break
-
- if not cpath.islink(f):
- if cpath.isdir(f):
- newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
- if newfiles:
- files += newfiles
-
- return files, symlink_paths
-
-# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
-def get_conffiles(pkg, d):
- pkgdest = d.getVar('PKGDEST')
- root = os.path.join(pkgdest, pkg)
- cwd = os.getcwd()
- os.chdir(root)
-
- conffiles = d.getVar('CONFFILES_%s' % pkg);
- if conffiles == None:
- conffiles = d.getVar('CONFFILES')
- if conffiles == None:
- conffiles = ""
- conffiles = conffiles.split()
- conf_orig_list = files_from_filevars(conffiles)[0]
-
- # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
- conf_list = []
- for f in conf_orig_list:
- if os.path.isdir(f):
- continue
- if os.path.islink(f):
- continue
- if not os.path.exists(f):
- continue
- conf_list.append(f)
-
- # Remove the leading './'
- for i in range(0, len(conf_list)):
- conf_list[i] = conf_list[i][1:]
-
- os.chdir(cwd)
- return conf_list
-
-def checkbuildpath(file, d):
- tmpdir = d.getVar('TMPDIR')
- with open(file) as f:
- file_content = f.read()
- if tmpdir in file_content:
- return True
-
- return False
-
-def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
- debugfiles = {}
-
- for line in dwarfsrcfiles_output.splitlines():
- if line.startswith("\t"):
- debugfiles[os.path.normpath(line.split()[0])] = ""
-
- return debugfiles.keys()
-
-def source_info(file, d, fatal=True):
- import subprocess
-
- cmd = ["dwarfsrcfiles", file]
- try:
- output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
- retval = 0
- except subprocess.CalledProcessError as exc:
- output = exc.output
- retval = exc.returncode
-
- # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
- if retval != 0 and retval != 255:
- msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
- if fatal:
- bb.fatal(msg)
- bb.note(msg)
-
- debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
-
- return list(debugsources)
-
-def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
- # Function to split a single file into two components, one is the stripped
- # target system binary, the other contains any debugging information. The
- # two files are linked to reference each other.
- #
- # return a mapping of files:debugsources
-
- import stat
- import subprocess
-
- src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
- debugfile = dvar + dest
- sources = []
-
- # Split the file...
- bb.utils.mkdirhier(os.path.dirname(debugfile))
- #bb.note("Split %s -> %s" % (file, debugfile))
- # Only store off the hard link reference if we successfully split!
-
- dvar = d.getVar('PKGD')
- objcopy = d.getVar("OBJCOPY")
-
- # We ignore kernel modules, we don't generate debug info files.
- if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return (file, sources)
-
- newmode = None
- if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
- origmode = os.stat(file)[stat.ST_MODE]
- newmode = origmode | stat.S_IWRITE | stat.S_IREAD
- os.chmod(file, newmode)
-
- # We need to extract the debug src information here...
- if debugsrcdir:
- sources = source_info(file, d)
-
- bb.utils.mkdirhier(os.path.dirname(debugfile))
-
- subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
-
- # Set the debuglink to have the view of the file path on the target
- subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
-
- if newmode:
- os.chmod(file, origmode)
-
- return (file, sources)
-
-def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d):
- # Unlike the function above, there is no way to split a static library
- # into two components. So to get similar results we will copy the unmodified
- # static library (containing the debug symbols) into a new directory.
- # We will then strip (preserving symbols) the static library in the
- # typical location.
- #
- # return a mapping of files:debugsources
-
- import stat
- import shutil
-
- src = file[len(dvar):]
- dest = debugstaticlibdir + os.path.dirname(src) + debugstaticdir + "/" + os.path.basename(src) + debugstaticappend
- debugfile = dvar + dest
- sources = []
-
- # Copy the file...
- bb.utils.mkdirhier(os.path.dirname(debugfile))
- #bb.note("Copy %s -> %s" % (file, debugfile))
-
- dvar = d.getVar('PKGD')
-
- newmode = None
- if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
- origmode = os.stat(file)[stat.ST_MODE]
- newmode = origmode | stat.S_IWRITE | stat.S_IREAD
- os.chmod(file, newmode)
-
- # We need to extract the debug src information here...
- if debugsrcdir:
- sources = source_info(file, d)
-
- bb.utils.mkdirhier(os.path.dirname(debugfile))
-
- # Copy the unmodified item to the debug directory
- shutil.copy2(file, debugfile)
-
- if newmode:
- os.chmod(file, origmode)
-
- return (file, sources)
-
-def inject_minidebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
- # Extract just the symbols from debuginfo into minidebuginfo,
- # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
- # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
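- #
- # Generation is optional; as a sketch, inject_minidebuginfo() is invoked from
- # split_and_strip_files() below only when the following is set (e.g. in local.conf):
- #   PACKAGE_MINIDEBUGINFO = "1"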
-
- import subprocess
-
- readelf = d.getVar('READELF')
- nm = d.getVar('NM')
- objcopy = d.getVar('OBJCOPY')
-
- minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
-
- src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
- debugfile = dvar + dest
- minidebugfile = minidebuginfodir + src + '.minidebug'
- bb.utils.mkdirhier(os.path.dirname(minidebugfile))
-
- # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
- # so skip it.
- if not os.path.exists(debugfile):
- bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
- return
-
- # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
- # We will exclude all of these from minidebuginfo to save space.
- remove_section_names = []
- for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
- fields = line.split()
- if len(fields) < 8:
- continue
- name = fields[0]
- type = fields[1]
- flags = fields[7]
- # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
- if name.startswith('.debug_'):
- continue
- if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
- remove_section_names.append(name)
-
- # List dynamic symbols in the binary. We can exclude these from minidebuginfo
- # because they are always present in the binary.
- dynsyms = set()
- for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
- dynsyms.add(line.split()[0])
-
- # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
- # These are the ones we want to keep in minidebuginfo.
- keep_symbols_file = minidebugfile + '.symlist'
- found_any_symbols = False
- with open(keep_symbols_file, 'w') as f:
- for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
- fields = line.split('|')
- if len(fields) < 7:
- continue
- name = fields[0].strip()
- type = fields[3].strip()
- if type == 'FUNC' and name not in dynsyms:
- f.write('{}\n'.format(name))
- found_any_symbols = True
-
- if not found_any_symbols:
- bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
- return
-
- bb.utils.remove(minidebugfile)
- bb.utils.remove(minidebugfile + '.xz')
-
- subprocess.check_call([objcopy, '-S'] +
- ['--remove-section={}'.format(s) for s in remove_section_names] +
- ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
-
- subprocess.check_call(['xz', '--keep', minidebugfile])
-
- subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
-
-def copydebugsources(debugsrcdir, sources, d):
- # The debug src information written out to sourcefile is further processed
- # and copied to the destination here.
-
- import stat
- import subprocess
-
- if debugsrcdir and sources:
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- bb.utils.remove(sourcefile)
-
- # filenames are null-separated - this is an artefact of the previous use
- # of rpm's debugedit, which was writing them out that way, and the code elsewhere
- # is still assuming that.
- debuglistoutput = '\0'.join(sources) + '\0'
- with open(sourcefile, 'a') as sf:
- sf.write(debuglistoutput)
-
- dvar = d.getVar('PKGD')
- strip = d.getVar("STRIP")
- objcopy = d.getVar("OBJCOPY")
- workdir = d.getVar("WORKDIR")
- workparentdir = os.path.dirname(os.path.dirname(workdir))
- workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
-
- # If the build path appears in sourcefile, the toolchain did not use
- # -fdebug-prefix-map when compiling
- if checkbuildpath(sourcefile, d):
- localsrc_prefix = workparentdir + "/"
- else:
- localsrc_prefix = "/usr/src/debug/"
-
- nosuchdir = []
- basepath = dvar
- for p in debugsrcdir.split("/"):
- basepath = basepath + "/" + p
- if not cpath.exists(basepath):
- nosuchdir.append(basepath)
- bb.utils.mkdirhier(basepath)
- cpath.updatecache(basepath)
-
- # Ignore files from the recipe sysroots (target and native)
- processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | "
- # We need to ignore files that are not actually ours
- # we do this by only paying attention to items from this package
- processdebugsrc += "fgrep -zw '%s' | "
- # Remove prefix in the source paths
- processdebugsrc += "sed 's#%s##g' | "
- processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
-
- cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
- try:
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError:
- # Can "fail" if internal headers/transient sources are attempted
- pass
-
- # cpio seems to have a bug when -l and -L are used together: symbolic links are just copied, not dereferenced.
- # Work around this by manually finding and copying any symbolic links that made it through.
- cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
- (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
-
- # The copy by cpio may have resulted in some empty directories! Remove these
- cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
-
- # Also remove debugsrcdir if it's empty
- for p in nosuchdir[::-1]:
- if os.path.exists(p) and not os.listdir(p):
- os.rmdir(p)
-
-#
-# Package data handling routines
-#
-
-def get_package_mapping (pkg, basepkg, d, depversions=None):
- import oe.packagedata
-
- data = oe.packagedata.read_subpkgdata(pkg, d)
- key = "PKG_%s" % pkg
-
- if key in data:
- # Have to avoid undoing the write_extra_pkgs(global_variants...)
- if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
- and data[key] == basepkg:
- return pkg
- if depversions == []:
- # Avoid returning a mapping if the renamed package rprovides its original name
- rprovkey = "RPROVIDES_%s" % pkg
- if rprovkey in data:
- if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
- bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
- return pkg
- # Do map to rewritten package name
- return data[key]
-
- return pkg
-
-def get_package_additional_metadata (pkg_type, d):
- base_key = "PACKAGE_ADD_METADATA"
- for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
- if d.getVar(key, False) is None:
- continue
- d.setVarFlag(key, "type", "list")
- if d.getVarFlag(key, "separator") is None:
- d.setVarFlag(key, "separator", "\\n")
- metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
- return "\n".join(metadata_fields).strip()
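-
-# For illustration (hypothetical field; the exact semantics depend on the
-# package backend), extra metadata can be injected per package type, e.g.:
-#   PACKAGE_ADD_METADATA_IPK = "Custom-Field: example-value"
-# The backend-specific variable (PACKAGE_ADD_METADATA_<IPK|DEB|RPM>) is
-# preferred over the generic PACKAGE_ADD_METADATA, as implemented above.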
-
-def runtime_mapping_rename (varname, pkg, d):
- #bb.note("%s before: %s" % (varname, d.getVar(varname)))
-
- new_depends = {}
- deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
- for depend, depversions in deps.items():
- new_depend = get_package_mapping(depend, pkg, d, depversions)
- if depend != new_depend:
- bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
- new_depends[new_depend] = deps[depend]
-
- d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
-
- #bb.note("%s after: %s" % (varname, d.getVar(varname)))
-
-#
-# Package functions suitable for inclusion in PACKAGEFUNCS
-#
-
-python package_get_auto_pr() {
- import oe.prservice
- import re
-
- # Support per recipe PRSERV_HOST
- pn = d.getVar('PN')
- host = d.getVar("PRSERV_HOST_" + pn)
- if host is not None:
- d.setVar("PRSERV_HOST", host)
-
- pkgv = d.getVar("PKGV")
-
- # PR Server not active, handle AUTOINC
- if not d.getVar('PRSERV_HOST'):
- if 'AUTOINC' in pkgv:
- d.setVar("PKGV", pkgv.replace("AUTOINC", "0"))
- return
-
- auto_pr = None
- pv = d.getVar("PV")
- version = d.getVar("PRAUTOINX")
- pkgarch = d.getVar("PACKAGE_ARCH")
- checksum = d.getVar("BB_TASKHASH")
-
- if d.getVar('PRSERV_LOCKDOWN'):
- auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None
- if auto_pr is None:
- bb.fatal("Can NOT get PRAUTO from lockdown exported file")
- d.setVar('PRAUTO',str(auto_pr))
- return
-
- try:
- conn = d.getVar("__PRSERV_CONN")
- if conn is None:
- conn = oe.prservice.prserv_make_conn(d)
- if conn is not None:
- if "AUTOINC" in pkgv:
- srcpv = bb.fetch2.get_srcrev(d)
- base_ver = "AUTOINC-%s" % version[:version.find(srcpv)]
- value = conn.getPR(base_ver, pkgarch, srcpv)
- d.setVar("PKGV", pkgv.replace("AUTOINC", str(value)))
-
- auto_pr = conn.getPR(version, pkgarch, checksum)
- except Exception as e:
- bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
- if auto_pr is None:
- bb.fatal("Can NOT get PRAUTO from remote PR service")
- d.setVar('PRAUTO',str(auto_pr))
-}
-
-LOCALEBASEPN ??= "${PN}"
-
-python package_do_split_locales() {
- if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
- bb.debug(1, "package requested not splitting locales")
- return
-
- packages = (d.getVar('PACKAGES') or "").split()
-
- datadir = d.getVar('datadir')
- if not datadir:
- bb.note("datadir not defined")
- return
-
- dvar = d.getVar('PKGD')
- pn = d.getVar('LOCALEBASEPN')
-
- if pn + '-locale' in packages:
- packages.remove(pn + '-locale')
-
- localedir = os.path.join(dvar + datadir, 'locale')
-
- if not cpath.isdir(localedir):
- bb.debug(1, "No locale files in this package")
- return
-
- locales = os.listdir(localedir)
-
- summary = d.getVar('SUMMARY') or pn
- description = d.getVar('DESCRIPTION') or ""
- locale_section = d.getVar('LOCALE_SECTION')
- mlprefix = d.getVar('MLPREFIX') or ""
- for l in sorted(locales):
- ln = legitimize_package_name(l)
- pkg = pn + '-locale-' + ln
- packages.append(pkg)
- d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
- d.setVar('RRECOMMENDS_' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
- d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
- d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
- d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
- if locale_section:
- d.setVar('SECTION_' + pkg, locale_section)
-
- d.setVar('PACKAGES', ' '.join(packages))
-
- # Disabled by RP 18/06/07
- # Wildcards aren't supported in debian
- # They break with ipkg since glibc-locale* will mean that
- # glibc-localedata-translit* won't install as a dependency
- # for some other package which breaks meta-toolchain
- # Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (d.getVar('RDEPENDS_%s' % pn) or "").split()
- #rdep.append('%s-locale*' % pn)
- #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
-}
-
-python perform_packagecopy () {
- import subprocess
-
- dest = d.getVar('D')
- dvar = d.getVar('PKGD')
-
- # Start package population by taking a copy of the installed
- # files to operate on
- # Preserve sparse files and hard links
- cmd = 'tar -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
-
- # replace RPATHs for the nativesdk binaries, to make them relocatable
- if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
- rpath_replace (dvar, d)
-}
-perform_packagecopy[cleandirs] = "${PKGD}"
-perform_packagecopy[dirs] = "${PKGD}"
-
-# We generate a master list of directories to process: we start by
-# seeding this list with reasonable defaults, then load entries from
-# the fs-perms.txt files
-python fixup_perms () {
- import pwd, grp
-
- # init using a string with the same format as a line as documented in
- # the fs-perms.txt file
- # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
- # <path> link <link target>
- #
- # __str__ can be used to print out an entry in the input format
- #
- # if fs_perms_entry.path is None:
- # an error occurred
- # if fs_perms_entry.link, you can retrieve:
- # fs_perms_entry.path = path
- # fs_perms_entry.link = target of link
- # if not fs_perms_entry.link, you can retrieve:
- # fs_perms_entry.path = path
- # fs_perms_entry.mode = expected dir mode or None
- # fs_perms_entry.uid = expected uid or -1
- # fs_perms_entry.gid = expected gid or -1
- # fs_perms_entry.walk = 'true' or something else
- # fs_perms_entry.fmode = expected file mode or None
- # fs_perms_entry.fuid = expected file uid or -1
- # fs_perms_entry.fgid = expected file gid or -1
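- # For illustration only (hedged example entries in the same format; the
- # shipped files/fs-perms.txt is authoritative):
- #   ${docdir} 0755 root root true 0644 root root
- #   ${localstatedir}/run link /run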
- class fs_perms_entry():
- def __init__(self, line):
- lsplit = line.split()
- if len(lsplit) == 3 and lsplit[1].lower() == "link":
- self._setlink(lsplit[0], lsplit[2])
- elif len(lsplit) == 8:
- self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
- else:
- msg = "Fixup Perms: invalid config line %s" % line
- package_qa_handle_error("perm-config", msg, d)
- self.path = None
- self.link = None
-
- def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
- self.path = os.path.normpath(path)
- self.link = None
- self.mode = self._procmode(mode)
- self.uid = self._procuid(uid)
- self.gid = self._procgid(gid)
- self.walk = walk.lower()
- self.fmode = self._procmode(fmode)
- self.fuid = self._procuid(fuid)
- self.fgid = self._procgid(fgid)
-
- def _setlink(self, path, link):
- self.path = os.path.normpath(path)
- self.link = link
-
- def _procmode(self, mode):
- if not mode or mode == "-":
- return None
- else:
- return int(mode,8)
-
- # Note uid/gid -1 has special significance in os.lchown
- def _procuid(self, uid):
- if uid is None or uid == "-":
- return -1
- elif uid.isdigit():
- return int(uid)
- else:
- return pwd.getpwnam(uid).pw_uid
-
- def _procgid(self, gid):
- if gid is None or gid == "-":
- return -1
- elif gid.isdigit():
- return int(gid)
- else:
- return grp.getgrnam(gid).gr_gid
-
- # Use for debugging the entries
- def __str__(self):
- if self.link:
- return "%s link %s" % (self.path, self.link)
- else:
- mode = "-"
- if self.mode:
- mode = "0%o" % self.mode
- fmode = "-"
- if self.fmode:
- fmode = "0%o" % self.fmode
- uid = self._mapugid(self.uid)
- gid = self._mapugid(self.gid)
- fuid = self._mapugid(self.fuid)
- fgid = self._mapugid(self.fgid)
- return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
-
- def _mapugid(self, id):
- if id is None or id == -1:
- return "-"
- else:
- return "%d" % id
-
- # Fix the permission, owner and group of path
- def fix_perms(path, mode, uid, gid, dir):
- if mode and not os.path.islink(path):
- #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
- os.chmod(path, mode)
- # -1 is a special value that means don't change the uid/gid
- # if they are BOTH -1, don't bother to lchown
- if not (uid == -1 and gid == -1):
- #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
- os.lchown(path, uid, gid)
-
- # Return a list of configuration files based on either the default
- # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
- # paths are resolved via BBPATH
- def get_fs_perms_list(d):
- str = ""
- bbpath = d.getVar('BBPATH')
- fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
- for conf_file in fs_perms_tables.split():
- confpath = bb.utils.which(bbpath, conf_file)
- if confpath:
- str += " %s" % bb.utils.which(bbpath, conf_file)
- else:
- bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
- return str
-
-
-
- dvar = d.getVar('PKGD')
-
- fs_perms_table = {}
- fs_link_table = {}
-
- # By default all of the standard directories specified in
- # bitbake.conf will get 0755 root:root.
- target_path_vars = [ 'base_prefix',
- 'prefix',
- 'exec_prefix',
- 'base_bindir',
- 'base_sbindir',
- 'base_libdir',
- 'datadir',
- 'sysconfdir',
- 'servicedir',
- 'sharedstatedir',
- 'localstatedir',
- 'infodir',
- 'mandir',
- 'docdir',
- 'bindir',
- 'sbindir',
- 'libexecdir',
- 'libdir',
- 'includedir',
- 'oldincludedir' ]
-
- for path in target_path_vars:
- dir = d.getVar(path) or ""
- if dir == "":
- continue
- fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
-
- # Now we actually load from the configuration files
- for conf in get_fs_perms_list(d).split():
- if not os.path.exists(conf):
- continue
- with open(conf) as f:
- for line in f:
- if line.startswith('#'):
- continue
- lsplit = line.split()
- if len(lsplit) == 0:
- continue
- if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
- msg = "Fixup perms: %s invalid line: %s" % (conf, line)
- package_qa_handle_error("perm-line", msg, d)
- continue
- entry = fs_perms_entry(d.expand(line))
- if entry and entry.path:
- if entry.link:
- fs_link_table[entry.path] = entry
- if entry.path in fs_perms_table:
- fs_perms_table.pop(entry.path)
- else:
- fs_perms_table[entry.path] = entry
- if entry.path in fs_link_table:
- fs_link_table.pop(entry.path)
-
- # Debug -- list out in-memory table
- #for dir in fs_perms_table:
- # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
- #for link in fs_link_table:
- # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
-
- # We process links first, so we can go back and fixup directory ownership
- # for any newly created directories
- # Process in sorted order so /run gets created before /run/lock, etc.
- for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
- link = entry.link
- dir = entry.path
- origin = dvar + dir
- if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
- continue
-
- if link[0] == "/":
- target = dvar + link
- ptarget = link
- else:
- target = os.path.join(os.path.dirname(origin), link)
- ptarget = os.path.join(os.path.dirname(dir), link)
- if os.path.exists(target):
- msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
- package_qa_handle_error("perm-link", msg, d)
- continue
-
- # Create path to move directory to, move it, and then setup the symlink
- bb.utils.mkdirhier(os.path.dirname(target))
- #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
- os.rename(origin, target)
- #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
- os.symlink(link, origin)
-
- for dir in fs_perms_table:
- origin = dvar + dir
- if not (cpath.exists(origin) and cpath.isdir(origin)):
- continue
-
- fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
-
- if fs_perms_table[dir].walk == 'true':
- for root, dirs, files in os.walk(origin):
- for dr in dirs:
- each_dir = os.path.join(root, dr)
- fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
- for f in files:
- each_file = os.path.join(root, f)
- fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
-}
-
-python split_and_strip_files () {
- import stat, errno
- import subprocess
-
- dvar = d.getVar('PKGD')
- pn = d.getVar('PN')
- targetos = d.getVar('TARGET_OS')
-
- oldcwd = os.getcwd()
- os.chdir(dvar)
-
- # We default to '.debug' style
- if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
- # Single debug-file-directory style debug info
- debugappend = ".debug"
- debugstaticappend = ""
- debugdir = ""
- debugstaticdir = ""
- debuglibdir = "/usr/lib/debug"
- debugstaticlibdir = "/usr/lib/debug-static"
- debugsrcdir = "/usr/src/debug"
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
- # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = ""
- elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
- else:
- # Original OE-core, a.k.a. ".debug", style debug info
- debugappend = ""
- debugstaticappend = ""
- debugdir = "/.debug"
- debugstaticdir = "/.debug-static"
- debuglibdir = ""
- debugstaticlibdir = ""
- debugsrcdir = "/usr/src/debug"
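-
- # As a hedged example, a distro or local.conf could select the single-tree
- # layout with:
- #   PACKAGE_DEBUG_SPLIT_STYLE = "debug-file-directory"
- # which corresponds to the first branch above; when the variable is unset the
- # per-directory ".debug" style in this final branch is used.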
-
- #
- # First let's figure out all of the files we may have to process ... do this only once!
- #
- elffiles = {}
- symlinks = {}
- kernmods = []
- staticlibs = []
- inodes = {}
- libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
- baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
- skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
- if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
- d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- checkelf = {}
- checkelflinks = {}
- for root, dirs, files in cpath.walk(dvar):
- for f in files:
- file = os.path.join(root, f)
-
- # Skip debug files
- if debugappend and file.endswith(debugappend):
- continue
- if debugdir and debugdir in os.path.dirname(file[len(dvar):]):
- continue
-
- if file in skipfiles:
- continue
-
- if file.endswith(".ko") and file.find("/lib/modules/") != -1:
- kernmods.append(file)
- continue
- if oe.package.is_static_lib(file):
- staticlibs.append(file)
- continue
-
- try:
- ltarget = cpath.realpath(file, dvar, False)
- s = cpath.lstat(ltarget)
- except OSError as e:
- (err, strerror) = e.args
- if err != errno.ENOENT:
- raise
- # Skip broken symlinks
- continue
- if not s:
- continue
- # Check it's an executable
- if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
- or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
-
- if cpath.islink(file):
- checkelflinks[file] = ltarget
- continue
- # Use a reference of device ID and inode number to identify files
- file_reference = "%d_%d" % (s.st_dev, s.st_ino)
- checkelf[file] = (file, file_reference)
-
- results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
- results_map = {}
- for (ltarget, elf_file) in results:
- results_map[ltarget] = elf_file
- for file in checkelflinks:
- ltarget = checkelflinks[file]
- # If it's a symlink, and points to an ELF file, we capture the readlink target
- if results_map[ltarget]:
- target = os.readlink(file)
- #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
- symlinks[file] = target
-
- results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
-
- # Sort results by file path. This ensures that the files are always
- # processed in the same order, which is important to make sure builds
- # are reproducible when dealing with hardlinks
- results.sort(key=lambda x: x[0])
-
- for (file, elf_file) in results:
- # It's a file (or hardlink), not a link
- # ...but is it ELF, and is it already stripped?
- if elf_file & 1:
- if elf_file & 2:
- if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
- bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
- else:
- msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
- package_qa_handle_error("already-stripped", msg, d)
- continue
-
- # At this point we have an unstripped elf file. We need to:
- # a) Make sure any file we strip is not hardlinked to anything else outside this tree
- # b) Only strip any hardlinked file once (no races)
- # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
-
- # Use a reference of device ID and inode number to identify files
- file_reference = checkelf[file][1]
- if file_reference in inodes:
- os.unlink(file)
- os.link(inodes[file_reference][0], file)
- inodes[file_reference].append(file)
- else:
- inodes[file_reference] = [file]
- # break hardlink
- bb.utils.break_hardlinks(file)
- elffiles[file] = elf_file
- # Modified the file so clear the cache
- cpath.updatecache(file)
-
- #
- # First let's process debug splitting
- #
- if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
-
- if debugsrcdir and not targetos.startswith("mingw"):
- if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
- results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
- else:
- for file in staticlibs:
- results.append( (file,source_info(file, d)) )
-
- sources = set()
- for r in results:
- sources.update(r[1])
-
- # Hardlink our debug symbols to the other hardlink copies
- for ref in inodes:
- if len(inodes[ref]) == 1:
- continue
-
- target = inodes[ref][0][len(dvar):]
- for file in inodes[ref][1:]:
- src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
- fpath = dvar + dest
- ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
- bb.utils.mkdirhier(os.path.dirname(fpath))
- # Only one hardlink of separated debug info file in each directory
- if not os.access(fpath, os.R_OK):
- #bb.note("Link %s -> %s" % (fpath, ftarget))
- os.link(ftarget, fpath)
-
- # Create symlinks for all cases we were able to split symbols
- for file in symlinks:
- src = file[len(dvar):]
- dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
- fpath = dvar + dest
- # Skip it if the target doesn't exist
- try:
- s = os.stat(fpath)
- except OSError as e:
- (err, strerror) = e.args
- if err != errno.ENOENT:
- raise
- continue
-
- ltarget = symlinks[file]
- lpath = os.path.dirname(ltarget)
- lbase = os.path.basename(ltarget)
- ftarget = ""
- if lpath and lpath != ".":
- ftarget += lpath + debugdir + "/"
- ftarget += lbase + debugappend
- if lpath.startswith(".."):
- ftarget = os.path.join("..", ftarget)
- bb.utils.mkdirhier(os.path.dirname(fpath))
- #bb.note("Symlink %s -> %s" % (fpath, ftarget))
- os.symlink(ftarget, fpath)
-
- # Process the debugsrcdir if requested...
- # This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, sources, d)
- #
- # End of debug splitting
- #
-
- #
- # Now let's go back over things and strip them
- #
- if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
- strip = d.getVar("STRIP")
- sfiles = []
- for file in elffiles:
- elf_file = int(elffiles[file])
- #bb.note("Strip %s" % file)
- sfiles.append((file, elf_file, strip))
- for f in kernmods:
- sfiles.append((f, 16, strip))
- if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
- for f in staticlibs:
- sfiles.append((f, 16, strip))
-
- oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
-
- # Build "minidebuginfo" and reinject it back into the stripped binaries
- if d.getVar('PACKAGE_MINIDEBUGINFO') == '1':
- oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
- extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
-
- #
- # End of strip
- #
- os.chdir(oldcwd)
-}
-
-python populate_packages () {
- import glob, re
-
- workdir = d.getVar('WORKDIR')
- outdir = d.getVar('DEPLOY_DIR')
- dvar = d.getVar('PKGD')
- packages = d.getVar('PACKAGES').split()
- pn = d.getVar('PN')
-
- bb.utils.mkdirhier(outdir)
- os.chdir(dvar)
-
- autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
-
- split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
-
- # If debug-with-srcpkg mode is enabled then add the source package if it
- # doesn't exist and add the source file contents to the source package.
- if split_source_package:
- src_package_name = ('%s-src' % d.getVar('PN'))
- if not src_package_name in packages:
- packages.append(src_package_name)
- d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
-
- # Sanity check PACKAGES for duplicates
- # This check should be moved to sanity.bbclass once we have the infrastructure
- package_dict = {}
-
- for i, pkg in enumerate(packages):
- if pkg in package_dict:
- msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
- package_qa_handle_error("packages-list", msg, d)
- # Ensure the source package gets the chance to pick up the source files
- # before the debug package by ordering it first in PACKAGES. Whether it
- # actually picks up any source files is controlled by
- # PACKAGE_DEBUG_SPLIT_STYLE.
- elif pkg.endswith("-src"):
- package_dict[pkg] = (10, i)
- elif autodebug and pkg.endswith("-dbg"):
- package_dict[pkg] = (30, i)
- else:
- package_dict[pkg] = (50, i)
- packages = sorted(package_dict.keys(), key=package_dict.get)
- d.setVar('PACKAGES', ' '.join(packages))
- pkgdest = d.getVar('PKGDEST')
-
- seen = []
-
- # os.mkdir masks the permissions with umask so we have to unset it first
- oldumask = os.umask(0)
-
- debug = []
- for root, dirs, files in cpath.walk(dvar):
- dir = root[len(dvar):]
- if not dir:
- dir = os.sep
- for f in (files + dirs):
- path = "." + os.path.join(dir, f)
- if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
- debug.append(path)
-
- for pkg in packages:
- root = os.path.join(pkgdest, pkg)
- bb.utils.mkdirhier(root)
-
- filesvar = d.getVar('FILES_%s' % pkg) or ""
- if "//" in filesvar:
- msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
- package_qa_handle_error("files-invalid", msg, d)
- filesvar = filesvar.replace("//", "/")
-
- origfiles = filesvar.split()
- files, symlink_paths = files_from_filevars(origfiles)
-
- if autodebug and pkg.endswith("-dbg"):
- files.extend(debug)
-
- for file in files:
- if (not cpath.islink(file)) and (not cpath.exists(file)):
- continue
- if file in seen:
- continue
- seen.append(file)
-
- def mkdir(src, dest, p):
- src = os.path.join(src, p)
- dest = os.path.join(dest, p)
- fstat = cpath.stat(src)
- os.mkdir(dest)
- os.chmod(dest, fstat.st_mode)
- os.chown(dest, fstat.st_uid, fstat.st_gid)
- if p not in seen:
- seen.append(p)
- cpath.updatecache(dest)
-
- def mkdir_recurse(src, dest, paths):
- if cpath.exists(dest + '/' + paths):
- return
- while paths.startswith("./"):
- paths = paths[2:]
- p = "."
- for c in paths.split("/"):
- p = os.path.join(p, c)
- if not cpath.exists(os.path.join(dest, p)):
- mkdir(src, dest, p)
-
- if cpath.isdir(file) and not cpath.islink(file):
- mkdir_recurse(dvar, root, file)
- continue
-
- mkdir_recurse(dvar, root, os.path.dirname(file))
- fpath = os.path.join(root,file)
- if not cpath.islink(file):
- os.link(file, fpath)
- continue
- ret = bb.utils.copyfile(file, fpath)
- if ret is False or ret == 0:
- bb.fatal("File population failed")
-
- # Check if symlink paths exist
- for file in symlink_paths:
- if not os.path.exists(os.path.join(root,file)):
- bb.fatal("File '%s' cannot be packaged into '%s' because its "
- "parent directory structure does not exist. One of "
- "its parent directories is a symlink whose target "
- "directory is not included in the package." %
- (file, pkg))
-
- os.umask(oldumask)
- os.chdir(workdir)
-
- # Handle LICENSE_EXCLUSION
- package_list = []
- for pkg in packages:
- licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
- if licenses:
- msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
- package_qa_handle_error("incompatible-license", msg, d)
- else:
- package_list.append(pkg)
- d.setVar('PACKAGES', ' '.join(package_list))
-
- unshipped = []
- for root, dirs, files in cpath.walk(dvar):
- dir = root[len(dvar):]
- if not dir:
- dir = os.sep
- for f in (files + dirs):
- path = os.path.join(dir, f)
- if ('.' + path) not in seen:
- unshipped.append(path)
-
- if unshipped != []:
- msg = pn + ": Files/directories were installed but not shipped in any package:"
- if "installed-vs-shipped" in (d.getVar('INSANE_SKIP_' + pn) or "").split():
- bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
- else:
- for f in unshipped:
- msg = msg + "\n " + f
- msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
- msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
- package_qa_handle_error("installed-vs-shipped", msg, d)
-}
-populate_packages[dirs] = "${D}"
-
-python package_fixsymlinks () {
- import errno
- pkgdest = d.getVar('PKGDEST')
- packages = d.getVar("PACKAGES", False).split()
-
- dangling_links = {}
- pkg_files = {}
- for pkg in packages:
- dangling_links[pkg] = []
- pkg_files[pkg] = []
- inst_root = os.path.join(pkgdest, pkg)
- for path in pkgfiles[pkg]:
- rpath = path[len(inst_root):]
- pkg_files[pkg].append(rpath)
- rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
- if not cpath.lexists(rtarget):
- dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
-
- newrdepends = {}
- for pkg in dangling_links:
- for l in dangling_links[pkg]:
- found = False
- bb.debug(1, "%s contains dangling link %s" % (pkg, l))
- for p in packages:
- if l in pkg_files[p]:
- found = True
- bb.debug(1, "target found in %s" % p)
- if p == pkg:
- break
- if pkg not in newrdepends:
- newrdepends[pkg] = []
- newrdepends[pkg].append(p)
- break
- if not found:
- bb.note("%s contains dangling symlink to %s" % (pkg, l))
-
- for pkg in newrdepends:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
- for p in newrdepends[pkg]:
- if p not in rdepends:
- rdepends[p] = []
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
-}
-
-
-python package_package_name_hook() {
- """
- A package_name_hook function can be used to rewrite the package names by
- changing PKG. For an example, see debian.bbclass.
- """
- pass
-}
-
-EXPORT_FUNCTIONS package_name_hook
-
-
-PKGDESTWORK = "${WORKDIR}/pkgdata"
-
-PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO PACKAGE_ADD_METADATA pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
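-
-# As an illustrative (hypothetical) excerpt, each ${PKGDESTWORK}/runtime/<pkg>
-# file written below contains one "VAR_<pkg>: value" line per variable above,
-# for example:
-#   PN_foo: foo
-#   PKGV_foo: 1.0
-#   RDEPENDS_foo: libc6 (>= 2.31)
-#   PKGSIZE_foo: 12345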
-
-python emit_pkgdata() {
- from glob import glob
- import json
-
- def process_postinst_on_target(pkg, mlprefix):
- pkgval = d.getVar('PKG_%s' % pkg)
- if pkgval is None:
- pkgval = pkg
-
- defer_fragment = """
-if [ -n "$D" ]; then
- $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
- exit 0
-fi
-""" % (pkgval, mlprefix)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
-
- if postinst_ontarget:
- bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += defer_fragment
- postinst += postinst_ontarget
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- def add_set_e_to_scriptlets(pkg):
- for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
- scriptlet = d.getVar('%s_%s' % (scriptlet_name, pkg))
- if scriptlet:
- scriptlet_split = scriptlet.split('\n')
- if scriptlet_split[0].startswith("#!"):
- scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
- else:
- scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
- d.setVar('%s_%s' % (scriptlet_name, pkg), scriptlet)
-
- def write_if_exists(f, pkg, var):
- def encode(str):
- import codecs
- c = codecs.getencoder("unicode_escape")
- return c(str)[0].decode("latin1")
-
- val = d.getVar('%s_%s' % (var, pkg))
- if val:
- f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
- return val
- val = d.getVar('%s' % (var))
- if val:
- f.write('%s: %s\n' % (var, encode(val)))
- return val
-
- def write_extra_pkgs(variants, pn, packages, pkgdatadir):
- for variant in variants:
- with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
- fd.write("PACKAGES: %s\n" % ' '.join(
- map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
-
- def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
- for variant in variants:
- for pkg in packages.split():
- ml_pkg = "%s-%s" % (variant, pkg)
- subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
- with open(subdata_file, 'w') as fd:
- fd.write("PKG_%s: %s" % (ml_pkg, pkg))
-
- packages = d.getVar('PACKAGES')
- pkgdest = d.getVar('PKGDEST')
- pkgdatadir = d.getVar('PKGDESTWORK')
-
- data_file = pkgdatadir + d.expand("/${PN}")
- with open(data_file, 'w') as fd:
- fd.write("PACKAGES: %s\n" % packages)
-
- pn = d.getVar('PN')
- global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
- variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
-
- if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
- write_extra_pkgs(variants, pn, packages, pkgdatadir)
-
- if bb.data.inherits_class('allarch', d) and not variants \
- and not bb.data.inherits_class('packagegroup', d):
- write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
-
- workdir = d.getVar('WORKDIR')
-
- for pkg in packages.split():
- pkgval = d.getVar('PKG_%s' % pkg)
- if pkgval is None:
- pkgval = pkg
- d.setVar('PKG_%s' % pkg, pkg)
-
- pkgdestpkg = os.path.join(pkgdest, pkg)
- files = {}
- total_size = 0
- seen = set()
- for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
- fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
- if fstat.st_ino not in seen:
- seen.add(fstat.st_ino)
- total_size += fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
-
- process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
- add_set_e_to_scriptlets(pkg)
-
- subdata_file = pkgdatadir + "/runtime/%s" % pkg
- with open(subdata_file, 'w') as sf:
- for var in (d.getVar('PKGDATA_VARS') or "").split():
- val = write_if_exists(sf, pkg, var)
-
- write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
-
- write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
-
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
-
- # Symlinks needed for rprovides lookup
- rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
- if rprov:
- for p in rprov.strip().split():
- subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
- bb.utils.mkdirhier(os.path.dirname(subdata_sym))
- oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True)
-
- allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg)
- if not allow_empty:
- allow_empty = d.getVar('ALLOW_EMPTY')
- root = "%s/%s" % (pkgdest, pkg)
- os.chdir(root)
- g = glob('*')
- if g or allow_empty == "1":
- # Symlinks needed for reverse lookups (from the final package name)
- subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
- oe.path.symlink("../runtime/%s" % pkg, subdata_sym, True)
-
- packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
- open(packagedfile, 'w').close()
-
- if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
- write_extra_runtime_pkgs(variants, packages, pkgdatadir)
-
- if bb.data.inherits_class('allarch', d) and not variants \
- and not bb.data.inherits_class('packagegroup', d):
- write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
-
-}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
-
-ldconfig_postinst_fragment() {
-if [ x"$D" = "x" ]; then
- if [ -x /sbin/ldconfig ]; then /sbin/ldconfig ; fi
-fi
-}
-
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps --alldeps"
-
-# Collect perfile run-time dependency metadata
-# Output:
-# FILERPROVIDESFLIST_pkg - list of all files w/ deps
-# FILERPROVIDES_filepath_pkg - per file dep
-#
-# FILERDEPENDSFLIST_pkg - list of all files w/ deps
-# FILERDEPENDS_filepath_pkg - per file dep
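-#
-# As a hedged, hypothetical example of the resulting pkgdata entries:
-#   FILERDEPENDSFLIST_foo: /usr/bin/foo
-#   FILERDEPENDS_/usr/bin/foo_foo: libc.so.6
-# i.e. one FLIST entry per package plus one per-file dependency variable.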
-
-python package_do_filedeps() {
- if d.getVar('SKIP_FILEDEPS') == '1':
- return
-
- pkgdest = d.getVar('PKGDEST')
- packages = d.getVar('PACKAGES')
- rpmdeps = d.getVar('RPMDEPS')
-
- def chunks(files, n):
- return [files[i:i+n] for i in range(0, len(files), n)]
-
- pkglist = []
- for pkg in packages.split():
- if d.getVar('SKIP_FILEDEPS_' + pkg) == '1':
- continue
- if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
- continue
- for files in chunks(pkgfiles[pkg], 100):
- pkglist.append((pkg, files, rpmdeps, pkgdest))
-
- processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
-
- provides_files = {}
- requires_files = {}
-
- for result in processed:
- (pkg, provides, requires) = result
-
- if pkg not in provides_files:
- provides_files[pkg] = []
- if pkg not in requires_files:
- requires_files[pkg] = []
-
- for file in sorted(provides):
- provides_files[pkg].append(file)
- key = "FILERPROVIDES_" + file + "_" + pkg
- d.appendVar(key, " " + " ".join(provides[file]))
-
- for file in sorted(requires):
- requires_files[pkg].append(file)
- key = "FILERDEPENDS_" + file + "_" + pkg
- d.appendVar(key, " " + " ".join(requires[file]))
-
- for pkg in requires_files:
- d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
- for pkg in provides_files:
- d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
-}
-
-SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
-SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
-
-python package_do_shlibs() {
- import itertools
- import re, pipes
- import subprocess
-
- exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
- if exclude_shlibs:
- bb.note("not generating shlibs")
- return
-
- lib_re = re.compile(r"^.*\.so")
- libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
-
- packages = d.getVar('PACKAGES')
-
- shlib_pkgs = []
- exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
- if exclusion_list:
- for pkg in packages.split():
- if pkg not in exclusion_list.split():
- shlib_pkgs.append(pkg)
- else:
- bb.note("not generating shlibs for %s" % pkg)
- else:
- shlib_pkgs = packages.split()
-
- targetos = d.getVar('TARGET_OS')
-
- workdir = d.getVar('WORKDIR')
-
- ver = d.getVar('PKGV')
- if not ver:
- msg = "PKGV not defined"
- package_qa_handle_error("pkgv-undefined", msg, d)
- return
-
- pkgdest = d.getVar('PKGDEST')
-
- shlibswork_dir = d.getVar('SHLIBSWORKDIR')
-
- def linux_so(file, pkg, pkgver, d):
- needs_ldconfig = False
- needed = set()
- sonames = set()
- renames = []
- ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
- cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
- fd = os.popen(cmd)
- lines = fd.readlines()
- fd.close()
- rpath = tuple()
- for l in lines:
- m = re.match(r"\s+RPATH\s+([^\s]*)", l)
- if m:
- rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
- rpath = tuple(map(os.path.normpath, rpaths))
- for l in lines:
- m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
- if m:
- dep = m.group(1)
- if dep not in needed:
- needed.add((dep, file, rpath))
- m = re.match(r"\s+SONAME\s+([^\s]*)", l)
- if m:
- this_soname = m.group(1)
- prov = (this_soname, ldir, pkgver)
- if not prov in sonames:
- # if library is private (only used by package) then do not build shlib for it
- import fnmatch
- if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
- sonames.add(prov)
- if libdir_re.match(os.path.dirname(file)):
- needs_ldconfig = True
- if snap_symlinks and (os.path.basename(file) != this_soname):
- renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
- return (needs_ldconfig, needed, sonames, renames)
-
- def darwin_so(file, needed, sonames, renames, pkgver):
- if not os.path.exists(file):
- return
- ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
-
- def get_combinations(base):
- #
- # Given a base library name, find all combinations of this split by "." and "-"
- #
- combos = []
- options = base.split(".")
- for i in range(1, len(options) + 1):
- combos.append(".".join(options[0:i]))
- options = base.split("-")
- for i in range(1, len(options) + 1):
- combos.append("-".join(options[0:i]))
- return combos
-
- if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
- # Drop suffix
- name = os.path.basename(file).rsplit(".",1)[0]
- # Find all combinations
- combos = get_combinations(name)
- for combo in combos:
- if not combo in sonames:
- prov = (combo, ldir, pkgver)
- sonames.add(prov)
- if file.endswith('.dylib') or file.endswith('.so'):
- rpath = []
- p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
- # If returned successfully, process stdout for results
- if p.returncode == 0:
- for l in out.decode().split("\n"):
- l = l.strip()
- if l.startswith('path '):
- rpath.append(l.split()[1])
-
- p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
- # If returned successfully, process stdout for results
- if p.returncode == 0:
- for l in out.decode().split("\n"):
- l = l.strip()
- if not l or l.endswith(":"):
- continue
- if "is not an object file" in l:
- continue
- name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
- if name and name not in needed[pkg]:
- needed[pkg].add((name, file, tuple()))
-
- def mingw_dll(file, needed, sonames, renames, pkgver):
- if not os.path.exists(file):
- return
-
- if file.endswith(".dll"):
- # assume all dlls are shared objects provided by the package
- sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
-
- if (file.endswith(".dll") or file.endswith(".exe")):
- # use objdump to search for "DLL Name: .*\.dll"
- p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = p.communicate()
- # process the output, grabbing all .dll names
- if p.returncode == 0:
- for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
- dllname = m.group(1)
- if dllname:
- needed[pkg].add((dllname, file, tuple()))
-
- if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
- snap_symlinks = True
- else:
- snap_symlinks = False
-
- needed = {}
-
- shlib_provider = oe.package.read_shlib_providers(d)
-
- for pkg in shlib_pkgs:
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
- private_libs = private_libs.split()
- needs_ldconfig = False
- bb.debug(2, "calculating shlib provides for %s" % pkg)
-
- pkgver = d.getVar('PKGV_' + pkg)
- if not pkgver:
- pkgver = d.getVar('PV_' + pkg)
- if not pkgver:
- pkgver = ver
-
- needed[pkg] = set()
- sonames = set()
- renames = []
- linuxlist = []
- for file in pkgfiles[pkg]:
- soname = None
- if cpath.islink(file):
- continue
- if targetos == "darwin" or targetos == "darwin8":
- darwin_so(file, needed, sonames, renames, pkgver)
- elif targetos.startswith("mingw"):
- mingw_dll(file, needed, sonames, renames, pkgver)
- elif os.access(file, os.X_OK) or lib_re.match(file):
- linuxlist.append(file)
-
- if linuxlist:
- results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
- for r in results:
- ldconfig = r[0]
- needed[pkg] |= r[1]
- sonames |= r[2]
- renames.extend(r[3])
- needs_ldconfig = needs_ldconfig or ldconfig
-
- for (old, new) in renames:
- bb.note("Renaming %s to %s" % (old, new))
- os.rename(old, new)
- pkgfiles[pkg].remove(old)
-
- shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
- if len(sonames):
- with open(shlibs_file, 'w') as fd:
- for s in sonames:
- if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
- (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
- if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
- fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (pkg, pkgver)
- if needs_ldconfig:
- bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('ldconfig_postinst_fragment')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
- bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
-
- assumed_libs = d.getVar('ASSUME_SHLIBS')
- if assumed_libs:
- libdir = d.getVar("libdir")
- for e in assumed_libs.split():
- l, dep_pkg = e.split(":")
- lib_ver = None
- dep_pkg = dep_pkg.rsplit("_", 1)
- if len(dep_pkg) == 2:
- lib_ver = dep_pkg[1]
- dep_pkg = dep_pkg[0]
- if l not in shlib_provider:
- shlib_provider[l] = {}
- shlib_provider[l][libdir] = (dep_pkg, lib_ver)
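- # For reference, each ASSUME_SHLIBS entry has the form <library>:<package>[_<version>],
- # e.g. (illustrative value only): ASSUME_SHLIBS = "libEGL.so.1:libegl-implementation"
- # which registers the named package as the provider without scanning any files.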
-
- libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
-
- for pkg in shlib_pkgs:
- bb.debug(2, "calculating shlib requirements for %s" % pkg)
-
- private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
- private_libs = private_libs.split()
-
- deps = list()
- for n in needed[pkg]:
- # If n is in the private libraries, don't try to search for a provider for it.
- # This could cause problems if some abc.bb provides a private
- # /opt/abc/lib/libfoo.so.1 and also contains /usr/bin/abc, which depends on the system library libfoo.so.1,
- # but skipping it is still a better alternative than providing our own
- # version and then adding a runtime dependency on the same system library.
- import fnmatch
- if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
- bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
- continue
- if n[0] in shlib_provider.keys():
- shlib_provider_map = shlib_provider[n[0]]
- matches = set()
- for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
- if p in shlib_provider_map:
- matches.add(p)
- if len(matches) > 1:
- matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
- bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
- elif len(matches) == 1:
- (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
-
- bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
-
- if dep_pkg == pkg:
- continue
-
- if ver_needed:
- dep = "%s (>= %s)" % (dep_pkg, ver_needed)
- else:
- dep = dep_pkg
- if not dep in deps:
- deps.append(dep)
- continue
- bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
-
- deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
- if os.path.exists(deps_file):
- os.remove(deps_file)
- if deps:
- with open(deps_file, 'w') as fd:
- for dep in sorted(deps):
- fd.write(dep + '\n')
-}
-
-python package_do_pkgconfig () {
- import re
-
- packages = d.getVar('PACKAGES')
- workdir = d.getVar('WORKDIR')
- pkgdest = d.getVar('PKGDEST')
-
- shlibs_dirs = d.getVar('SHLIBSDIRS').split()
- shlibswork_dir = d.getVar('SHLIBSWORKDIR')
-
- pc_re = re.compile(r'(.*)\.pc$')
- var_re = re.compile(r'(.*)=(.*)')
- field_re = re.compile(r'(.*): (.*)')
-
- pkgconfig_provided = {}
- pkgconfig_needed = {}
- for pkg in packages.split():
- pkgconfig_provided[pkg] = []
- pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
- m = pc_re.match(file)
- if m:
- pd = bb.data.init()
- name = m.group(1)
- pkgconfig_provided[pkg].append(name)
- if not os.access(file, os.R_OK):
- continue
- with open(file, 'r') as f:
- lines = f.readlines()
- for l in lines:
- m = var_re.match(l)
- if m:
- name = m.group(1)
- val = m.group(2)
- pd.setVar(name, pd.expand(val))
- continue
- m = field_re.match(l)
- if m:
- hdr = m.group(1)
- exp = pd.expand(m.group(2))
- if hdr == 'Requires':
- pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
-
- for pkg in packages.split():
- pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
- if pkgconfig_provided[pkg] != []:
- with open(pkgs_file, 'w') as f:
- for p in pkgconfig_provided[pkg]:
- f.write('%s\n' % p)
-
- # Go from least to most specific since the last one found wins
- for dir in reversed(shlibs_dirs):
- if not os.path.exists(dir):
- continue
- for file in sorted(os.listdir(dir)):
- m = re.match(r'^(.*)\.pclist$', file)
- if m:
- pkg = m.group(1)
- with open(os.path.join(dir, file)) as fd:
- lines = fd.readlines()
- pkgconfig_provided[pkg] = []
- for l in lines:
- pkgconfig_provided[pkg].append(l.rstrip())
-
- for pkg in packages.split():
- deps = []
- for n in pkgconfig_needed[pkg]:
- found = False
- for k in pkgconfig_provided.keys():
- if n in pkgconfig_provided[k]:
- if k != pkg and not (k in deps):
- deps.append(k)
- found = True
- if not found:
- bb.note("couldn't find pkgconfig module '%s' in any package" % n)
- deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
- if len(deps):
- with open(deps_file, 'w') as fd:
- for dep in deps:
- fd.write(dep + '\n')
-}
-
-def read_libdep_files(d):
- pkglibdeps = {}
- packages = d.getVar('PACKAGES').split()
- for pkg in packages:
- pkglibdeps[pkg] = {}
- for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
- depsfile = d.expand("${PKGDEST}/" + pkg + extension)
- if os.access(depsfile, os.R_OK):
- with open(depsfile) as fd:
- lines = fd.readlines()
- for l in lines:
- l = l.rstrip()
- deps = bb.utils.explode_dep_versions2(l)
- for dep in deps:
- if not dep in pkglibdeps[pkg]:
- pkglibdeps[pkg][dep] = deps[dep]
- return pkglibdeps
-
-python read_shlibdeps () {
- pkglibdeps = read_libdep_files(d)
-
- packages = d.getVar('PACKAGES').split()
- for pkg in packages:
- rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
- for dep in sorted(pkglibdeps[pkg]):
- # Add the dep if it's not already there, or if no comparison is set
- if dep not in rdepends:
- rdepends[dep] = []
- for v in pkglibdeps[pkg][dep]:
- if v not in rdepends[dep]:
- rdepends[dep].append(v)
- d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
-}
-
-python package_depchains() {
- """
- For a given set of prefix and postfix modifiers, make those packages
- RRECOMMENDS on the corresponding packages for its RDEPENDS.
-
- Example: If package A depends upon package B, and A's .bb emits an
- A-dev package, this would make A-dev Recommends: B-dev.
-
- If only one of a given suffix is specified, it will take the RRECOMMENDS
- based on the RDEPENDS of *all* other packages. If more than one of a given
- suffix is specified, it will only use the RDEPENDS of the single parent
- package.
- """
-
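- # As a hedged sketch of the effect: with "-dev" in DEPCHAIN_POST and a recipe
- # where package A has RDEPENDS_A = "B", the code below adds B-dev to
- # RRECOMMENDS_A-dev; DEPCHAIN_PRE prefixes are handled the same way.
-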
- packages = d.getVar('PACKAGES')
- postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
- prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
-
- def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
-
- #bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
-
- for depend in sorted(depends):
- if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
- #bb.note("Skipping %s" % depend)
- continue
- if depend.endswith('-dev'):
- depend = depend[:-4]
- if depend.endswith('-dbg'):
- depend = depend[:-4]
- pkgname = getname(depend, suffix)
- #bb.note("Adding %s for %s" % (pkgname, depend))
- if pkgname not in rreclist and pkgname != pkg:
- rreclist[pkgname] = []
-
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
-
- def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
-
- #bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
-
- for depend in sorted(rdepends):
- if depend.find('virtual-locale-') != -1:
- #bb.note("Skipping %s" % depend)
- continue
- if depend.endswith('-dev'):
- depend = depend[:-4]
- if depend.endswith('-dbg'):
- depend = depend[:-4]
- pkgname = getname(depend, suffix)
- #bb.note("Adding %s for %s" % (pkgname, depend))
- if pkgname not in rreclist and pkgname != pkg:
- rreclist[pkgname] = []
-
- #bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
-
- def add_dep(list, dep):
- if dep not in list:
- list.append(dep)
-
- depends = []
- for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
- add_dep(depends, dep)
-
- rdepends = []
- for pkg in packages.split():
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg) or ""):
- add_dep(rdepends, dep)
-
- #bb.note('rdepends is %s' % rdepends)
-
- def post_getname(name, suffix):
- return '%s%s' % (name, suffix)
- def pre_getname(name, suffix):
- return '%s%s' % (suffix, name)
-
- pkgs = {}
- for pkg in packages.split():
- for postfix in postfixes:
- if pkg.endswith(postfix):
- if not postfix in pkgs:
- pkgs[postfix] = {}
- pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
-
- for prefix in prefixes:
- if pkg.startswith(prefix):
- if not prefix in pkgs:
- pkgs[prefix] = {}
- pkgs[prefix][pkg] = (pkg[len(prefix):], pre_getname)
-
- if "-dbg" in pkgs:
- pkglibdeps = read_libdep_files(d)
- pkglibdeplist = []
- for pkg in pkglibdeps:
- for k in pkglibdeps[pkg]:
- add_dep(pkglibdeplist, k)
- dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
-
- for suffix in pkgs:
- for pkg in pkgs[suffix]:
- if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
- continue
- (base, func) = pkgs[suffix][pkg]
- if suffix == "-dev":
- pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
- elif suffix == "-dbg":
- if not dbgdefaultdeps:
- pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
- continue
- if len(pkgs[suffix]) == 1:
- pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
- else:
- rdeps = []
- for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base) or ""):
- add_dep(rdeps, dep)
- pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
-}
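As a minimal sketch of the name mapping package_depchains() applies for a '-dev' style postfix (package names here are illustrative only): a runtime dependency 'libfoo' of the parent becomes a recommendation on 'libfoo-dev', with any existing -dev/-dbg suffix stripped first so 'libfoo-dev-dev' is never generated.

def post_getname(name, suffix):
    return '%s%s' % (name, suffix)

def depchain_pkgname(depend, suffix):
    # Mirrors the stripping done in pkg_addrrecs()/pkg_adddeprrecs() above
    if depend.endswith('-dev'):
        depend = depend[:-4]
    if depend.endswith('-dbg'):
        depend = depend[:-4]
    return post_getname(depend, suffix)

print(depchain_pkgname('libfoo', '-dev'))      # libfoo-dev
print(depchain_pkgname('libfoo-dbg', '-dev'))  # libfoo-dev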
-
-# Since bitbake can't determine which variables are accessed during package
-# iteration, we need to list them here:
-PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS PACKAGE_ADD_METADATA"
-
-def gen_packagevar(d, pkgvars="PACKAGEVARS"):
- ret = []
- pkgs = (d.getVar("PACKAGES") or "").split()
- vars = (d.getVar(pkgvars) or "").split()
- for v in vars:
- ret.append(v)
- for p in pkgs:
- for v in vars:
- ret.append(v + "_" + p)
-
- # Ensure that changes to INCOMPATIBLE_LICENSE re-run do_package for
- # affected recipes.
- ret.append('LICENSE_EXCLUSION-%s' % p)
- return " ".join(ret)
-
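For illustration, a plain-Python rendering of the list gen_packagevar() above produces for a hypothetical recipe with two packages and two variables:

pkgs = "foo foo-dev".split()
pkgvars = "FILES RDEPENDS".split()
ret = list(pkgvars)
for p in pkgs:
    for v in pkgvars:
        ret.append(v + "_" + p)
    ret.append("LICENSE_EXCLUSION-%s" % p)
print(" ".join(ret))
# FILES RDEPENDS FILES_foo RDEPENDS_foo LICENSE_EXCLUSION-foo
# FILES_foo-dev RDEPENDS_foo-dev LICENSE_EXCLUSION-foo-dev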
-PACKAGE_PREPROCESS_FUNCS ?= ""
-# Functions for setting up PKGD
-PACKAGEBUILDPKGD ?= " \
- package_prepare_pkgdata \
- perform_packagecopy \
- ${PACKAGE_PREPROCESS_FUNCS} \
- split_and_strip_files \
- fixup_perms \
- "
-# Functions which split PKGD up into separate packages
-PACKAGESPLITFUNCS ?= " \
- package_do_split_locales \
- populate_packages"
-# Functions which process metadata based on split packages
-PACKAGEFUNCS += " \
- package_fixsymlinks \
- package_name_hook \
- package_do_filedeps \
- package_do_shlibs \
- package_do_pkgconfig \
- read_shlibdeps \
- package_depchains \
- emit_pkgdata"
-
-python do_package () {
- # Change the following version to cause sstate to invalidate the package
- # cache. This is useful if an item this class depends on changes in a
- # way that changes the output of this class. rpmdeps is a good example,
- # as any change to rpmdeps requires this to be rerun.
- # PACKAGE_BBCLASS_VERSION = "2"
-
- # Init cachedpath
- global cpath
- cpath = oe.cachedpath.CachedPath()
-
- ###########################################################################
- # Sanity test the setup
- ###########################################################################
-
- packages = (d.getVar('PACKAGES') or "").split()
- if len(packages) < 1:
- bb.debug(1, "No packages to build, skipping do_package")
- return
-
- workdir = d.getVar('WORKDIR')
- outdir = d.getVar('DEPLOY_DIR')
- dest = d.getVar('D')
- dvar = d.getVar('PKGD')
- pn = d.getVar('PN')
-
- if not workdir or not outdir or not dest or not dvar or not pn:
- msg = "WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package"
- package_qa_handle_error("var-undefined", msg, d)
- return
-
- bb.build.exec_func("package_get_auto_pr", d)
-
- ###########################################################################
- # Optimisations
- ###########################################################################
-
- # Continually expanding complex expressions is inefficient, particularly
- # when we write to the datastore and invalidate the expansion cache. This
- # code pre-expands some frequently used variables
-
- def expandVar(x, d):
- d.setVar(x, d.getVar(x))
-
- for x in 'PN', 'PV', 'BPN', 'TARGET_SYS', 'EXTENDPRAUTO':
- expandVar(x, d)
-
- ###########################################################################
- # Setup PKGD (from D)
- ###########################################################################
-
- for f in (d.getVar('PACKAGEBUILDPKGD') or '').split():
- bb.build.exec_func(f, d)
-
- ###########################################################################
- # Split up PKGD into PKGDEST
- ###########################################################################
-
- cpath = oe.cachedpath.CachedPath()
-
- for f in (d.getVar('PACKAGESPLITFUNCS') or '').split():
- bb.build.exec_func(f, d)
-
- ###########################################################################
- # Process PKGDEST
- ###########################################################################
-
- # Build global list of files in each split package
- global pkgfiles
- pkgfiles = {}
- packages = d.getVar('PACKAGES').split()
- pkgdest = d.getVar('PKGDEST')
- for pkg in packages:
- pkgfiles[pkg] = []
- for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
- for file in files:
- pkgfiles[pkg].append(walkroot + os.sep + file)
-
- for f in (d.getVar('PACKAGEFUNCS') or '').split():
- bb.build.exec_func(f, d)
-
- qa_sane = d.getVar("QA_SANE")
- if not qa_sane:
- bb.fatal("Fatal QA errors found, failing task.")
-}
-
-do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
-do_package[vardeps] += "${PACKAGEBUILDPKGD} ${PACKAGESPLITFUNCS} ${PACKAGEFUNCS} ${@gen_packagevar(d)}"
-addtask package after do_install
-
-SSTATETASKS += "do_package"
-do_package[cleandirs] = "${PKGDEST} ${PKGDESTWORK}"
-do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST} ${PKGDESTWORK}"
-do_package_setscene[dirs] = "${STAGING_DIR}"
-
-python do_package_setscene () {
- sstate_setscene(d)
-}
-addtask do_package_setscene
-
-# Copy from PKGDESTWORK to a temporary directory, as the temporary directory can be
-# cleaned at both do_package_setscene and do_packagedata_setscene, leading to races
-python do_packagedata () {
- src = d.expand("${PKGDESTWORK}")
- dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
- oe.path.copyhardlinktree(src, dest)
-}
-
-addtask packagedata before do_build after do_package
-
-SSTATETASKS += "do_packagedata"
-do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
-do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
-do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
-
-python do_packagedata_setscene () {
- sstate_setscene(d)
-}
-addtask do_packagedata_setscene
-
-#
-# Helper functions for the package writing classes
-#
-
-def mapping_rename_hook(d):
- """
- Rewrite variables to account for package renaming in things
- like debian.bbclass or manual PKG variable name changes
- """
- pkg = d.getVar("PKG")
- runtime_mapping_rename("RDEPENDS", pkg, d)
- runtime_mapping_rename("RRECOMMENDS", pkg, d)
- runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
deleted file mode 100644
index cb723fc1d6..0000000000
--- a/meta/classes/package_deb.bbclass
+++ /dev/null
@@ -1,325 +0,0 @@
-#
-# Copyright 2006-2008 OpenedHand Ltd.
-#
-
-inherit package
-
-IMAGE_PKGTYPE ?= "deb"
-
-DPKG_BUILDCMD ??= "dpkg-deb"
-
-DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
-DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
-
-PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
-
-APTCONF_TARGET = "${WORKDIR}"
-
-APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
-
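The ${@...} expressions above (and the similar OPKG_ARGS ones later in this series) use Python's bool-as-index idiom; a tiny standalone illustration with a made-up value for NO_RECOMMENDATIONS:

NO_RECOMMENDATIONS = "1"   # normally read from the datastore
# False indexes element 0 (''), True indexes element 1
print(['', '--no-install-recommends'][NO_RECOMMENDATIONS == "1"])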
-def debian_arch_map(arch, tune):
- tune_features = tune.split()
- if arch == "allarch":
- return "all"
- if arch in ["i586", "i686"]:
- return "i386"
- if arch == "x86_64":
- if "mx32" in tune_features:
- return "x32"
- return "amd64"
- if arch.startswith("mips"):
- endian = ["el", ""]["bigendian" in tune_features]
- if "n64" in tune_features:
- return "mips64" + endian
- if "n32" in tune_features:
- return "mipsn32" + endian
- return "mips" + endian
- if arch == "powerpc":
- return arch + ["", "spe"]["spe" in tune_features]
- if arch == "aarch64":
- return "arm64"
- if arch == "arm":
- return arch + ["el", "hf"]["callconvention-hard" in tune_features]
- return arch
-
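A few example mappings, assuming debian_arch_map() above is in scope (the tune feature strings are illustrative):

checks = {
    ("x86_64", ""): "amd64",
    ("x86_64", "mx32"): "x32",
    ("i686", ""): "i386",
    ("aarch64", ""): "arm64",
    ("arm", "callconvention-hard"): "armhf",
    ("mips", "bigendian n64"): "mips64",
    ("allarch", ""): "all",
}
for (arch, tune), expected in checks.items():
    assert debian_arch_map(arch, tune) == expected, (arch, tune)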
-python do_package_deb () {
- packages = d.getVar('PACKAGES')
- if not packages:
- bb.debug(1, "PACKAGES not defined, nothing to package")
- return
-
- tmpdir = d.getVar('TMPDIR')
- if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
- os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
-
- oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
-}
-do_package_deb[vardeps] += "deb_write_pkg"
-do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
-
-def deb_write_pkg(pkg, d):
- import re, copy
- import textwrap
- import subprocess
- import collections
- import codecs
-
- outdir = d.getVar('PKGWRITEDIRDEB')
- pkgdest = d.getVar('PKGDEST')
-
- def cleanupcontrol(root):
- for p in ['CONTROL', 'DEBIAN']:
- p = os.path.join(root, p)
- if os.path.exists(p):
- bb.utils.prunedir(p)
-
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
-
- lf = bb.utils.lockfile(root + ".lock")
- try:
-
- localdata.setVar('ROOT', '')
- localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
- if not pkgname:
- pkgname = pkg
- localdata.setVar('PKG', pkgname)
-
- localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
-
- basedir = os.path.join(os.path.dirname(root))
-
- pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH'))
- bb.utils.mkdirhier(pkgoutdir)
-
- os.chdir(root)
- cleanupcontrol(root)
- from glob import glob
- g = glob('*')
- if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
- return
-
- controldir = os.path.join(root, 'DEBIAN')
- bb.utils.mkdirhier(controldir)
- os.chmod(controldir, 0o755)
-
- ctrlfile = codecs.open(os.path.join(controldir, 'control'), 'w', 'utf-8')
-
- fields = []
- pe = d.getVar('PKGE')
- if pe and int(pe) > 0:
- fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
- else:
- fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
- fields.append(["Description: %s\n", ['DESCRIPTION']])
- fields.append(["Section: %s\n", ['SECTION']])
- fields.append(["Priority: %s\n", ['PRIORITY']])
- fields.append(["Maintainer: %s\n", ['MAINTAINER']])
- fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
- fields.append(["OE: %s\n", ['PN']])
- fields.append(["PackageArch: %s\n", ['PACKAGE_ARCH']])
- if d.getVar('HOMEPAGE'):
- fields.append(["Homepage: %s\n", ['HOMEPAGE']])
-
- # Package, Version, Maintainer, Description - mandatory
- # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
-
-
- def pullData(l, d):
- l2 = []
- for i in l:
- data = d.getVar(i)
- if data is None:
- raise KeyError(i)
- if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH') == 'all':
- data = 'all'
- elif i == 'PACKAGE_ARCH' or i == 'DPKG_ARCH':
- # The fields in the deb package control file don't allow the
- # '_' character, so change any '_' in the arch to '-',
- # e.g. 'x86_64' --> 'x86-64'
- data = data.replace('_', '-')
- l2.append(data)
- return l2
-
- ctrlfile.write("Package: %s\n" % pkgname)
- if d.getVar('PACKAGE_ARCH') == "all":
- ctrlfile.write("Multi-Arch: foreign\n")
- # check for required fields
- for (c, fs) in fields:
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION') or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent
- for t in description.split('\\n'):
- ctrlfile.write(' %s\n' % (t.strip() or '.'))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description.strip(), width=74, initial_indent=' ', subsequent_indent=' '))
-
- else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
-
- # more fields
-
- custom_fields_chunk = get_package_additional_metadata("deb", localdata)
- if custom_fields_chunk:
- ctrlfile.write(custom_fields_chunk)
- ctrlfile.write("\n")
-
- mapping_rename_hook(localdata)
-
- def debian_cmp_remap(var):
- # dpkg does not allow for '(', ')' or ':' in a dependency name
- # Replace any instances of them with '__'
- #
- # In Debian, '>' and '<' do not mean what they appear to mean:
- # '<' = less than or equal
- # '>' = greater than or equal
- # Adjust these to the '<<' and '>>' equivalents.
- #
- for dep in list(var.keys()):
- if '(' in dep or '/' in dep:
- newdep = re.sub(r'[(:)/]', '__', dep)
- if newdep.startswith("__"):
- newdep = "A" + newdep
- if newdep != dep:
- var[newdep] = var[dep]
- del var[dep]
- for dep in var:
- for i, v in enumerate(var[dep]):
- if (v or "").startswith("< "):
- var[dep][i] = var[dep][i].replace("< ", "<< ")
- elif (v or "").startswith("> "):
- var[dep][i] = var[dep][i].replace("> ", ">> ")
-
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
- debian_cmp_remap(rdepends)
- for dep in list(rdepends.keys()):
- if dep == pkg:
- del rdepends[dep]
- continue
- if '*' in dep:
- del rdepends[dep]
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
- debian_cmp_remap(rrecommends)
- for dep in list(rrecommends.keys()):
- if '*' in dep:
- del rrecommends[dep]
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
- debian_cmp_remap(rsuggests)
- # Deliberately drop version information here, not wanted/supported by deb
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
- # Remove file paths if any from rprovides, debian does not support custom providers
- for key in list(rprovides.keys()):
- if key.startswith('/'):
- del rprovides[key]
- rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
- debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
- debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
- debian_cmp_remap(rconflicts)
- if rdepends:
- ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
- if rsuggests:
- ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
- if rrecommends:
- ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
- if rprovides:
- ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
- if rreplaces:
- ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
- if rconflicts:
- ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
- ctrlfile.close()
-
- for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script)
- if not scriptvar:
- continue
- scriptvar = scriptvar.strip()
- scriptfile = open(os.path.join(controldir, script), 'w')
-
- if scriptvar.startswith("#!"):
- pos = scriptvar.find("\n") + 1
- scriptfile.write(scriptvar[:pos])
- else:
- pos = 0
- scriptfile.write("#!/bin/sh\n")
-
- # Prevent the prerm/postrm scripts from being run during an upgrade
- if script in ('prerm', 'postrm'):
- scriptfile.write('[ "$1" != "upgrade" ] || exit 0\n')
-
- scriptfile.write(scriptvar[pos:])
- scriptfile.write('\n')
- scriptfile.close()
- os.chmod(os.path.join(controldir, script), 0o755)
-
- conffiles_str = ' '.join(get_conffiles(pkg, d))
- if conffiles_str:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- for f in conffiles_str.split():
- if os.path.exists(oe.path.join(root, f)):
- conffiles.write('%s\n' % f)
- conffiles.close()
-
- os.chdir(basedir)
- subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
- root, pkgoutdir),
- stderr=subprocess.STDOUT,
- shell=True)
-
- finally:
- cleanupcontrol(root)
- bb.utils.unlockfile(lf)
-
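A standalone sketch (names and versions made up) of what the nested debian_cmp_remap() above does to the exploded dependency dict before it is written into DEBIAN/control:

import re

def cmp_remap_sketch(var):
    # dpkg forbids '(', ')', ':' and '/' in dependency names, and uses the
    # '<<'/'>>' operators where OE metadata writes '< '/'> '
    for dep in list(var):
        newdep = re.sub(r'[(:)/]', '__', dep)
        if newdep != dep:
            var[newdep] = var.pop(dep)
    for dep in var:
        for i, v in enumerate(var[dep]):
            if v.startswith("< "):
                var[dep][i] = "<< " + v[2:]
            elif v.startswith("> "):
                var[dep][i] = ">> " + v[2:]

deps = {"update-rc.d": ["> 1.0"], "virtual/foo": []}
cmp_remap_sketch(deps)
print(deps)  # {'update-rc.d': ['>> 1.0'], 'virtual__foo': []}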
-# Otherwise allarch packages may change depending on override configuration
-deb_write_pkg[vardepsexclude] = "OVERRIDES"
-
-# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
-DEBEXTRAVARS = "PKGV PKGR DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE PACKAGE_ADD_METADATA_DEB"
-do_package_write_deb[vardeps] += "${@gen_packagevar(d, 'DEBEXTRAVARS')}"
-
-SSTATETASKS += "do_package_write_deb"
-do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
-
-python do_package_write_deb_setscene () {
- tmpdir = d.getVar('TMPDIR')
-
- if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), os.R_OK):
- os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
-
- sstate_setscene(d)
-}
-addtask do_package_write_deb_setscene
-
-python () {
- if d.getVar('PACKAGES') != '':
- deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
- d.appendVarFlag('do_package_write_deb', 'depends', deps)
- d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
-}
-
-python do_package_write_deb () {
- bb.build.exec_func("read_subpackage_metadata", d)
- bb.build.exec_func("do_package_deb", d)
-}
-do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
-do_package_write_deb[umask] = "022"
-do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
-
-PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
-PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_deb"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
deleted file mode 100644
index 79cb36c513..0000000000
--- a/meta/classes/package_ipk.bbclass
+++ /dev/null
@@ -1,282 +0,0 @@
-inherit package
-
-IMAGE_PKGTYPE ?= "ipk"
-
-IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
-IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
-
-PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
-
-# Program to be used to build opkg packages
-OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
-
-OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
-OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
-OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
-
-OPKGLIBDIR ??= "${localstatedir}/lib"
-
-python do_package_ipk () {
- workdir = d.getVar('WORKDIR')
- outdir = d.getVar('PKGWRITEDIRIPK')
- tmpdir = d.getVar('TMPDIR')
- pkgdest = d.getVar('PKGDEST')
- if not workdir or not outdir or not tmpdir:
- bb.error("Variables incorrectly set, unable to package")
- return
-
- packages = d.getVar('PACKAGES')
- if not packages or packages == '':
- bb.debug(1, "No packages; nothing to do")
- return
-
- # We're about to add new packages so the index needs to be checked
- # so remove the appropriate stamp file.
- if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
- os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
-
- oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
-}
-do_package_ipk[vardeps] += "ipk_write_pkg"
-do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
-
-def ipk_write_pkg(pkg, d):
- import re, copy
- import subprocess
- import textwrap
- import collections
- import glob
-
- def cleanupcontrol(root):
- for p in ['CONTROL', 'DEBIAN']:
- p = os.path.join(root, p)
- if os.path.exists(p):
- bb.utils.prunedir(p)
-
- outdir = d.getVar('PKGWRITEDIRIPK')
- pkgdest = d.getVar('PKGDEST')
- recipesource = os.path.basename(d.getVar('FILE'))
-
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
-
- lf = bb.utils.lockfile(root + ".lock")
- try:
- localdata.setVar('ROOT', '')
- localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
- if not pkgname:
- pkgname = pkg
- localdata.setVar('PKG', pkgname)
-
- localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
-
- basedir = os.path.join(os.path.dirname(root))
- arch = localdata.getVar('PACKAGE_ARCH')
-
- if localdata.getVar('IPK_HIERARCHICAL_FEED', False) == "1":
- # Spread packages across subdirectories so each isn't too crowded
- if pkgname.startswith('lib'):
- pkg_prefix = 'lib' + pkgname[3]
- else:
- pkg_prefix = pkgname[0]
-
- # Keep -dbg, -dev, -doc, -staticdev, -locale and -locale-* packages
- # together. These package suffixes are taken from the definitions of
- # PACKAGES and PACKAGES_DYNAMIC in meta/conf/bitbake.conf
- if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
- pkg_subdir = pkgname[:-4]
- elif pkgname.endswith('-staticdev'):
- pkg_subdir = pkgname[:-10]
- elif pkgname.endswith('-locale'):
- pkg_subdir = pkgname[:-7]
- elif '-locale-' in pkgname:
- pkg_subdir = pkgname[:pkgname.find('-locale-')]
- else:
- pkg_subdir = pkgname
-
- pkgoutdir = "%s/%s/%s/%s" % (outdir, arch, pkg_prefix, pkg_subdir)
- else:
- pkgoutdir = "%s/%s" % (outdir, arch)
-
- bb.utils.mkdirhier(pkgoutdir)
- os.chdir(root)
- cleanupcontrol(root)
- g = glob.glob('*')
- if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
- return
-
- controldir = os.path.join(root, 'CONTROL')
- bb.utils.mkdirhier(controldir)
- ctrlfile = open(os.path.join(controldir, 'control'), 'w')
-
- fields = []
- pe = d.getVar('PKGE')
- if pe and int(pe) > 0:
- fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
- else:
- fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
- fields.append(["Description: %s\n", ['DESCRIPTION']])
- fields.append(["Section: %s\n", ['SECTION']])
- fields.append(["Priority: %s\n", ['PRIORITY']])
- fields.append(["Maintainer: %s\n", ['MAINTAINER']])
- fields.append(["License: %s\n", ['LICENSE']])
- fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
- fields.append(["OE: %s\n", ['PN']])
- if d.getVar('HOMEPAGE'):
- fields.append(["Homepage: %s\n", ['HOMEPAGE']])
-
- def pullData(l, d):
- l2 = []
- for i in l:
- l2.append(d.getVar(i))
- return l2
-
- ctrlfile.write("Package: %s\n" % pkgname)
- # check for required fields
- for (c, fs) in fields:
- for f in fs:
- if localdata.getVar(f, False) is None:
- raise KeyError(f)
- # Special behavior for description...
- if 'DESCRIPTION' in fs:
- summary = localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or "."
- ctrlfile.write('Description: %s\n' % summary)
- description = localdata.getVar('DESCRIPTION') or "."
- description = textwrap.dedent(description).strip()
- if '\\n' in description:
- # Manually indent: multiline description includes a leading space
- for t in description.split('\\n'):
- ctrlfile.write(' %s\n' % (t.strip() or ' .'))
- else:
- # Auto indent
- ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
- else:
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
-
- custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
- if custom_fields_chunk is not None:
- ctrlfile.write(custom_fields_chunk)
- ctrlfile.write("\n")
-
- mapping_rename_hook(localdata)
-
- def debian_cmp_remap(var):
- # In Debian, '>' and '<' do not mean what they appear to mean:
- # '<' = less than or equal
- # '>' = greater than or equal
- # Adjust these to the '<<' and '>>' equivalents.
- #
- for dep in var:
- for i, v in enumerate(var[dep]):
- if (v or "").startswith("< "):
- var[dep][i] = var[dep][i].replace("< ", "<< ")
- elif (v or "").startswith("> "):
- var[dep][i] = var[dep][i].replace("> ", ">> ")
-
- rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
- debian_cmp_remap(rdepends)
- rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS") or "")
- debian_cmp_remap(rrecommends)
- rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS") or "")
- debian_cmp_remap(rsuggests)
- # Deliberately drop version information here, not wanted/supported by ipk
- rprovides = dict.fromkeys(bb.utils.explode_dep_versions2(localdata.getVar("RPROVIDES") or ""), [])
- rprovides = collections.OrderedDict(sorted(rprovides.items(), key=lambda x: x[0]))
- debian_cmp_remap(rprovides)
- rreplaces = bb.utils.explode_dep_versions2(localdata.getVar("RREPLACES") or "")
- debian_cmp_remap(rreplaces)
- rconflicts = bb.utils.explode_dep_versions2(localdata.getVar("RCONFLICTS") or "")
- debian_cmp_remap(rconflicts)
-
- if rdepends:
- ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
- if rsuggests:
- ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
- if rrecommends:
- ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
- if rprovides:
- ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
- if rreplaces:
- ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
- if rconflicts:
- ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
- ctrlfile.write("Source: %s\n" % recipesource)
- ctrlfile.close()
-
- for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = localdata.getVar('pkg_%s' % script)
- if not scriptvar:
- continue
- scriptfile = open(os.path.join(controldir, script), 'w')
- scriptfile.write(scriptvar)
- scriptfile.close()
- os.chmod(os.path.join(controldir, script), 0o755)
-
- conffiles_str = ' '.join(get_conffiles(pkg, d))
- if conffiles_str:
- conffiles = open(os.path.join(controldir, 'conffiles'), 'w')
- for f in conffiles_str.split():
- if os.path.exists(oe.path.join(root, f)):
- conffiles.write('%s\n' % f)
- conffiles.close()
-
- os.chdir(basedir)
- subprocess.check_output("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH"),
- d.getVar("OPKGBUILDCMD"), pkg, pkgoutdir),
- stderr=subprocess.STDOUT,
- shell=True)
-
- if d.getVar('IPK_SIGN_PACKAGES') == '1':
- ipkver = "%s-%s" % (d.getVar('PKGV'), d.getVar('PKGR'))
- ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH'))
- sign_ipk(d, ipk_to_sign)
-
- finally:
- cleanupcontrol(root)
- bb.utils.unlockfile(lf)
-
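When IPK_HIERARCHICAL_FEED is enabled, the feed layout computed in ipk_write_pkg() above can be sketched standalone like this (package names are examples only):

def feed_subdir(pkgname):
    # Mirror of the IPK_HIERARCHICAL_FEED branch above: <arch>/<prefix>/<subdir>/
    if pkgname.startswith('lib'):
        prefix = 'lib' + pkgname[3]
    else:
        prefix = pkgname[0]
    if pkgname[-4:] in ('-dbg', '-dev', '-doc'):
        subdir = pkgname[:-4]
    elif pkgname.endswith('-staticdev'):
        subdir = pkgname[:-10]
    elif pkgname.endswith('-locale'):
        subdir = pkgname[:-7]
    elif '-locale-' in pkgname:
        subdir = pkgname[:pkgname.find('-locale-')]
    else:
        subdir = pkgname
    return '%s/%s' % (prefix, subdir)

print(feed_subdir('libfoo-dev'))       # libf/libfoo
print(feed_subdir('bar-locale-en-gb')) # b/bar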
-# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
-IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE PACKAGE_ADD_METADATA_IPK"
-ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
-
-# Otherwise allarch packages may change depending on override configuration
-ipk_write_pkg[vardepsexclude] = "OVERRIDES"
-
-
-SSTATETASKS += "do_package_write_ipk"
-do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
-
-python do_package_write_ipk_setscene () {
- tmpdir = d.getVar('TMPDIR')
-
- if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
- os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
-
- sstate_setscene(d)
-}
-addtask do_package_write_ipk_setscene
-
-python () {
- if d.getVar('PACKAGES') != '':
- deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot xz-native:do_populate_sysroot'
- d.appendVarFlag('do_package_write_ipk', 'depends', deps)
- d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
-}
-
-python do_package_write_ipk () {
- bb.build.exec_func("read_subpackage_metadata", d)
- bb.build.exec_func("do_package_ipk", d)
-}
-do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
-do_package_write_ipk[umask] = "022"
-do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
-
-PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
-PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_ipk"
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
deleted file mode 100644
index 18b7ed62e0..0000000000
--- a/meta/classes/package_pkgdata.bbclass
+++ /dev/null
@@ -1,167 +0,0 @@
-WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
-
-def package_populate_pkgdata_dir(pkgdatadir, d):
- import glob
-
- postinsts = []
- seendirs = set()
- stagingdir = d.getVar("PKGDATA_DIR")
- pkgarchs = ['${MACHINE_ARCH}']
- pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
- pkgarchs.append('allarch')
-
- bb.utils.mkdirhier(pkgdatadir)
- for pkgarch in pkgarchs:
- for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
- with open(manifest, "r") as f:
- for l in f:
- l = l.strip()
- dest = l.replace(stagingdir, "")
- if l.endswith("/"):
- staging_copydir(l, pkgdatadir, dest, seendirs)
- continue
- try:
- staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
- except FileExistsError:
- continue
-
-python package_prepare_pkgdata() {
- import copy
- import glob
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- mytaskname = d.getVar("BB_RUNTASK")
- if mytaskname.endswith("_setscene"):
- mytaskname = mytaskname.replace("_setscene", "")
- workdir = d.getVar("WORKDIR")
- pn = d.getVar("PN")
- stagingdir = d.getVar("PKGDATA_DIR")
- pkgdatadir = d.getVar("WORKDIR_PKGDATA")
-
- # Detect bitbake -b usage
- nodeps = d.getVar("BB_LIMITEDDEPS") or False
- if nodeps:
- package_populate_pkgdata_dir(pkgdatadir, d)
- return
-
- start = None
- configuredeps = []
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == mytaskname and data[0] == pn:
- start = dep
- break
- if start is None:
- bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
-
- # We need to figure out which sysroot files we need to expose to this task.
- # This needs to match what would get restored from sstate, which is controlled
- # ultimately by calls from bitbake to setscene_depvalid().
- # That function expects a setscene dependency tree. We build a dependency tree
- # condensed to inter-sstate task dependencies, similar to that used by setscene
- # tasks. We can then call into setscene_depvalid() and decide
- # which dependencies we can "see" and should expose in the recipe specific sysroot.
- setscenedeps = copy.deepcopy(taskdepdata)
-
- start = set([start])
-
- sstatetasks = d.getVar("SSTATETASKS").split()
- # Add recipe specific tasks referenced by setscene_depvalid()
- sstatetasks.append("do_stash_locale")
-
- # If start is an sstate task (like do_package) we need to add in its direct dependencies
- # else the code below won't recurse into them.
- for dep in set(start):
- for dep2 in setscenedeps[dep][3]:
- start.add(dep2)
- start.remove(dep)
-
- # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
- for dep in taskdepdata:
- data = setscenedeps[dep]
- if data[1] not in sstatetasks:
- for dep2 in setscenedeps:
- data2 = setscenedeps[dep2]
- if dep in data2[3]:
- data2[3].update(setscenedeps[dep][3])
- data2[3].remove(dep)
- if dep in start:
- start.update(setscenedeps[dep][3])
- start.remove(dep)
- del setscenedeps[dep]
-
- # Remove circular references
- for dep in setscenedeps:
- if dep in setscenedeps[dep][3]:
- setscenedeps[dep][3].remove(dep)
-
- # Direct dependencies should be present and can be depended upon
- for dep in set(start):
- if setscenedeps[dep][1] == "do_packagedata":
- if dep not in configuredeps:
- configuredeps.append(dep)
-
- msgbuf = []
- # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
- # for ones that would be restored from sstate.
- done = list(start)
- next = list(start)
- while next:
- new = []
- for dep in next:
- data = setscenedeps[dep]
- for datadep in data[3]:
- if datadep in done:
- continue
- taskdeps = {}
- taskdeps[dep] = setscenedeps[dep][:2]
- taskdeps[datadep] = setscenedeps[datadep][:2]
- retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
- done.append(datadep)
- new.append(datadep)
- if retval:
- msgbuf.append("Skipping setscene dependency %s" % datadep)
- continue
- if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
- configuredeps.append(datadep)
- msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
- else:
- msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
- next = new
-
- # This logging is too verbose for day to day use sadly
- #bb.debug(2, "\n".join(msgbuf))
-
- seendirs = set()
- postinsts = []
- multilibs = {}
- manifests = {}
-
- msg_adding = []
-
- for dep in configuredeps:
- c = setscenedeps[dep][0]
- msg_adding.append(c)
-
- manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
- destsysroot = pkgdatadir
-
- if manifest:
- targetdir = destsysroot
- with open(manifest, "r") as f:
- manifests[dep] = manifest
- for l in f:
- l = l.strip()
- dest = targetdir + l.replace(stagingdir, "")
- if l.endswith("/"):
- staging_copydir(l, targetdir, dest, seendirs)
- continue
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
-
- bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
-
-}
-package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
-package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
-
-
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
deleted file mode 100644
index 519c22be47..0000000000
--- a/meta/classes/package_rpm.bbclass
+++ /dev/null
@@ -1,755 +0,0 @@
-inherit package
-
-IMAGE_PKGTYPE ?= "rpm"
-
-RPM="rpm"
-RPMBUILD="rpmbuild"
-
-PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
-
-# Maintaining the per-file dependencies has significant overhead when writing the
-# packages. When set, this value merges them for efficiency.
-MERGEPERFILEDEPS = "1"
-
-# Filter dependencies based on a provided function.
-def filter_deps(var, f):
- import collections
-
- depends_dict = bb.utils.explode_dep_versions2(var)
- newdeps_dict = collections.OrderedDict()
- for dep in depends_dict:
- if f(dep):
- newdeps_dict[dep] = depends_dict[dep]
- return bb.utils.join_deps(newdeps_dict, commasep=False)
-
-# Filter out absolute paths (typically /bin/sh and /usr/bin/env) and any perl
-# dependencies for nativesdk packages.
-def filter_nativesdk_deps(srcname, var):
- if var and srcname.startswith("nativesdk-"):
- var = filter_deps(var, lambda dep: not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl('))
- return var
-
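A simplified, string-only sketch of what filter_deps()/filter_nativesdk_deps() above remove; unlike the real code it ignores version constraints (handled there by bb.utils.explode_dep_versions2):

def filter_nativesdk_sketch(srcname, deps):
    if not srcname.startswith("nativesdk-"):
        return deps
    keep = [dep for dep in deps.split()
            if not dep.startswith('/') and dep != 'perl' and not dep.startswith('perl(')]
    return " ".join(keep)

print(filter_nativesdk_sketch("nativesdk-foo", "/bin/sh bar perl perl(Carp) baz"))  # bar baz
print(filter_nativesdk_sketch("foo", "/bin/sh bar"))  # unchanged for other recipes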
-# Construct per file dependencies file
-def write_rpm_perfiledata(srcname, d):
- workdir = d.getVar('WORKDIR')
- packages = d.getVar('PACKAGES')
- pkgd = d.getVar('PKGD')
-
- def dump_filerdeps(varname, outfile, d):
- outfile.write("#!/usr/bin/env python3\n\n")
- outfile.write("# Dependency table\n")
- outfile.write('deps = {\n')
- for pkg in packages.split():
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key) or "")
- for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
- deps = filter_nativesdk_deps(srcname, d.getVar(key) or "")
- depends_dict = bb.utils.explode_dep_versions(deps)
- file = dfile.replace("@underscore@", "_")
- file = file.replace("@closebrace@", "]")
- file = file.replace("@openbrace@", "[")
- file = file.replace("@tab@", "\t")
- file = file.replace("@space@", " ")
- file = file.replace("@at@", "@")
- outfile.write('"' + pkgd + file + '" : "')
- for dep in depends_dict:
- ver = depends_dict[dep]
- if dep and ver:
- ver = ver.replace("(","")
- ver = ver.replace(")","")
- outfile.write(dep + " " + ver + " ")
- else:
- outfile.write(dep + " ")
- outfile.write('",\n')
- outfile.write('}\n\n')
- outfile.write("import sys\n")
- outfile.write("while 1:\n")
- outfile.write("\tline = sys.stdin.readline().strip()\n")
- outfile.write("\tif not line:\n")
- outfile.write("\t\tsys.exit(0)\n")
- outfile.write("\tif line in deps:\n")
- outfile.write("\t\tprint(deps[line] + '\\n')\n")
-
- # OE-core dependencies a.k.a. RPM requires
- outdepends = workdir + "/" + srcname + ".requires"
-
- dependsfile = open(outdepends, 'w')
-
- dump_filerdeps('RDEPENDS', dependsfile, d)
-
- dependsfile.close()
- os.chmod(outdepends, 0o755)
-
- # OE-core / RPM Provides
- outprovides = workdir + "/" + srcname + ".provides"
-
- providesfile = open(outprovides, 'w')
-
- dump_filerdeps('RPROVIDES', providesfile, d)
-
- providesfile.close()
- os.chmod(outprovides, 0o755)
-
- return (outdepends, outprovides)
-
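The .requires/.provides helpers emitted by write_rpm_perfiledata() are small generated Python scripts of roughly this shape (the path and dependency below are placeholders); rpmbuild feeds packaged file names on stdin and reads the matching dependencies back:

#!/usr/bin/env python3

# Dependency table
deps = {
    "/path/to/PKGD/usr/bin/foo": "libbar.so.1 ",
}

import sys
while 1:
    line = sys.stdin.readline().strip()
    if not line:
        sys.exit(0)
    if line in deps:
        print(deps[line] + '\n')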
-
-python write_specfile () {
- import oe.packagedata
-
- # append information for logs and patches to %prep
- def add_prep(d,spec_files_bottom):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
- spec_files_bottom.append('%%prep -n %s' % d.getVar('PN') )
- spec_files_bottom.append('%s' % "echo \"include logs and patches, Please check them in SOURCES\"")
- spec_files_bottom.append('')
-
- # append the name of tarball to key word 'SOURCE' in xxx.spec.
- def tail_source(d):
- if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
- ar_outdir = d.getVar('ARCHIVER_OUTDIR')
- if not os.path.exists(ar_outdir):
- return
- source_list = os.listdir(ar_outdir)
- source_number = 0
- for source in source_list:
- # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
- # exist in ARCHIVER_OUTDIR so skip if present.
- if source.endswith(".src.rpm"):
- continue
- # rpmbuild doesn't need root permission, but it does need to
- # know each file's user and group name; the only user and
- # group available when working in fakeroot is "root".
- f = os.path.join(ar_outdir, source)
- os.chown(f, 0, 0)
- spec_preamble_top.append('Source%s: %s' % (source_number, source))
- source_number += 1
-
- # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
- # This format is similar to OE, however there are restrictions on the
- # characters that can be in a field. In the Version field, "-"
- # characters are not allowed. "-" is allowed in the Release field.
- #
- # We translate the "-" in the version to a "+", by loading the PKGV
- # from the dependent recipe, replacing the - with a +, and then using
- # that value to do a replace inside of this recipe's dependencies.
- # This preserves the "-" separator between the version and release, as
- # well as any "-" characters inside of the release field.
- #
- # All of this has to happen BEFORE the mapping_rename_hook as
- # after renaming we cannot look up the dependencies in the packagedata
- # store.
- def translate_vers(varname, d):
- depends = d.getVar(varname)
- if depends:
- depends_dict = bb.utils.explode_dep_versions2(depends)
- newdeps_dict = {}
- for dep in depends_dict:
- verlist = []
- for ver in depends_dict[dep]:
- if '-' in ver:
- subd = oe.packagedata.read_subpkgdata_dict(dep, d)
- if 'PKGV' in subd:
- pv = subd['PV']
- pkgv = subd['PKGV']
- reppv = pkgv.replace('-', '+')
- ver = ver.replace(pv, reppv).replace(pkgv, reppv)
- if 'PKGR' in subd:
- # Make sure ver uses PKGR rather than PR
- pr = '-' + subd['PR']
- pkgr = '-' + subd['PKGR']
- if pkgr not in ver:
- ver = ver.replace(pr, pkgr)
- verlist.append(ver)
- else:
- verlist.append(ver)
- newdeps_dict[dep] = verlist
- depends = bb.utils.join_deps(newdeps_dict)
- d.setVar(varname, depends.strip())
-
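An illustrative worked example of the '-' to '+' translation described above, with made-up version strings:

pv, pkgv = "1.2-rc1", "1.2-rc1"        # values normally looked up in pkgdata
ver = ">= 1.2-rc1-r0"
reppv = pkgv.replace('-', '+')         # "1.2+rc1"
print(ver.replace(pv, reppv).replace(pkgv, reppv))
# ">= 1.2+rc1-r0" : only the version/release separator '-' remains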
- # We need to change the dependency style from BitBake to RPM.
- # This needs to happen AFTER the mapping_rename_hook.
- def print_deps(variable, tag, array, d):
- depends = variable
- if depends:
- depends_dict = bb.utils.explode_dep_versions2(depends)
- for dep in depends_dict:
- for ver in depends_dict[dep]:
- ver = ver.replace('(', '')
- ver = ver.replace(')', '')
- array.append("%s: %s %s" % (tag, dep, ver))
- if not len(depends_dict[dep]):
- array.append("%s: %s" % (tag, dep))
-
- def walk_files(walkpath, target, conffiles, dirfiles):
- # We can race against the ipk/deb backends which create CONTROL or DEBIAN directories
- # when packaging. We just ignore these files, which are created in
- # packages-split/ and not in package/.
- # We have the odd situation where the CONTROL/DEBIAN directory can be removed in the middle
- # of the walk; the isdir() test would then fail and the walk code would assume it's a file,
- # hence we check for the names in files too.
- for rootpath, dirs, files in os.walk(walkpath):
- path = rootpath.replace(walkpath, "")
- if path.endswith("DEBIAN") or path.endswith("CONTROL"):
- continue
- path = path.replace("%", "%%%%%%%%")
- path = path.replace("[", "?")
- path = path.replace("]", "?")
-
- # Treat all symlinks to directories as normal files.
- # os.walk() lists them as directories.
- def move_to_files(dir):
- if os.path.islink(os.path.join(rootpath, dir)):
- files.append(dir)
- return True
- else:
- return False
- dirs[:] = [dir for dir in dirs if not move_to_files(dir)]
-
- # Directory handling can happen in two ways. Either DIRFILES is not set at all,
- # in which case we fall back to the older behaviour of packages owning all their
- # directories, or DIRFILES is set (see the else branch below).
- if dirfiles is None:
- for dir in dirs:
- if dir == "CONTROL" or dir == "DEBIAN":
- continue
- dir = dir.replace("%", "%%%%%%%%")
- dir = dir.replace("[", "?")
- dir = dir.replace("]", "?")
- # All packages own the directories their files are in...
- target.append('%dir "' + path + '/' + dir + '"')
- else:
- # Packages own only empty directories or explicitly listed directories.
- # This prevents overlapping security permissions.
- if path and not files and not dirs:
- target.append('%dir "' + path + '"')
- elif path and path in dirfiles:
- target.append('%dir "' + path + '"')
-
- for file in files:
- if file == "CONTROL" or file == "DEBIAN":
- continue
- file = file.replace("%", "%%%%%%%%")
- file = file.replace("[", "?")
- file = file.replace("]", "?")
- if conffiles.count(path + '/' + file):
- target.append('%config "' + path + '/' + file + '"')
- else:
- target.append('"' + path + '/' + file + '"')
-
- # Prevent the prerm/postrm scripts from being run during an upgrade
- def wrap_uninstall(scriptvar):
- scr = scriptvar.strip()
- if scr.startswith("#!"):
- pos = scr.find("\n") + 1
- else:
- pos = 0
- scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
- return scr
-
- def get_perfile(varname, pkg, d):
- deps = []
- dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
- dependsflist = (d.getVar(dependsflist_key) or "")
- for dfile in dependsflist.split():
- key = "FILE" + varname + "_" + dfile + "_" + pkg
- depends = d.getVar(key)
- if depends:
- deps.append(depends)
- return " ".join(deps)
-
- def append_description(spec_preamble, text):
- """
- Add the description to the spec file.
- """
- import textwrap
- dedent_text = textwrap.dedent(text).strip()
- # Bitbake saves "\n" as "\\n"
- if '\\n' in dedent_text:
- for t in dedent_text.split('\\n'):
- spec_preamble.append(t.strip())
- else:
- spec_preamble.append('%s' % textwrap.fill(dedent_text, width=75))
-
- packages = d.getVar('PACKAGES')
- if not packages or packages == '':
- bb.debug(1, "No packages; nothing to do")
- return
-
- pkgdest = d.getVar('PKGDEST')
- if not pkgdest:
- bb.fatal("No PKGDEST")
-
- outspecfile = d.getVar('OUTSPECFILE')
- if not outspecfile:
- bb.fatal("No OUTSPECFILE")
-
- # Construct the SPEC file...
- srcname = d.getVar('PN')
- localdata = bb.data.createCopy(d)
- localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + srcname)
- srcsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
- srcversion = localdata.getVar('PKGV').replace('-', '+')
- srcrelease = localdata.getVar('PKGR')
- srcepoch = (localdata.getVar('PKGE') or "")
- srclicense = localdata.getVar('LICENSE')
- srcsection = localdata.getVar('SECTION')
- srcmaintainer = localdata.getVar('MAINTAINER')
- srchomepage = localdata.getVar('HOMEPAGE')
- srcdescription = localdata.getVar('DESCRIPTION') or "."
- srccustomtagschunk = get_package_additional_metadata("rpm", localdata)
-
- srcdepends = d.getVar('DEPENDS')
- srcrdepends = []
- srcrrecommends = []
- srcrsuggests = []
- srcrprovides = []
- srcrreplaces = []
- srcrconflicts = []
- srcrobsoletes = []
-
- srcrpreinst = []
- srcrpostinst = []
- srcrprerm = []
- srcrpostrm = []
-
- spec_preamble_top = []
- spec_preamble_bottom = []
-
- spec_scriptlets_top = []
- spec_scriptlets_bottom = []
-
- spec_files_top = []
- spec_files_bottom = []
-
- perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
- extra_pkgdata = (d.getVar("RPM_EXTRA_PKGDATA") or "0") == "1"
-
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
-
- root = "%s/%s" % (pkgdest, pkg)
-
- localdata.setVar('ROOT', '')
- localdata.setVar('ROOT_%s' % pkg, root)
- pkgname = localdata.getVar('PKG_%s' % pkg)
- if not pkgname:
- pkgname = pkg
- localdata.setVar('PKG', pkgname)
-
- localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg)
-
- conffiles = get_conffiles(pkg, d)
- dirfiles = localdata.getVar('DIRFILES')
- if dirfiles is not None:
- dirfiles = dirfiles.split()
-
- splitname = pkgname
-
- splitsummary = (localdata.getVar('SUMMARY') or localdata.getVar('DESCRIPTION') or ".")
- splitversion = (localdata.getVar('PKGV') or "").replace('-', '+')
- splitrelease = (localdata.getVar('PKGR') or "")
- splitepoch = (localdata.getVar('PKGE') or "")
- splitlicense = (localdata.getVar('LICENSE') or "")
- splitsection = (localdata.getVar('SECTION') or "")
- splitdescription = (localdata.getVar('DESCRIPTION') or ".")
- splitcustomtagschunk = get_package_additional_metadata("rpm", localdata)
-
- translate_vers('RDEPENDS', localdata)
- translate_vers('RRECOMMENDS', localdata)
- translate_vers('RSUGGESTS', localdata)
- translate_vers('RPROVIDES', localdata)
- translate_vers('RREPLACES', localdata)
- translate_vers('RCONFLICTS', localdata)
-
- # Map the dependencies into their final form
- mapping_rename_hook(localdata)
-
- splitrdepends = localdata.getVar('RDEPENDS')
- splitrrecommends = localdata.getVar('RRECOMMENDS')
- splitrsuggests = localdata.getVar('RSUGGESTS')
- splitrprovides = localdata.getVar('RPROVIDES')
- splitrreplaces = localdata.getVar('RREPLACES')
- splitrconflicts = localdata.getVar('RCONFLICTS')
- splitrobsoletes = []
-
- splitrpreinst = localdata.getVar('pkg_preinst')
- splitrpostinst = localdata.getVar('pkg_postinst')
- splitrprerm = localdata.getVar('pkg_prerm')
- splitrpostrm = localdata.getVar('pkg_postrm')
-
-
- if not perfiledeps:
- # Add in summary of per file dependencies
- splitrdepends = splitrdepends + " " + get_perfile('RDEPENDS', pkg, d)
- splitrprovides = splitrprovides + " " + get_perfile('RPROVIDES', pkg, d)
-
- splitrdepends = filter_nativesdk_deps(srcname, splitrdepends)
-
- # Gather special src/first package data
- if srcname == splitname:
- archiving = d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and \
- bb.data.inherits_class('archiver', d)
- if archiving and srclicense != splitlicense:
- bb.warn("The SRPM produced may not have the correct overall source license in the License tag. This is due to the LICENSE for the primary package and SRPM conflicting.")
-
- srclicense = splitlicense
- srcrdepends = splitrdepends
- srcrrecommends = splitrrecommends
- srcrsuggests = splitrsuggests
- srcrprovides = splitrprovides
- srcrreplaces = splitrreplaces
- srcrconflicts = splitrconflicts
-
- srcrpreinst = splitrpreinst
- srcrpostinst = splitrpostinst
- srcrprerm = splitrprerm
- srcrpostrm = splitrpostrm
-
- file_list = []
- walk_files(root, file_list, conffiles, dirfiles)
- if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty RPM package for %s" % splitname)
- else:
- spec_files_top.append('%files')
- if extra_pkgdata:
- package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
- spec_files_top.append('%defattr(-,-,-,-)')
- if file_list:
- bb.note("Creating RPM package for %s" % splitname)
- spec_files_top.extend(file_list)
- else:
- bb.note("Creating empty RPM package for %s" % splitname)
- spec_files_top.append('')
- continue
-
- # Process subpackage data
- spec_preamble_bottom.append('%%package -n %s' % splitname)
- spec_preamble_bottom.append('Summary: %s' % splitsummary)
- if srcversion != splitversion:
- spec_preamble_bottom.append('Version: %s' % splitversion)
- if srcrelease != splitrelease:
- spec_preamble_bottom.append('Release: %s' % splitrelease)
- if srcepoch != splitepoch:
- spec_preamble_bottom.append('Epoch: %s' % splitepoch)
- spec_preamble_bottom.append('License: %s' % splitlicense)
- spec_preamble_bottom.append('Group: %s' % splitsection)
-
- if srccustomtagschunk != splitcustomtagschunk:
- spec_preamble_bottom.append(splitcustomtagschunk)
-
- # Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(splitrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(splitrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(splitrreplaces or "")
- for dep in rreplaces:
- if not dep in robsoletes:
- robsoletes[dep] = rreplaces[dep]
- if not dep in rprovides:
- rprovides[dep] = rreplaces[dep]
- splitrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
- splitrprovides = bb.utils.join_deps(rprovides, commasep=False)
-
- print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
- if splitrpreinst:
- print_deps(splitrdepends, "Requires(pre)", spec_preamble_bottom, d)
- if splitrpostinst:
- print_deps(splitrdepends, "Requires(post)", spec_preamble_bottom, d)
- if splitrprerm:
- print_deps(splitrdepends, "Requires(preun)", spec_preamble_bottom, d)
- if splitrpostrm:
- print_deps(splitrdepends, "Requires(postun)", spec_preamble_bottom, d)
-
- print_deps(splitrrecommends, "Recommends", spec_preamble_bottom, d)
- print_deps(splitrsuggests, "Suggests", spec_preamble_bottom, d)
- print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
- print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
- print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
-
- spec_preamble_bottom.append('')
-
- spec_preamble_bottom.append('%%description -n %s' % splitname)
- append_description(spec_preamble_bottom, splitdescription)
-
- spec_preamble_bottom.append('')
-
- # Now process scriptlets
- if splitrpreinst:
- spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
- spec_scriptlets_bottom.append('# %s - preinst' % splitname)
- spec_scriptlets_bottom.append(splitrpreinst)
- spec_scriptlets_bottom.append('')
- if splitrpostinst:
- spec_scriptlets_bottom.append('%%post -n %s' % splitname)
- spec_scriptlets_bottom.append('# %s - postinst' % splitname)
- spec_scriptlets_bottom.append(splitrpostinst)
- spec_scriptlets_bottom.append('')
- if splitrprerm:
- spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
- spec_scriptlets_bottom.append('# %s - prerm' % splitname)
- scriptvar = wrap_uninstall(splitrprerm)
- spec_scriptlets_bottom.append(scriptvar)
- spec_scriptlets_bottom.append('')
- if splitrpostrm:
- spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
- spec_scriptlets_bottom.append('# %s - postrm' % splitname)
- scriptvar = wrap_uninstall(splitrpostrm)
- spec_scriptlets_bottom.append(scriptvar)
- spec_scriptlets_bottom.append('')
-
- # Now process files
- file_list = []
- walk_files(root, file_list, conffiles, dirfiles)
- if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
- bb.note("Not creating empty RPM package for %s" % splitname)
- else:
- spec_files_bottom.append('%%files -n %s' % splitname)
- if extra_pkgdata:
- package_rpm_extra_pkgdata(splitname, spec_files_bottom, localdata)
- spec_files_bottom.append('%defattr(-,-,-,-)')
- if file_list:
- bb.note("Creating RPM package for %s" % splitname)
- spec_files_bottom.extend(file_list)
- else:
- bb.note("Creating empty RPM package for %s" % splitname)
- spec_files_bottom.append('')
-
- del localdata
-
- add_prep(d, spec_files_bottom)
- spec_preamble_top.append('Summary: %s' % srcsummary)
- spec_preamble_top.append('Name: %s' % srcname)
- spec_preamble_top.append('Version: %s' % srcversion)
- spec_preamble_top.append('Release: %s' % srcrelease)
- if srcepoch and srcepoch.strip() != "":
- spec_preamble_top.append('Epoch: %s' % srcepoch)
- spec_preamble_top.append('License: %s' % srclicense)
- spec_preamble_top.append('Group: %s' % srcsection)
- spec_preamble_top.append('Packager: %s' % srcmaintainer)
- if srchomepage:
- spec_preamble_top.append('URL: %s' % srchomepage)
- if srccustomtagschunk:
- spec_preamble_top.append(srccustomtagschunk)
- tail_source(d)
-
- # Replaces == Obsoletes && Provides
- robsoletes = bb.utils.explode_dep_versions2(srcrobsoletes or "")
- rprovides = bb.utils.explode_dep_versions2(srcrprovides or "")
- rreplaces = bb.utils.explode_dep_versions2(srcrreplaces or "")
- for dep in rreplaces:
- if not dep in robsoletes:
- robsoletes[dep] = rreplaces[dep]
- if not dep in rprovides:
- rprovides[dep] = rreplaces[dep]
- srcrobsoletes = bb.utils.join_deps(robsoletes, commasep=False)
- srcrprovides = bb.utils.join_deps(rprovides, commasep=False)
-
- print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
- print_deps(srcrdepends, "Requires", spec_preamble_top, d)
- if srcrpreinst:
- print_deps(srcrdepends, "Requires(pre)", spec_preamble_top, d)
- if srcrpostinst:
- print_deps(srcrdepends, "Requires(post)", spec_preamble_top, d)
- if srcrprerm:
- print_deps(srcrdepends, "Requires(preun)", spec_preamble_top, d)
- if srcrpostrm:
- print_deps(srcrdepends, "Requires(postun)", spec_preamble_top, d)
-
- print_deps(srcrrecommends, "Recommends", spec_preamble_top, d)
- print_deps(srcrsuggests, "Suggests", spec_preamble_top, d)
- print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d)
- print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
- print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
-
- spec_preamble_top.append('')
-
- spec_preamble_top.append('%description')
- append_description(spec_preamble_top, srcdescription)
-
- spec_preamble_top.append('')
-
- if srcrpreinst:
- spec_scriptlets_top.append('%pre')
- spec_scriptlets_top.append('# %s - preinst' % srcname)
- spec_scriptlets_top.append(srcrpreinst)
- spec_scriptlets_top.append('')
- if srcrpostinst:
- spec_scriptlets_top.append('%post')
- spec_scriptlets_top.append('# %s - postinst' % srcname)
- spec_scriptlets_top.append(srcrpostinst)
- spec_scriptlets_top.append('')
- if srcrprerm:
- spec_scriptlets_top.append('%preun')
- spec_scriptlets_top.append('# %s - prerm' % srcname)
- scriptvar = wrap_uninstall(srcrprerm)
- spec_scriptlets_top.append(scriptvar)
- spec_scriptlets_top.append('')
- if srcrpostrm:
- spec_scriptlets_top.append('%postun')
- spec_scriptlets_top.append('# %s - postrm' % srcname)
- scriptvar = wrap_uninstall(srcrpostrm)
- spec_scriptlets_top.append(scriptvar)
- spec_scriptlets_top.append('')
-
- # Write the SPEC file
- specfile = open(outspecfile, 'w')
-
- # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
- # of the generated spec file
- external_preamble = d.getVar("RPMSPEC_PREAMBLE")
- if external_preamble:
- specfile.write(external_preamble + "\n")
-
- for line in spec_preamble_top:
- specfile.write(line + "\n")
-
- for line in spec_preamble_bottom:
- specfile.write(line + "\n")
-
- for line in spec_scriptlets_top:
- specfile.write(line + "\n")
-
- for line in spec_scriptlets_bottom:
- specfile.write(line + "\n")
-
- for line in spec_files_top:
- specfile.write(line + "\n")
-
- for line in spec_files_bottom:
- specfile.write(line + "\n")
-
- specfile.close()
-}
-# Otherwise allarch packages may change depending on override configuration
-write_specfile[vardepsexclude] = "OVERRIDES"
-
-# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
-RPMEXTRAVARS = "PACKAGE_ADD_METADATA_RPM"
-write_specfile[vardeps] += "${@gen_packagevar(d, 'RPMEXTRAVARS')}"
-
-python do_package_rpm () {
- workdir = d.getVar('WORKDIR')
- tmpdir = d.getVar('TMPDIR')
- pkgd = d.getVar('PKGD')
- pkgdest = d.getVar('PKGDEST')
- if not workdir or not pkgd or not tmpdir:
- bb.error("Variables incorrectly set, unable to package")
- return
-
- packages = d.getVar('PACKAGES')
- if not packages or packages == '':
- bb.debug(1, "No packages; nothing to do")
- return
-
- # Construct the spec file...
-    # If the spec file already exists and has not been stored in
-    # pseudo's files.db, it may cause the rpmbuild src.rpm step to fail,
-    # so remove it before running rpmbuild.
- srcname = d.getVar('PN')
- outspecfile = workdir + "/" + srcname + ".spec"
- if os.path.isfile(outspecfile):
- os.remove(outspecfile)
- d.setVar('OUTSPECFILE', outspecfile)
- bb.build.exec_func('write_specfile', d)
-
- perfiledeps = (d.getVar("MERGEPERFILEDEPS") or "0") == "0"
- if perfiledeps:
- outdepends, outprovides = write_rpm_perfiledata(srcname, d)
-
- # Setup the rpmbuild arguments...
- rpmbuild = d.getVar('RPMBUILD')
- targetsys = d.getVar('TARGET_SYS')
- targetvendor = d.getVar('HOST_VENDOR')
-
-    # Too many places in the dnf stack assume that arch-independent packages are "noarch".
- # Let's not fight against this.
- package_arch = (d.getVar('PACKAGE_ARCH') or "").replace("-", "_")
- if package_arch == "all":
- package_arch = "noarch"
-
- sdkpkgsuffix = (d.getVar('SDKPKGSUFFIX') or "nativesdk").replace("-", "_")
- d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
- pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
- d.setVar('RPM_PKGWRITEDIR', pkgwritedir)
- bb.debug(1, 'PKGWRITEDIR: %s' % d.getVar('RPM_PKGWRITEDIR'))
- pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${HOST_VENDOR}-linux')
- bb.utils.mkdirhier(pkgwritedir)
- os.chmod(pkgwritedir, 0o755)
-
- cmd = rpmbuild
- cmd = cmd + " --noclean --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
- cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
- cmd = cmd + " --define '_builddir " + d.getVar('B') + "'"
- cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
- cmd = cmd + " --define '_use_internal_dependency_generator 0'"
- cmd = cmd + " --define '_binaries_in_noarch_packages_terminate_build 0'"
- cmd = cmd + " --define '_build_id_links none'"
- cmd = cmd + " --define '_binary_payload w6T.xzdio'"
- cmd = cmd + " --define '_source_payload w6T.xzdio'"
- cmd = cmd + " --define 'clamp_mtime_to_source_date_epoch 1'"
- cmd = cmd + " --define '_buildhost reproducible'"
- if perfiledeps:
- cmd = cmd + " --define '__find_requires " + outdepends + "'"
- cmd = cmd + " --define '__find_provides " + outprovides + "'"
- else:
- cmd = cmd + " --define '__find_requires %{nil}'"
- cmd = cmd + " --define '__find_provides %{nil}'"
- cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
- cmd = cmd + " --define 'debug_package %{nil}'"
- cmd = cmd + " --define '_tmppath " + workdir + "'"
- if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
- cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
- cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
- cmdsrpm = cmdsrpm + " -bs " + outspecfile
- # Build the .src.rpm
- d.setVar('SBUILDSPEC', cmdsrpm + "\n")
- d.setVarFlag('SBUILDSPEC', 'func', '1')
- bb.build.exec_func('SBUILDSPEC', d)
- cmd = cmd + " -bb " + outspecfile
-
- # rpm 4 creates various empty directories in _topdir, let's clean them up
- cleanupcmd = "rm -rf %s/BUILDROOT %s/SOURCES %s/SPECS %s/SRPMS" % (workdir, workdir, workdir, workdir)
-
- # Build the rpm package!
- d.setVar('BUILDSPEC', cmd + "\n" + cleanupcmd + "\n")
- d.setVarFlag('BUILDSPEC', 'func', '1')
- bb.build.exec_func('BUILDSPEC', d)
-
- if d.getVar('RPM_SIGN_PACKAGES') == '1':
- bb.build.exec_func("sign_rpm", d)
-}
-
-python () {
- if d.getVar('PACKAGES') != '':
- deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
- d.appendVarFlag('do_package_write_rpm', 'depends', deps)
- d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
-}
-
-SSTATETASKS += "do_package_write_rpm"
-do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
-# Take a shared lock, we can write multiple packages at the same time...
-# but we need to stop the rootfs/solver from running while we do...
-do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
-
-python do_package_write_rpm_setscene () {
- sstate_setscene(d)
-}
-addtask do_package_write_rpm_setscene
-
-python do_package_write_rpm () {
- bb.build.exec_func("read_subpackage_metadata", d)
- bb.build.exec_func("do_package_rpm", d)
-}
-
-do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
-do_package_write_rpm[umask] = "022"
-do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
-
-PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
-PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
-
-do_build[recrdeptask] += "do_package_write_rpm"
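
A side note on the Replaces handling in write_specfile() above: an RREPLACES entry is folded into both Obsoletes and Provides unless already present. Below is a minimal plain-Python sketch of that merge; parse_deps()/join_deps() are simplified stand-ins for bb.utils.explode_dep_versions2()/bb.utils.join_deps(), and the dependency strings are made-up examples.

# Minimal sketch of how RREPLACES is folded into Obsoletes and Provides in
# write_specfile(). The real code uses bb.utils.explode_dep_versions2() and
# bb.utils.join_deps(); parse_deps()/join_deps() here are simplified stand-ins.
import re

def parse_deps(depstr):
    """Turn 'foo (>= 1.0) bar' into {'foo': ['>= 1.0'], 'bar': []}."""
    deps = {}
    for name, constraint in re.findall(r'(\S+)(?:\s+\(([^)]*)\))?', depstr or ""):
        deps.setdefault(name, [])
        if constraint:
            deps[name].append(constraint)
    return deps

def join_deps(deps):
    parts = []
    for name, constraints in deps.items():
        if constraints:
            parts.extend("%s (%s)" % (name, c) for c in constraints)
        else:
            parts.append(name)
    return " ".join(parts)

robsoletes = parse_deps("oldpkg")
rprovides = parse_deps("virtual-foo")
rreplaces = parse_deps("legacy-pkg (<< 2.0)")

# RREPLACES implies both Obsoletes and Provides unless already listed
for dep, constraints in rreplaces.items():
    robsoletes.setdefault(dep, constraints)
    rprovides.setdefault(dep, constraints)

print("Obsoletes:", join_deps(robsoletes))   # oldpkg legacy-pkg (<< 2.0)
print("Provides:", join_deps(rprovides))     # virtual-foo legacy-pkg (<< 2.0)
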
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
deleted file mode 100644
index ce3ab4c8e2..0000000000
--- a/meta/classes/package_tar.bbclass
+++ /dev/null
@@ -1,73 +0,0 @@
-inherit package
-
-IMAGE_PKGTYPE ?= "tar"
-
-python do_package_tar () {
- import subprocess
-
- oldcwd = os.getcwd()
-
- workdir = d.getVar('WORKDIR')
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- outdir = d.getVar('DEPLOY_DIR_TAR')
- if not outdir:
- bb.error("DEPLOY_DIR_TAR not defined, unable to package")
- return
-
- dvar = d.getVar('D')
- if not dvar:
- bb.error("D not defined, unable to package")
- return
-
- packages = d.getVar('PACKAGES')
- if not packages:
- bb.debug(1, "PACKAGES not defined, nothing to package")
- return
-
- pkgdest = d.getVar('PKGDEST')
-
- bb.utils.mkdirhier(outdir)
- bb.utils.mkdirhier(dvar)
-
- for pkg in packages.split():
- localdata = bb.data.createCopy(d)
- root = "%s/%s" % (pkgdest, pkg)
-
- overrides = localdata.getVar('OVERRIDES', False)
- localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
-
- bb.utils.mkdirhier(root)
- basedir = os.path.dirname(root)
- tarfn = localdata.expand("${DEPLOY_DIR_TAR}/${PKG}-${PKGV}-${PKGR}.tar.gz")
- os.chdir(root)
- dlist = os.listdir(root)
- if not dlist:
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
- continue
- args = "tar -cz --exclude=CONTROL --exclude=DEBIAN -f".split()
- ret = subprocess.call(args + [tarfn] + dlist)
- if ret != 0:
- bb.error("Creation of tar %s failed." % tarfn)
-
- os.chdir(oldcwd)
-}
-
-python () {
- if d.getVar('PACKAGES') != '':
- deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
- deps.append('tar-native:do_populate_sysroot')
- deps.append('virtual/fakeroot-native:do_populate_sysroot')
- d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
- d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
-}
-
-
-python do_package_write_tar () {
- bb.build.exec_func("read_subpackage_metadata", d)
- bb.build.exec_func("do_package_tar", d)
-}
-do_package_write_tar[dirs] = "${D}"
-addtask package_write_tar before do_build after do_packagedata do_package
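
For reference, the per-package archiving done by do_package_tar above boils down to the following standalone sketch, using Python's tarfile module instead of spawning the tar binary; the paths in the usage comment are hypothetical, whereas the real task derives them from PKGDEST and DEPLOY_DIR_TAR.

# Standalone sketch of the per-package step in do_package_tar: archive a
# package's staging root while skipping CONTROL/DEBIAN metadata directories.
import os
import tarfile

def write_package_tar(root, tarfn):
    entries = os.listdir(root)
    if not entries:
        print("Not creating empty archive for %s" % root)
        return False

    def exclude_metadata(tarinfo):
        # Equivalent to: tar --exclude=CONTROL --exclude=DEBIAN
        parts = tarinfo.name.split("/")
        if "CONTROL" in parts or "DEBIAN" in parts:
            return None
        return tarinfo

    with tarfile.open(tarfn, "w:gz") as tar:
        for entry in entries:
            tar.add(os.path.join(root, entry), arcname=entry,
                    filter=exclude_metadata)
    return True

# write_package_tar("/tmp/pkgdest/mypkg", "/tmp/deploy/mypkg-1.0-r0.tar.gz")
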
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
deleted file mode 100644
index a903e5cfd2..0000000000
--- a/meta/classes/packagedata.bbclass
+++ /dev/null
@@ -1,34 +0,0 @@
-python read_subpackage_metadata () {
- import oe.packagedata
-
- vars = {
- "PN" : d.getVar('PN'),
- "PE" : d.getVar('PE'),
- "PV" : d.getVar('PV'),
- "PR" : d.getVar('PR'),
- }
-
- data = oe.packagedata.read_pkgdata(vars["PN"], d)
-
- for key in data.keys():
- d.setVar(key, data[key])
-
- for pkg in d.getVar('PACKAGES').split():
- sdata = oe.packagedata.read_subpkgdata(pkg, d)
- for key in sdata.keys():
- if key in vars:
- if sdata[key] != vars[key]:
- if key == "PN":
- bb.fatal("Recipe %s is trying to create package %s which was already written by recipe %s. This will cause corruption, please resolve this and only provide the package from one recipe or the other or only build one of the recipes." % (vars[key], pkg, sdata[key]))
- bb.fatal("Recipe %s is trying to change %s from '%s' to '%s'. This will cause do_package_write_* failures since the incorrect data will be used and they will be unable to find the right workdir." % (vars["PN"], key, vars[key], sdata[key]))
- continue
- #
- # If we set unsuffixed variables here there is a chance they could clobber override versions
- # of that variable, e.g. DESCRIPTION could clobber DESCRIPTION_<pkgname>
-            # We therefore avoid clobbering when setting the unsuffixed variable versions
- #
- if key.endswith("_" + pkg):
- d.setVar(key, sdata[key])
- else:
- d.setVar(key, sdata[key], parsing=True)
-}
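
read_subpackage_metadata above leans on oe.packagedata to read the flat key/value files under PKGDATA_DIR. The sketch below illustrates that file format and a simplified parse; the sample content is invented and the parser is not the real oe.packagedata implementation.

# Rough sketch of the pkgdata file format consumed by read_subpackage_metadata()
# via oe.packagedata (not the real implementation). Each file under
# PKGDATA_DIR/runtime/<pkg> is a flat "KEY: value" list.
sample = """\
PN: busybox
PKG_busybox: busybox
PKGV: 1.31.1
PKGR: r0
DESCRIPTION_busybox: Tiny versions of many common UNIX utilities.
"""

def parse_pkgdata(text):
    data = {}
    for line in text.splitlines():
        if not line or ":" not in line:
            continue
        key, value = line.split(":", 1)
        data[key.strip()] = value.strip()
    return data

sdata = parse_pkgdata(sample)
pkg = "busybox"
for key, value in sdata.items():
    # Mirror the clobbering rule above: package-suffixed keys are always safe
    # to set; unsuffixed ones need more care so overrides are not clobbered.
    if key.endswith("_" + pkg):
        print("set %s = %s" % (key, value))
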
diff --git a/meta/classes/packagefeed-stability.bbclass b/meta/classes/packagefeed-stability.bbclass
deleted file mode 100644
index 5648602564..0000000000
--- a/meta/classes/packagefeed-stability.bbclass
+++ /dev/null
@@ -1,252 +0,0 @@
-# Class to avoid copying packages into the feed if they haven't materially changed
-#
-# Copyright (C) 2015 Intel Corporation
-# Released under the MIT license (see COPYING.MIT for details)
-#
-# This class effectively intercepts packages as they are written out by
-# do_package_write_*, causing them to be written into a different
-# directory where we can compare them to whatever older packages might
-# be in the "real" package feed directory, and avoid copying the new
-# package to the feed if it has not materially changed. The idea is to
-# avoid unnecessary churn in the packages when dependencies trigger task
-# reexecution (and thus repackaging). Enabling the class is simple:
-#
-# INHERIT += "packagefeed-stability"
-#
-# Caveats:
-# 1) Latest PR values in the build system may not match those in packages
-# seen on the target (naturally)
-# 2) If you rebuild from sstate without the existing package feed present,
-# you will lose the "state" of the package feed i.e. the preserved old
-# package versions. Not the end of the world, but would negate the
-# entire purpose of this class.
-#
-# Note that running -c cleanall on a recipe will purposely delete the old
-# package files so they will definitely be copied the next time.
-
-python() {
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
- # Package backend agnostic intercept
- # This assumes that the package_write task is called package_write_<pkgtype>
- # and that the directory in which packages should be written is
- # pointed to by the variable DEPLOY_DIR_<PKGTYPE>
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- pkgwritefunc = 'do_package_write_%s' % pkgtype
- sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False)
- deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper()
- deploydirvarref = '${' + deploydirvar + '}'
- pkgcomparefunc = 'do_package_compare_%s' % pkgtype
-
- if bb.data.inherits_class('image', d):
- d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_base', d):
- d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if bb.data.inherits_class('populate_sdk_ext', d):
- d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc)
-
- d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc)
-
- if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'):
- # Packaging is disabled for this recipe, we shouldn't do anything
- continue
-
- if deploydirvarref in sstate_outputdirs:
-            deploy_dir_pkgtype = d.expand(deploydirvarref + '-prediff')
-            # Set intermediate output directory
-            d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deploy_dir_pkgtype))
-            # Update SSTATE_DUPWHITELIST to avoid shared-location conflict errors
-            d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deploy_dir_pkgtype)
-
- d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False))
- d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False))
- d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot')
- bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d)
-}
-
-# This isn't the real task function - it's a template that we use in the
-# anonymous python code above
-fakeroot python do_package_compare () {
- currenttask = d.getVar('BB_CURRENTTASK')
- pkgtype = currenttask.rsplit('_', 1)[1]
- package_compare_impl(pkgtype, d)
-}
-
-def package_compare_impl(pkgtype, d):
- import errno
- import fnmatch
- import glob
- import subprocess
- import oe.sstatesig
-
- pn = d.getVar('PN')
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff/'
-
-    # Find out what the PKGR values are
- pkgdatadir = d.getVar('PKGDATA_DIR')
- packages = []
- try:
- with open(os.path.join(pkgdatadir, pn), 'r') as f:
- for line in f:
- if line.startswith('PACKAGES:'):
- packages = line.split(':', 1)[1].split()
- break
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- if not packages:
- bb.debug(2, '%s: no packages, nothing to do' % pn)
- return
-
- pkgrvalues = {}
- rpkgnames = {}
- rdepends = {}
- pkgvvalues = {}
- for pkg in packages:
- with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f:
- for line in f:
- if line.startswith('PKGR:'):
- pkgrvalues[pkg] = line.split(':', 1)[1].strip()
- if line.startswith('PKGV:'):
- pkgvvalues[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('PKG_%s:' % pkg):
- rpkgnames[pkg] = line.split(':', 1)[1].strip()
- elif line.startswith('RDEPENDS_%s:' % pkg):
- rdepends[pkg] = line.split(':', 1)[1].strip()
-
- # Prepare a list of the runtime package names for packages that were
- # actually produced
- rpkglist = []
- for pkg, rpkg in rpkgnames.items():
- if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')):
- rpkglist.append((rpkg, pkg))
- rpkglist.sort(key=lambda x: len(x[0]), reverse=True)
-
- pvu = d.getVar('PV', False)
- if '$' + '{SRCPV}' in pvu:
- pvprefix = pvu.split('$' + '{SRCPV}', 1)[0]
- else:
- pvprefix = None
-
- pkgwritetask = 'package_write_%s' % pkgtype
- files = []
- docopy = False
- manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d)
- mlprefix = d.getVar('MLPREFIX')
-    # Copy all of the recipe's packages if any one of them differs, so that
-    # they all keep the same PR.
- with open(manifest, 'r') as f:
- for line in f:
- if line.startswith(prepath):
- srcpath = line.rstrip()
- if os.path.isfile(srcpath):
- destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath))
-
- # This is crude but should work assuming the output
- # package file name starts with the package name
- # and rpkglist is sorted by length (descending)
- pkgbasename = os.path.basename(destpath)
- pkgname = None
- for rpkg, pkg in rpkglist:
- if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix):
- rpkg = rpkg[len(mlprefix):]
- if pkgbasename.startswith(rpkg):
- pkgr = pkgrvalues[pkg]
- destpathspec = destpath.replace(pkgr, '*')
- if pvprefix:
- pkgv = pkgvvalues[pkg]
- if pkgv.startswith(pvprefix):
- pkgvsuffix = pkgv[len(pvprefix):]
- if '+' in pkgvsuffix:
- newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1]
- destpathspec = destpathspec.replace(pkgv, newpkgv)
- pkgname = pkg
- break
- else:
- bb.warn('Unable to map %s back to package' % pkgbasename)
- destpathspec = destpath
-
- oldfile = None
- if not docopy:
- oldfiles = glob.glob(destpathspec)
- if oldfiles:
- oldfile = oldfiles[-1]
- result = subprocess.call(['pkg-diff.sh', oldfile, srcpath])
- if result != 0:
- docopy = True
- bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath))
- else:
- docopy = True
- bb.note("No old packages found for %s, will copy packages" % pkgname)
-
- files.append((pkgname, pkgbasename, srcpath, destpath))
-
- # Remove all the old files and copy again if docopy
- if docopy:
- bb.note('Copying packages for recipe %s' % pn)
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- bb.note('Removed old package %s' % fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-
- # Create new manifest
- with open(pcmanifest, 'w') as f:
- for pkgname, pkgbasename, srcpath, destpath in files:
- destdir = os.path.dirname(destpath)
- bb.utils.mkdirhier(destdir)
-                # Remove the allarch rpm package if it already exists (for
-                # multilib); they're identical in theory, but sstate.bbclass
-                # copies it again, so keep aligned with that.
- if os.path.exists(destpath) and pkgtype == 'rpm' \
- and d.getVar('PACKAGE_ARCH') == 'all':
- os.unlink(destpath)
- if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev):
- # Use a hard link to save space
- os.link(srcpath, destpath)
- else:
- shutil.copyfile(srcpath, destpath)
- f.write('%s\n' % destpath)
- else:
- bb.note('Not copying packages for recipe %s' % pn)
-
-do_cleansstate[postfuncs] += "pfs_cleanpkgs"
-python pfs_cleanpkgs () {
- import errno
- for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split():
- if pkgclass.startswith('package_'):
- pkgtype = pkgclass.split('_', 1)[1]
- deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper())
- prepath = deploydir + '-prediff'
- pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}'))
- try:
- with open(pcmanifest, 'r') as f:
- for line in f:
- fn = line.rstrip()
- if fn:
- try:
- os.remove(fn)
- except OSError as e:
- if e.errno == errno.ENOENT:
- pass
- os.remove(pcmanifest)
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
-}
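
One detail of package_compare_impl above worth calling out: when packages are copied into the real feed, a hard link is used if the source and destination live on the same filesystem. A small standalone sketch of that decision follows, with hypothetical paths.

# Standalone sketch of the link-or-copy decision in package_compare_impl():
# hard-link when source and destination directory share a device, copy otherwise.
import os
import shutil

def place_package(srcpath, destpath):
    destdir = os.path.dirname(destpath)
    os.makedirs(destdir, exist_ok=True)
    if os.path.exists(destpath):
        os.unlink(destpath)
    if os.stat(srcpath).st_dev == os.stat(destdir).st_dev:
        os.link(srcpath, destpath)      # same filesystem: hard link saves space
    else:
        shutil.copyfile(srcpath, destpath)

# place_package("/tmp/deploy-prediff/rpm/core2_64/foo-1.0-r0.rpm",
#               "/tmp/deploy/rpm/core2_64/foo-1.0-r0.rpm")
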
diff --git a/meta/classes/packagegroup.bbclass b/meta/classes/packagegroup.bbclass
deleted file mode 100644
index 1541c8fbff..0000000000
--- a/meta/classes/packagegroup.bbclass
+++ /dev/null
@@ -1,61 +0,0 @@
-# Class for packagegroup (package group) recipes
-
-# By default, only the packagegroup package itself is in PACKAGES.
-# -dbg and -dev flavours are handled by the anonfunc below.
-# This means that packagegroup recipes used to build multiple packagegroup
-# packages have to modify PACKAGES after inheriting packagegroup.bbclass.
-PACKAGES = "${PN}"
-
-# By default, packagegroup packages do not depend on a specific architecture.
-# Only if dependencies are modified by MACHINE_FEATURES does PACKAGE_ARCH
-# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass.
-PACKAGE_ARCH ?= "all"
-
-# Fully expanded - so it applies the overrides as well
-PACKAGE_ARCH_EXPANDED := "${PACKAGE_ARCH}"
-
-LICENSE ?= "MIT"
-
-inherit ${@oe.utils.ifelse(d.getVar('PACKAGE_ARCH_EXPANDED') == 'all', 'allarch', '')}
-
-# This automatically adds -dbg and -dev flavours of all PACKAGES
-# to the list. Their dependencies (RRECOMMENDS) are handled as usual
-# by package_depchains in a following step.
-# Also mark all packages as ALLOW_EMPTY
-python () {
- packages = d.getVar('PACKAGES').split()
- if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY') != '1':
- types = ['', '-dbg', '-dev']
- if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
- types.append('-ptest')
- packages = [pkg + suffix for pkg in packages
- for suffix in types]
- d.setVar('PACKAGES', ' '.join(packages))
- for pkg in packages:
- d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
-}
-
-# We don't want to look at shared library dependencies for the
-# dbg packages
-DEPCHAIN_DBGDEFAULTDEPS = "1"
-
-# We only need the packaging tasks - disable the rest
-deltask do_fetch
-deltask do_unpack
-deltask do_patch
-deltask do_configure
-deltask do_compile
-deltask do_install
-deltask do_populate_sysroot
-
-INHIBIT_DEFAULT_DEPS = "1"
-
-python () {
- if bb.data.inherits_class('nativesdk', d):
- return
- initman = d.getVar("VIRTUAL-RUNTIME_init_manager")
- if initman and initman in ['sysvinit', 'systemd'] and not bb.utils.contains('DISTRO_FEATURES', initman, True, False, d):
- bb.fatal("Please ensure that your setting of VIRTUAL-RUNTIME_init_manager (%s) matches the entries enabled in DISTRO_FEATURES" % initman)
-}
-
-CVE_PRODUCT = ""
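
The anonymous Python in packagegroup.bbclass above expands PACKAGES into complementary flavours; the expansion itself is a plain list comprehension, as this short sketch (with example package names) shows.

# Sketch of the PACKAGES expansion performed by packagegroup.bbclass above:
# each listed package grows -dbg/-dev (and -ptest when enabled) flavours.
packages = "packagegroup-core-boot packagegroup-core-tools".split()
ptest_enabled = True   # stands in for 'ptest' in DISTRO_FEATURES

types = ['', '-dbg', '-dev']
if ptest_enabled:
    types.append('-ptest')

expanded = [pkg + suffix for pkg in packages for suffix in types]
print(expanded)
# ['packagegroup-core-boot', 'packagegroup-core-boot-dbg', ...,
#  'packagegroup-core-tools-ptest']
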
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
deleted file mode 100644
index 25ec089ae1..0000000000
--- a/meta/classes/patch.bbclass
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (C) 2006 OpenedHand LTD
-
-# Point to an empty file so any user's custom settings don't break things
-QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
-
-PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
-
-# There is a bug in patch 2.7.3 and earlier where index lines
-# in patches can change file modes when they shouldn't:
-# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
-# This leaks into debug sources in particular. Add the dependency
-# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
-PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
-
-PATCH_GIT_USER_NAME ?= "OpenEmbedded"
-PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
-
-inherit terminal
-
-python () {
- if d.getVar('PATCHTOOL') == 'git' and d.getVar('PATCH_COMMIT_FUNCTIONS') == '1':
- extratasks = bb.build.tasksbetween('do_unpack', 'do_patch', d)
- try:
- extratasks.remove('do_unpack')
- except ValueError:
- # For some recipes do_unpack doesn't exist, ignore it
- pass
-
- d.appendVarFlag('do_patch', 'prefuncs', ' patch_task_patch_prefunc')
- for task in extratasks:
- d.appendVarFlag(task, 'postfuncs', ' patch_task_postfunc')
-}
-
-python patch_task_patch_prefunc() {
- # Prefunc for do_patch
- srcsubdir = d.getVar('S')
-
- workdir = os.path.abspath(d.getVar('WORKDIR'))
- testsrcdir = os.path.abspath(srcsubdir)
- if (testsrcdir + os.sep).startswith(workdir + os.sep):
- # Double-check that either workdir or S or some directory in-between is a git repository
- found = False
- while testsrcdir != workdir:
- if os.path.exists(os.path.join(testsrcdir, '.git')):
- found = True
- break
- if testsrcdir == workdir:
- break
- testsrcdir = os.path.dirname(testsrcdir)
- if not found:
- bb.fatal('PATCHTOOL = "git" set for source tree that is not a git repository. Refusing to continue as that may result in commits being made in your metadata repository.')
-
- patchdir = os.path.join(srcsubdir, 'patches')
- if os.path.exists(patchdir):
- if os.listdir(patchdir):
- d.setVar('PATCH_HAS_PATCHES_DIR', '1')
- else:
- os.rmdir(patchdir)
-}
-
-python patch_task_postfunc() {
-    # Postfunc for task functions between do_unpack and do_patch
- import oe.patch
- import shutil
- func = d.getVar('BB_RUNTASK')
- srcsubdir = d.getVar('S')
-
- if os.path.exists(srcsubdir):
- if func == 'do_patch':
- haspatches = (d.getVar('PATCH_HAS_PATCHES_DIR') == '1')
- patchdir = os.path.join(srcsubdir, 'patches')
- if os.path.exists(patchdir):
- shutil.rmtree(patchdir)
- if haspatches:
- stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
- if stdout:
- bb.process.run('git checkout patches', cwd=srcsubdir)
- stdout, _ = bb.process.run('git status --porcelain .', cwd=srcsubdir)
- if stdout:
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
- bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(useroptions), func, oe.patch.GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
-}
-
-def src_patches(d, all=False, expand=True):
- import oe.patch
- return oe.patch.src_patches(d, all, expand)
-
-def should_apply(parm, d):
- """Determine if we should apply the given patch"""
- import oe.patch
- return oe.patch.should_apply(parm, d)
-
-should_apply[vardepsexclude] = "DATE SRCDATE"
-
-python patch_do_patch() {
- import oe.patch
-
- patchsetmap = {
- "patch": oe.patch.PatchTree,
- "quilt": oe.patch.QuiltTree,
- "git": oe.patch.GitApplyTree,
- }
-
- cls = patchsetmap[d.getVar('PATCHTOOL') or 'quilt']
-
- resolvermap = {
- "noop": oe.patch.NOOPResolver,
- "user": oe.patch.UserResolver,
- }
-
- rcls = resolvermap[d.getVar('PATCHRESOLVE') or 'user']
-
- classes = {}
-
- s = d.getVar('S')
-
- os.putenv('PATH', d.getVar('PATH'))
-
- # We must use one TMPDIR per process so that the "patch" processes
- # don't generate the same temp file name.
-
- import tempfile
- process_tmpdir = tempfile.mkdtemp()
- os.environ['TMPDIR'] = process_tmpdir
-
- for patch in src_patches(d):
- _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
-
- if "patchdir" in parm:
- patchdir = parm["patchdir"]
- if not os.path.isabs(patchdir):
- patchdir = os.path.join(s, patchdir)
- else:
- patchdir = s
-
- if not patchdir in classes:
- patchset = cls(patchdir, d)
- resolver = rcls(patchset, oe_terminal)
- classes[patchdir] = (patchset, resolver)
- patchset.Clean()
- else:
- patchset, resolver = classes[patchdir]
-
- bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
- try:
- patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
- except Exception as exc:
- bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
- try:
- resolver.Resolve()
- except bb.BBHandledException as e:
- bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
-
- bb.utils.remove(process_tmpdir, True)
- del os.environ['TMPDIR']
-}
-patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
-
-addtask patch after do_unpack
-do_patch[umask] = "022"
-do_patch[dirs] = "${WORKDIR}"
-do_patch[depends] = "${PATCHDEPENDENCY}"
-
-EXPORT_FUNCTIONS do_patch
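
patch_task_patch_prefunc above refuses git-based patching unless some directory between S and WORKDIR is a git checkout. A standalone sketch of that upward walk, assuming hypothetical paths:

# Standalone sketch of the safety check in patch_task_patch_prefunc(): walk
# upwards from the source directory towards the work directory looking for a
# .git directory, and refuse to proceed if none is found.
import os

def find_git_repo(srcdir, workdir):
    workdir = os.path.abspath(workdir)
    testdir = os.path.abspath(srcdir)
    if not (testdir + os.sep).startswith(workdir + os.sep):
        return None   # S lives outside WORKDIR, so the check is skipped
    while testdir != workdir:
        if os.path.exists(os.path.join(testdir, '.git')):
            return testdir
        testdir = os.path.dirname(testdir)
    return None

# repo = find_git_repo("/tmp/work/foo-1.0/git", "/tmp/work/foo-1.0")
# if repo is None: raise RuntimeError('PATCHTOOL = "git" needs a git checkout')
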
diff --git a/meta/classes/perl-version.bbclass b/meta/classes/perl-version.bbclass
deleted file mode 100644
index 84b67b8180..0000000000
--- a/meta/classes/perl-version.bbclass
+++ /dev/null
@@ -1,66 +0,0 @@
-PERL_OWN_DIR = ""
-
-# Determine the staged version of perl from the perl configuration file
-# Assign vardepvalue, because otherwise signature is changed before and after
-# perl is built (from None to real version in config.sh).
-get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
-def get_perl_version(d):
- import re
- cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
- try:
- f = open(cfg, 'r')
- except IOError:
- return None
-    l = f.readlines()
-    f.close()
- r = re.compile(r"^version='(\d*\.\d*\.\d*)'")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
-
-PERLVERSION := "${@get_perl_version(d)}"
-PERLVERSION[vardepvalue] = ""
-
-
-# Determine the staged arch of perl from the perl configuration file
-# Assign vardepvalue, because otherwise signature is changed before and after
-# perl is built (from None to real version in config.sh).
-def get_perl_arch(d):
- import re
- cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
- try:
- f = open(cfg, 'r')
- except IOError:
- return None
-    l = f.readlines()
-    f.close()
- r = re.compile("^archname='([^']*)'")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
-
-PERLARCH := "${@get_perl_arch(d)}"
-PERLARCH[vardepvalue] = ""
-
-# Determine the staged arch of perl-native from the perl configuration file
-# Assign vardepvalue, because otherwise signature is changed before and after
-# perl is built (from None to real version in config.sh).
-def get_perl_hostarch(d):
- import re
- cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
- try:
- f = open(cfg, 'r')
- except IOError:
- return None
-    l = f.readlines()
-    f.close()
- r = re.compile("^archname='([^']*)'")
- for s in l:
- m = r.match(s)
- if m:
- return m.group(1)
- return None
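
The three helpers in perl-version.bbclass above all scrape a single quoted value out of perl's config.sh with a regex. A standalone sketch of the same extraction against made-up sample content:

# Standalone sketch of how get_perl_version()/get_perl_arch() scrape values
# out of perl's config.sh; the sample content below is invented.
import re

sample_config_sh = """\
archname='x86_64-linux'
version='5.30.1'
"""

def scrape(config_text, pattern):
    regex = re.compile(pattern)
    for line in config_text.splitlines():
        m = regex.match(line)
        if m:
            return m.group(1)
    return None

print(scrape(sample_config_sh, r"^version='(\d*\.\d*\.\d*)'"))   # 5.30.1
print(scrape(sample_config_sh, r"^archname='([^']*)'"))          # x86_64-linux
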
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
deleted file mode 100644
index cc8de8b381..0000000000
--- a/meta/classes/perlnative.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-EXTRANATIVEPATH += "perl-native"
-DEPENDS += "perl-native"
-OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pixbufcache.bbclass b/meta/classes/pixbufcache.bbclass
deleted file mode 100644
index b07f51ed56..0000000000
--- a/meta/classes/pixbufcache.bbclass
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# This class will generate the proper postinst/postrm scriptlets for pixbuf
-# packages.
-#
-
-DEPENDS_append_class-target = " qemu-native"
-inherit qemu
-
-PIXBUF_PACKAGES ??= "${PN}"
-
-PACKAGE_WRITE_DEPS += "qemu-native gdk-pixbuf-native"
-
-pixbufcache_common() {
-if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
- bindir=${bindir} base_libdir=${base_libdir}
-else
-
- # Update the pixbuf loaders in case they haven't been registered yet
- ${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
-
- if [ -x ${bindir}/gtk-update-icon-cache ] && [ -d ${datadir}/icons ]; then
- for icondir in /usr/share/icons/*; do
- if [ -d ${icondir} ]; then
- gtk-update-icon-cache -t -q ${icondir}
- fi
- done
- fi
-fi
-}
-
-python populate_packages_append() {
- pixbuf_pkgs = d.getVar('PIXBUF_PACKAGES').split()
-
- for pkg in pixbuf_pkgs:
- bb.note("adding pixbuf postinst and postrm scripts to %s" % pkg)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('pixbufcache_common')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('pixbufcache_common')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-}
-
-gdkpixbuf_complete() {
-GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache || exit 1
-}
-
-DEPENDS_append_class-native = " gdk-pixbuf-native"
-SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
-
-pixbufcache_sstate_postinst() {
- mkdir -p ${SYSROOT_DESTDIR}${bindir}
- dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
- echo '#!/bin/sh' > $dest
- echo "${gdkpixbuf_complete}" >> $dest
- chmod 0755 $dest
-}
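
populate_packages_append above composes each package's postinst/postrm by starting from any recipe-provided scriptlet (or a bare shebang) and appending the shared pixbufcache_common body. A rough sketch of that composition, with placeholder shell content standing in for the real scriptlet:

# Sketch of how populate_packages_append() composes postinst/postrm scriptlets:
# start from the recipe's own script if one exists, otherwise a bare shebang,
# then append the shared pixbufcache_common body (placeholder text here).
pixbufcache_common = """\
if [ "x$D" != "x" ]; then
    echo "offline image build: defer cache update via postinst intercept"
else
    echo "on target: regenerate gdk-pixbuf loader cache"
fi
"""

def compose_scriptlet(existing):
    script = existing or '#!/bin/sh\n'
    return script + pixbufcache_common

print(compose_scriptlet(None))
print(compose_scriptlet('#!/bin/sh\necho "recipe-specific step"\n'))
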
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
deleted file mode 100644
index ad1f84f506..0000000000
--- a/meta/classes/pkgconfig.bbclass
+++ /dev/null
@@ -1,2 +0,0 @@
-DEPENDS_prepend = "pkgconfig-native "
-
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
deleted file mode 100644
index f64a911b72..0000000000
--- a/meta/classes/populate_sdk.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-# The majority of populate_sdk is located in populate_sdk_base
-# This chunk simply facilitates compatibility with SDK-only recipes.
-
-inherit populate_sdk_base
-
-addtask populate_sdk after do_install before do_build
-
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
deleted file mode 100644
index 990505e89b..0000000000
--- a/meta/classes/populate_sdk_base.bbclass
+++ /dev/null
@@ -1,338 +0,0 @@
-inherit meta image-postinst-intercepts
-
-# Wildcards specifying complementary packages to install for every package that has been explicitly
-# installed into the rootfs
-COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
-COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
-COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
-COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
-COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
-COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
-COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
-
-def complementary_globs(featurevar, d):
- all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
- globs = []
- features = set((d.getVar(featurevar) or '').split())
- for name, glob in all_globs.items():
- if name in features:
- globs.append(glob)
- return ' '.join(globs)
-
-SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
-SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
-SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
-
-PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
-SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
-
-# List of locales to install, or "all" for all of them, or unset for none.
-SDKIMAGE_LINGUAS ?= "all"
-
-inherit rootfs_${IMAGE_PKGTYPE}
-
-SDK_DIR = "${WORKDIR}/sdk"
-SDK_OUTPUT = "${SDK_DIR}/image"
-SDK_DEPLOY = "${DEPLOY_DIR}/sdk"
-
-SDKDEPLOYDIR = "${WORKDIR}/${SDKMACHINE}-deploy-${PN}-populate-sdk"
-
-B_task-populate-sdk = "${SDK_DIR}"
-
-SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${REAL_MULTIMACH_TARGET_SYS}"
-
-TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
-TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} target-sdk-provides-dummy"
-TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
-
-# Default suffix for the archived SDK
-SDK_ARCHIVE_TYPE ?= "tar.xz"
-SDK_XZ_COMPRESSION_LEVEL ?= "-9"
-SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
-
-# Support different SDK archive types according to SDK_ARCHIVE_TYPE; currently zip and tar.xz are supported
-python () {
- if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
- d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
-        # SDK_ARCHIVE_CMD is used to generate the archived SDK ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}.
-        # It is recommended to cd into the input dir first so build paths do not end up in the archive.
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
- else:
- d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
-}
-
-SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
-SDK_DEPENDS += "nativesdk-glibc-locale"
-
-# We want the MULTIMACH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
-# could be set to the MACHINE_ARCH
-REAL_MULTIMACH_TARGET_SYS = "${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
-
-PID = "${@os.getpid()}"
-
-EXCLUDE_FROM_WORLD = "1"
-
-SDK_PACKAGING_FUNC ?= "create_shar"
-SDK_PRE_INSTALL_COMMAND ?= ""
-SDK_POST_INSTALL_COMMAND ?= ""
-SDK_RELOCATE_AFTER_INSTALL ?= "1"
-
-SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
-SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
-
-SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
-SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
-SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
-SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
-
-python write_target_sdk_manifest () {
- from oe.sdk import sdk_list_installed_packages
- from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST"))
- pkgs = sdk_list_installed_packages(d, True)
- if not os.path.exists(sdkmanifestdir):
- bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_TARGET_MANIFEST'), 'w') as output:
- output.write(format_pkg_list(pkgs, 'ver'))
-}
-
-python write_sdk_test_data() {
- from oe.data import export2json
- testdata = "%s/%s.testdata.json" % (d.getVar('SDKDEPLOYDIR'), d.getVar('TOOLCHAIN_OUTPUTNAME'))
- bb.utils.mkdirhier(os.path.dirname(testdata))
- export2json(d, testdata)
-}
-
-python write_host_sdk_manifest () {
- from oe.sdk import sdk_list_installed_packages
- from oe.utils import format_pkg_list
- sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST"))
- pkgs = sdk_list_installed_packages(d, False)
- if not os.path.exists(sdkmanifestdir):
- bb.utils.mkdirhier(sdkmanifestdir)
- with open(d.getVar('SDK_HOST_MANIFEST'), 'w') as output:
- output.write(format_pkg_list(pkgs, 'ver'))
-}
-
-POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
-POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
-POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
-SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
-
-def populate_sdk_common(d):
- from oe.sdk import populate_sdk
- from oe.manifest import create_manifest, Manifest
-
- # Handle package exclusions
- excl_pkgs = (d.getVar("PACKAGE_EXCLUDE") or "").split()
- inst_pkgs = (d.getVar("PACKAGE_INSTALL") or "").split()
- inst_attempt_pkgs = (d.getVar("PACKAGE_INSTALL_ATTEMPTONLY") or "").split()
-
- d.setVar('PACKAGE_INSTALL_ORIG', ' '.join(inst_pkgs))
- d.setVar('PACKAGE_INSTALL_ATTEMPTONLY', ' '.join(inst_attempt_pkgs))
-
- for pkg in excl_pkgs:
- if pkg in inst_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
- inst_pkgs.remove(pkg)
-
- if pkg in inst_attempt_pkgs:
- bb.warn("Package %s, set to be excluded, is in %s PACKAGE_INSTALL_ATTEMPTONLY (%s). It will be removed from the list." % (pkg, d.getVar('PN'), inst_pkgs))
- inst_attempt_pkgs.remove(pkg)
-
- d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
- d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
-
- pn = d.getVar('PN')
- runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", pn, d)
- runtime_mapping_rename("TOOLCHAIN_TARGET_TASK_ATTEMPTONLY", pn, d)
-
- ld = bb.data.createCopy(d)
- ld.setVar("PKGDATA_DIR", "${STAGING_DIR}/${SDK_ARCH}-${SDKPKGSUFFIX}${SDK_VENDOR}-${SDK_OS}/pkgdata")
- runtime_mapping_rename("TOOLCHAIN_HOST_TASK", pn, ld)
- runtime_mapping_rename("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", pn, ld)
- d.setVar("TOOLCHAIN_HOST_TASK", ld.getVar("TOOLCHAIN_HOST_TASK"))
- d.setVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY", ld.getVar("TOOLCHAIN_HOST_TASK_ATTEMPTONLY"))
-
- # create target/host SDK manifests
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
- manifest_type=Manifest.MANIFEST_TYPE_SDK_HOST)
- create_manifest(d, manifest_dir=d.getVar('SDK_DIR'),
- manifest_type=Manifest.MANIFEST_TYPE_SDK_TARGET)
-
- populate_sdk(d)
-
-fakeroot python do_populate_sdk() {
- populate_sdk_common(d)
-}
-SSTATETASKS += "do_populate_sdk"
-SSTATE_SKIP_CREATION_task-populate-sdk = '1'
-do_populate_sdk[cleandirs] = "${SDKDEPLOYDIR}"
-do_populate_sdk[sstate-inputdirs] = "${SDKDEPLOYDIR}"
-do_populate_sdk[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk[stamp-extra-info] = "${MACHINE_ARCH}${SDKMACHINE}"
-
-fakeroot create_sdk_files() {
- cp ${COREBASE}/scripts/relocate_sdk.py ${SDK_OUTPUT}/${SDKPATH}/
-
- # Replace the ##DEFAULT_INSTALL_DIR## with the correct pattern.
- # Escape special characters like '+' and '.' in the SDKPATH
- escaped_sdkpath=$(echo ${SDKPATH} |sed -e "s:[\+\.]:\\\\\\\\\0:g")
- sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
-
- mkdir -p ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/
- echo '${SDKPATHNATIVE}${libdir_nativesdk}
-${SDKPATHNATIVE}${base_libdir_nativesdk}
-include /etc/ld.so.conf' > ${SDK_OUTPUT}/${SDKPATHNATIVE}${sysconfdir}/ld.so.conf
-}
-
-python check_sdk_sysroots() {
- # Fails build if there are broken or dangling symlinks in SDK sysroots
-
- if d.getVar('CHECK_SDK_SYSROOTS') != '1':
- # disabled, bail out
- return
-
- def norm_path(path):
- return os.path.abspath(path)
-
- # Get scan root
- SCAN_ROOT = norm_path("%s/%s/sysroots/" % (d.getVar('SDK_OUTPUT'),
- d.getVar('SDKPATH')))
-
- bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
-
- def check_symlink(linkPath):
- if not os.path.islink(linkPath):
- return
-
- linkDirPath = os.path.dirname(linkPath)
-
- targetPath = os.readlink(linkPath)
- if not os.path.isabs(targetPath):
- targetPath = os.path.join(linkDirPath, targetPath)
- targetPath = norm_path(targetPath)
-
- if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
- bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
- return
-
- if not os.path.exists(targetPath):
- bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
- return
-
- if os.path.isdir(targetPath):
- dir_walk(targetPath)
-
- def walk_error_handler(e):
- bb.error(str(e))
-
- def dir_walk(rootDir):
- for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
- entries = subDirEntries + fileEntries
- for e in entries:
- ePath = os.path.join(dirPath, e)
- check_symlink(ePath)
-
- # start
- dir_walk(SCAN_ROOT)
-}
-
-SDKTAROPTS = "--owner=root --group=root"
-
-fakeroot archive_sdk() {
- # Package it up
- mkdir -p ${SDKDEPLOYDIR}
- ${SDK_ARCHIVE_CMD}
-}
-
-TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
-TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
-
-fakeroot create_shar() {
- # copy in the template shar extractor script
- cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
-
- rm -f ${T}/pre_install_command ${T}/post_install_command
-
- if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
- cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
- fi
- cat << "EOF" >> ${T}/pre_install_command
-${SDK_PRE_INSTALL_COMMAND}
-EOF
-
- cat << "EOF" >> ${T}/post_install_command
-${SDK_POST_INSTALL_COMMAND}
-EOF
- sed -i -e '/@SDK_PRE_INSTALL_COMMAND@/r ${T}/pre_install_command' \
- -e '/@SDK_POST_INSTALL_COMMAND@/r ${T}/post_install_command' \
- ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
-
- # substitute variables
- sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
- -e 's#@SDKPATH@#${SDKPATH}#g' \
- -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
- -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
- -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
- -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
- -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
- -e '/@SDK_PRE_INSTALL_COMMAND@/d' \
- -e '/@SDK_POST_INSTALL_COMMAND@/d' \
- -e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
- -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
- ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
-
- # add execution permission
- chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
-
- # append the SDK tarball
- cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
-
- # delete the old tarball, we don't need it anymore
- rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
-}
-
-populate_sdk_log_check() {
- for target in $*
- do
- lf_path="`dirname ${BB_LOGFILE}`/log.do_$target.${PID}"
-
- echo "log_check: Using $lf_path as logfile"
-
- if [ -e "$lf_path" ]; then
- ${IMAGE_PKGTYPE}_log_check $target $lf_path
- else
- echo "Cannot find logfile [$lf_path]"
- fi
- echo "Logfile is clean"
- done
-}
-
-def sdk_command_variables(d):
- return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_PRE_TARGET_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS']
-
-def sdk_variables(d):
- variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
- 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
- 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
- variables.extend(sdk_command_variables(d))
- return " ".join(variables)
-
-do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
-
-do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
- ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
-
-do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
-do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])} ${@d.getVarFlag('do_rootfs', 'depends', False)}"
-do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
-do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
-do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
-addtask populate_sdk
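
complementary_globs() above maps SDKIMAGE_FEATURES entries to package-name globs. Here is a plain-Python mimic with the varflag table inlined rather than read from the COMPLEMENTARY_GLOB varflags:

# Plain-Python mimic of complementary_globs() in populate_sdk_base.bbclass;
# the glob table is inlined here instead of coming from COMPLEMENTARY_GLOB.
COMPLEMENTARY_GLOB = {
    'dev-pkgs': '*-dev',
    'staticdev-pkgs': '*-staticdev',
    'doc-pkgs': '*-doc',
    'dbg-pkgs': '*-dbg',
    'src-pkgs': '*-src',
    'ptest-pkgs': '*-ptest',
}

def complementary_globs(features_value):
    features = set(features_value.split())
    return ' '.join(glob for name, glob in COMPLEMENTARY_GLOB.items()
                    if name in features)

print(complementary_globs("dev-pkgs dbg-pkgs src-pkgs"))
# *-dev *-dbg *-src
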
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
deleted file mode 100644
index fd0da16e7e..0000000000
--- a/meta/classes/populate_sdk_ext.bbclass
+++ /dev/null
@@ -1,782 +0,0 @@
-# Extensible SDK
-
-inherit populate_sdk_base
-
-# NOTE: normally you cannot use task overrides for this kind of thing - this
-# only works because of get_sdk_ext_rdepends()
-
-TOOLCHAIN_HOST_TASK_task-populate-sdk-ext = " \
- meta-environment-extsdk-${MACHINE} \
- "
-
-TOOLCHAIN_TARGET_TASK_task-populate-sdk-ext = ""
-
-SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
-
-SDK_EXT = ""
-SDK_EXT_task-populate-sdk-ext = "-ext"
-
-# Options are full or minimal
-SDK_EXT_TYPE ?= "full"
-SDK_INCLUDE_PKGDATA ?= "0"
-SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
-SDK_INCLUDE_NATIVESDK ?= "0"
-SDK_INCLUDE_BUILDTOOLS ?= '1'
-
-SDK_RECRDEP_TASKS ?= ""
-
-SDK_LOCAL_CONF_WHITELIST ?= ""
-SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
- BB_NUMBER_THREADS \
- BB_NUMBER_PARSE_THREADS \
- PARALLEL_MAKE \
- PRSERV_HOST \
- SSTATE_MIRRORS \
- DL_DIR \
- SSTATE_DIR \
- TMPDIR \
- BB_SERVER_TIMEOUT \
- "
-SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
-SDK_UPDATE_URL ?= ""
-
-SDK_TARGETS ?= "${PN}"
-
-def get_sdk_install_targets(d, images_only=False):
- sdk_install_targets = ''
- if images_only or d.getVar('SDK_EXT_TYPE') != 'minimal':
- sdk_install_targets = d.getVar('SDK_TARGETS')
-
- depd = d.getVar('BB_TASKDEPDATA', False)
- tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
- tasklist.remove('do_build')
- for v in depd.values():
- if v[1] in tasklist:
- if v[0] not in sdk_install_targets:
- sdk_install_targets += ' {}'.format(v[0])
-
- if not images_only:
- if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
- sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
- if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1':
- sdk_install_targets += ' meta-extsdk-toolchain:do_populate_sysroot'
-
- return sdk_install_targets
-
-get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
-
-OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
-
-# The files from COREBASE that you want preserved in the copy of COREBASE
-# placed into the SDK. This allows someone to have their own setup scripts
-# in COREBASE preserved, as well as untracked files.
-COREBASE_FILES ?= " \
- oe-init-build-env \
- scripts \
- LICENSE \
- .templateconf \
-"
-
-SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
-B_task-populate-sdk-ext = "${SDK_DIR}"
-TOOLCHAINEXT_OUTPUTNAME ?= "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
-TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
-
-SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
-SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
-
-python write_target_sdk_ext_manifest () {
- from oe.sdk import get_extra_sdkinfo
- sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
- extra_info = get_extra_sdkinfo(sstate_dir)
-
- target = d.getVar('TARGET_SYS')
- target_multimach = d.getVar('MULTIMACH_TARGET_SYS')
- real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
-
- pkgs = {}
- os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
- with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
- for fn in extra_info['filesizes']:
- info = fn.split(':')
- if info[2] in (target, target_multimach, real_target_multimach) \
- or info[5] == 'allarch':
- if not info[1] in pkgs:
- f.write("%s %s %s\n" % (info[1], info[2], info[3]))
- pkgs[info[1]] = {}
-}
-python write_host_sdk_ext_manifest () {
- from oe.sdk import get_extra_sdkinfo
- sstate_dir = d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')
- extra_info = get_extra_sdkinfo(sstate_dir)
- host = d.getVar('BUILD_SYS')
- with open(d.getVar('SDK_EXT_HOST_MANIFEST'), 'w') as f:
- for fn in extra_info['filesizes']:
- info = fn.split(':')
- if info[2] == host:
- f.write("%s %s %s\n" % (info[1], info[2], info[3]))
-}
-
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
-
-SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
-
-def clean_esdk_builddir(d, sdkbasepath):
- """Clean up traces of the fake build for create_filtered_tasklist()"""
- import shutil
- cleanpaths = ['cache', 'tmp']
- for pth in cleanpaths:
- fullpth = os.path.join(sdkbasepath, pth)
- if os.path.isdir(fullpth):
- shutil.rmtree(fullpth)
- elif os.path.isfile(fullpth):
- os.remove(fullpth)
-
-def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
- """
- Create a filtered list of tasks. Also double-checks that the build system
- within the SDK basically works and required sstate artifacts are available.
- """
- import tempfile
- import shutil
- import oe.copy_buildsystem
-
- # Create a temporary build directory that we can pass to the env setup script
- shutil.copyfile(sdkbasepath + '/conf/local.conf', sdkbasepath + '/conf/local.conf.bak')
- try:
- with open(sdkbasepath + '/conf/local.conf', 'a') as f:
- # Force the use of sstate from the build system
- f.write('\nSSTATE_DIR_forcevariable = "%s"\n' % d.getVar('SSTATE_DIR'))
- f.write('SSTATE_MIRRORS_forcevariable = "file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n')
- # Ensure TMPDIR is the default so that clean_esdk_builddir() can delete it
- f.write('TMPDIR_forcevariable = "${TOPDIR}/tmp"\n')
- f.write('TCLIBCAPPEND_forcevariable = ""\n')
- # Drop uninative if the build isn't using it (or else NATIVELSBSTRING will
- # be different and we won't be able to find our native sstate)
- if not bb.data.inherits_class('uninative', d):
- f.write('INHERIT_remove = "uninative"\n')
-
- # Unfortunately the default SDKPATH (or even a custom value) may contain characters that bitbake
- # will not allow in its COREBASE path, so we need to rename the directory temporarily
- temp_sdkbasepath = d.getVar('SDK_OUTPUT') + '/tmp-renamed-sdk'
- # Delete any existing temp dir
- try:
- shutil.rmtree(temp_sdkbasepath)
- except FileNotFoundError:
- pass
- os.rename(sdkbasepath, temp_sdkbasepath)
- cmdprefix = '. %s .; ' % conf_initpath
- logfile = d.getVar('WORKDIR') + '/tasklist_bb_log.txt'
- try:
- oe.copy_buildsystem.check_sstate_task_list(d, get_sdk_install_targets(d), tasklistfile, cmdprefix=cmdprefix, cwd=temp_sdkbasepath, logfile=logfile)
- except bb.process.ExecutionError as e:
- msg = 'Failed to generate filtered task list for extensible SDK:\n%s' % e.stdout.rstrip()
- if 'attempted to execute unexpectedly and should have been setscened' in e.stdout:
- msg += '\n----------\n\nNOTE: "attempted to execute unexpectedly and should have been setscened" errors indicate this may be caused by missing sstate artifacts that were likely produced in earlier builds, but have been subsequently deleted for some reason.\n'
- bb.fatal(msg)
- os.rename(temp_sdkbasepath, sdkbasepath)
- # Clean out residue of running bitbake, which check_sstate_task_list()
- # will effectively do
- clean_esdk_builddir(d, sdkbasepath)
- finally:
- localconf = sdkbasepath + '/conf/local.conf'
- if os.path.exists(localconf + '.bak'):
- os.replace(localconf + '.bak', localconf)
-
-python copy_buildsystem () {
- import re
- import shutil
- import glob
- import oe.copy_buildsystem
-
- oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT')
-
- conf_bbpath = ''
- conf_initpath = ''
- core_meta_subdir = ''
-
- # Copy in all metadata layers + bitbake (as repositories)
- buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
- baseoutpath = d.getVar('SDK_OUTPUT') + '/' + d.getVar('SDKPATH')
-
- # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
- derivative = (d.getVar('SDK_DERIVATIVE') or '') == '1'
- if derivative:
- workspace_name = 'orig-workspace'
- else:
- workspace_name = None
-
- corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
- conf_bbpath = os.path.join('layers', corebase, 'bitbake')
-
- for path in os.listdir(baseoutpath + '/layers'):
- relpath = os.path.join('layers', path, oe_init_env_script)
- if os.path.exists(os.path.join(baseoutpath, relpath)):
- conf_initpath = relpath
-
- relpath = os.path.join('layers', path, 'scripts', 'devtool')
- if os.path.exists(os.path.join(baseoutpath, relpath)):
- scriptrelpath = os.path.dirname(relpath)
-
- relpath = os.path.join('layers', path, 'meta')
- if os.path.exists(os.path.join(baseoutpath, relpath, 'lib', 'oe')):
- core_meta_subdir = relpath
-
- d.setVar('oe_init_build_env_path', conf_initpath)
- d.setVar('scriptrelpath', scriptrelpath)
-
- # Write out config file for devtool
- import configparser
- config = configparser.SafeConfigParser()
- config.add_section('General')
- config.set('General', 'bitbake_subdir', conf_bbpath)
- config.set('General', 'init_path', conf_initpath)
- config.set('General', 'core_meta_subdir', core_meta_subdir)
- config.add_section('SDK')
- config.set('SDK', 'sdk_targets', d.getVar('SDK_TARGETS'))
- updateurl = d.getVar('SDK_UPDATE_URL')
- if updateurl:
- config.set('SDK', 'updateserver', updateurl)
- bb.utils.mkdirhier(os.path.join(baseoutpath, 'conf'))
- with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
- config.write(f)
-
- unlockedsigs = os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
- with open(unlockedsigs, 'w') as f:
- pass
-
- # Create a layer for new recipes / appends
- bbpath = d.getVar('BBPATH')
- bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
-
- # Create bblayers.conf
- bb.utils.mkdirhier(baseoutpath + '/conf')
- with open(baseoutpath + '/conf/bblayers.conf', 'w') as f:
- f.write('# WARNING: this configuration has been automatically generated and in\n')
- f.write('# most cases should not be edited. If you need more flexibility than\n')
- f.write('# this configuration provides, it is strongly suggested that you set\n')
- f.write('# up a proper instance of the full build system and use that instead.\n\n')
-
- # LCONF_VERSION may not be set, for example when using meta-poky
- # so don't error if it isn't found
- lconf_version = d.getVar('LCONF_VERSION', False)
- if lconf_version is not None:
- f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
-
- f.write('BBPATH = "$' + '{TOPDIR}"\n')
- f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
- f.write('BBLAYERS := " \\\n')
- for layerrelpath in sdkbblayers:
- f.write(' $' + '{SDKBASEMETAPATH}/layers/%s \\\n' % layerrelpath)
- f.write(' $' + '{SDKBASEMETAPATH}/workspace \\\n')
- f.write(' "\n')
-
- # Copy uninative tarball
- # For now this is where uninative.bbclass expects the tarball
- if bb.data.inherits_class('uninative', d):
- uninative_file = d.expand('${UNINATIVE_DLDIR}/' + d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH")) + '/${UNINATIVE_TARBALL}')
- uninative_checksum = bb.utils.sha256_file(uninative_file)
- uninative_outdir = '%s/downloads/uninative/%s' % (baseoutpath, uninative_checksum)
- bb.utils.mkdirhier(uninative_outdir)
- shutil.copy(uninative_file, uninative_outdir)
-
- env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE') or '').split()
- env_whitelist_values = {}
-
- # Create local.conf
- builddir = d.getVar('TOPDIR')
- if derivative and os.path.exists(builddir + '/conf/site.conf'):
- shutil.copyfile(builddir + '/conf/site.conf', baseoutpath + '/conf/site.conf')
- if derivative and os.path.exists(builddir + '/conf/auto.conf'):
- shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
- if derivative:
- shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
- else:
- local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST') or '').split()
- local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST') or '').split()
- def handle_var(varname, origvalue, op, newlines):
- if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
- newlines.append('# Removed original setting of %s\n' % varname)
- return None, op, 0, True
- else:
- if varname in env_whitelist:
- env_whitelist_values[varname] = origvalue
- return origvalue, op, 0, True
- varlist = ['[^#=+ ]*']
- oldlines = []
- if os.path.exists(builddir + '/conf/site.conf'):
- with open(builddir + '/conf/site.conf', 'r') as f:
- oldlines += f.readlines()
- if os.path.exists(builddir + '/conf/auto.conf'):
- with open(builddir + '/conf/auto.conf', 'r') as f:
- oldlines += f.readlines()
- with open(builddir + '/conf/local.conf', 'r') as f:
- oldlines += f.readlines()
- (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
-
- with open(baseoutpath + '/conf/local.conf', 'w') as f:
- f.write('# WARNING: this configuration has been automatically generated and in\n')
- f.write('# most cases should not be edited. If you need more flexibility than\n')
- f.write('# this configuration provides, it is strongly suggested that you set\n')
- f.write('# up a proper instance of the full build system and use that instead.\n\n')
- for line in newlines:
- if line.strip() and not line.startswith('#'):
- f.write(line)
- # Write a newline just in case there's none at the end of the original
- f.write('\n')
-
- f.write('TMPDIR = "${TOPDIR}/tmp"\n')
- f.write('TCLIBCAPPEND = ""\n')
- f.write('DL_DIR = "${TOPDIR}/downloads"\n')
-
- if bb.data.inherits_class('uninative', d):
- f.write('INHERIT += "%s"\n' % 'uninative')
- f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
- f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
-
- # Some classes are not suitable for SDK, remove them from INHERIT
- f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
-
- # Bypass the default connectivity check if any
- f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
-
- # This warning will come out if reverse dependencies for a task
- # don't have sstate as well as the task itself. We already know
- # this will be the case for the extensible sdk, so turn off the
- # warning.
- f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
-
- # Warn if the sigs in the locked-signature file don't match
- # the sig computed from the metadata.
- f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
-
- # We want to be able to set this without a full reparse
- f.write('BB_HASHCONFIG_WHITELIST_append = " SIGGEN_UNLOCKED_RECIPES"\n\n')
-
- # Set up whitelist for run on install
- f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work wic-tools:* *:do_addto_recipe_sysroot"\n\n')
-
- # Hide the config information from bitbake output (since it's fixed within the SDK)
- f.write('BUILDCFG_HEADER = ""\n\n')
-
- f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
- f.write('WITHIN_EXT_SDK = "1"\n\n')
-
- # Map gcc-dependent uninative sstate cache for installer usage
- f.write('SSTATE_MIRRORS += " file://universal/(.*) file://universal-4.9/\\1 file://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
-
- # Allow additional config through sdk-extra.conf
- fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
- if fn:
- with open(fn, 'r') as xf:
- for line in xf:
- f.write(line)
-
- # If you define a sdk_extraconf() function then it can contain additional config
- # (Though this is awkward; sdk-extra.conf should probably be used instead)
- extraconf = (d.getVar('sdk_extraconf') or '').strip()
- if extraconf:
- # Strip off any leading / trailing spaces
- for line in extraconf.splitlines():
- f.write(line.strip() + '\n')
-
- f.write('require conf/locked-sigs.inc\n')
- f.write('require conf/unlocked-sigs.inc\n')
-
- if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
- bb.parse.siggen.save_unitaskhashes()
- bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
- shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
-
- # Use templateconf.cfg file from builddir if exists
- if os.path.exists(builddir + '/conf/templateconf.cfg'):
- shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
- else:
- # Write a templateconf.cfg
- with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
- f.write('meta/conf\n')
-
- # Ensure any variables set from the external environment (by way of
- # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
- extralines = []
- for name, value in env_whitelist_values.items():
- actualvalue = d.getVar(name) or ''
- if value != actualvalue:
- extralines.append('%s = "%s"\n' % (name, actualvalue))
- if extralines:
- with open(baseoutpath + '/conf/local.conf', 'a') as f:
- f.write('\n')
- f.write('# Extra settings from environment:\n')
- for line in extralines:
- f.write(line)
- f.write('\n')
-
- # Filter the locked signatures file to just the sstate tasks we are interested in
- excluded_targets = get_sdk_install_targets(d, images_only=True)
- sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
- lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
- # nativesdk-only sigfile to merge into locked-sigs.inc
- sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
- nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
- nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
-
- if sdk_include_nativesdk:
- oe.copy_buildsystem.prune_lockedsigs([],
- excluded_targets.split(),
- nativesigfile,
- True,
- nativesigfile_pruned)
-
- oe.copy_buildsystem.merge_lockedsigs([],
- sigfile,
- nativesigfile_pruned,
- sigfile)
-
- oe.copy_buildsystem.prune_lockedsigs([],
- excluded_targets.split(),
- sigfile,
- False,
- lockedsigs_pruned)
-
- sstate_out = baseoutpath + '/sstate-cache'
- bb.utils.remove(sstate_out, True)
-
- # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d)
- fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d)
-
- sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
- sdk_ext_type = d.getVar('SDK_EXT_TYPE')
- if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
- # Create the filtered task list used to generate the sstate cache shipped with the SDK
- tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
- create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
- else:
- tasklistfn = None
-
- if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
- bb.parse.siggen.save_unitaskhashes()
- bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
- shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
-
- # Add packagedata if enabled
- if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
- lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
- lockedsigs_copy = d.getVar('WORKDIR') + '/locked-sigs-copy.inc'
- shutil.move(lockedsigs_pruned, lockedsigs_base)
- oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
- lockedsigs_base,
- d.getVar('STAGING_DIR_HOST') + '/world-pkgdata/locked-sigs-pkgdata.inc',
- lockedsigs_pruned,
- lockedsigs_copy)
-
- if sdk_include_toolchain:
- lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base2.inc'
- lockedsigs_toolchain = d.expand("${STAGING_DIR}/${TUNE_PKGARCH}/meta-extsdk-toolchain/locked-sigs/locked-sigs-extsdk-toolchain.inc")
- shutil.move(lockedsigs_pruned, lockedsigs_base)
- oe.copy_buildsystem.merge_lockedsigs([],
- lockedsigs_base,
- lockedsigs_toolchain,
- lockedsigs_pruned)
- oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_toolchain,
- d.getVar('SSTATE_DIR'),
- sstate_out, d,
- fixedlsbstring,
- filterfile=tasklistfn)
-
- if sdk_ext_type == 'minimal':
- if derivative:
- # Assume the user is not going to set up an additional sstate
- # mirror, thus we need to copy the additional artifacts (from
- # workspace recipes) into the derivative SDK
- lockedsigs_orig = d.getVar('TOPDIR') + '/conf/locked-sigs.inc'
- if os.path.exists(lockedsigs_orig):
- lockedsigs_extra = d.getVar('WORKDIR') + '/locked-sigs-extra.inc'
- oe.copy_buildsystem.merge_lockedsigs(None,
- lockedsigs_orig,
- lockedsigs_pruned,
- None,
- lockedsigs_extra)
- oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
- d.getVar('SSTATE_DIR'),
- sstate_out, d,
- fixedlsbstring,
- filterfile=tasklistfn)
- else:
- oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
- d.getVar('SSTATE_DIR'),
- sstate_out, d,
- fixedlsbstring,
- filterfile=tasklistfn)
-
- # We don't need sstate do_package files
- for root, dirs, files in os.walk(sstate_out):
- for name in files:
- if name.endswith("_package.tgz"):
- f = os.path.join(root, name)
- os.remove(f)
-
- # Write manifest file
- # Note: at the moment we cannot include the env setup script here to keep
- # it updated, since it gets modified during SDK installation (see
- # sdk_ext_postinst() below), thus the checksum we take here would always
- # be different.
- manifest_file_list = ['conf/*']
- manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
- with open(manifest_file, 'w') as f:
- for item in manifest_file_list:
- for fn in glob.glob(os.path.join(baseoutpath, item)):
- if fn == manifest_file:
- continue
- chksum = bb.utils.sha256_file(fn)
- f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
-}
-
-def get_current_buildtools(d):
- """Get the file name of the current buildtools installer"""
- import glob
- btfiles = glob.glob(os.path.join(d.getVar('SDK_DEPLOY'), '*-buildtools-nativesdk-standalone-*.sh'))
- btfiles.sort(key=os.path.getctime)
- return os.path.basename(btfiles[-1])
-
-def get_sdk_required_utilities(buildtools_fn, d):
- """Find required utilities that aren't provided by the buildtools"""
- sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
- sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
- sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
- if buildtools_fn:
- buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
- filelist, _ = bb.process.run('%s -l' % buildtools_installer)
- else:
- buildtools_installer = None
- filelist = ""
- localdata = bb.data.createCopy(d)
- localdata.setVar('SDKPATH', '.')
- sdkpathnative = localdata.getVar('SDKPATHNATIVE')
- sdkbindirs = [localdata.getVar('bindir_nativesdk'),
- localdata.getVar('sbindir_nativesdk'),
- localdata.getVar('base_bindir_nativesdk'),
- localdata.getVar('base_sbindir_nativesdk')]
- for line in filelist.splitlines():
- splitline = line.split()
- if len(splitline) > 5:
- fn = splitline[5]
- if not fn.startswith('./'):
- fn = './%s' % fn
- if fn.startswith(sdkpathnative):
- relpth = '/' + os.path.relpath(fn, sdkpathnative)
- for bindir in sdkbindirs:
- if relpth.startswith(bindir):
- relpth = os.path.relpath(relpth, bindir)
- if relpth in sanity_required_utilities:
- sanity_required_utilities.remove(relpth)
- break
- return ' '.join(sanity_required_utilities)
-
-install_tools() {
- install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}
- scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
- for script in $scripts; do
- for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
- targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
- test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
- done
- done
- # We can't use the same method as above because files in the sysroot won't exist at this point
- # (they get populated from sstate on installation)
- unfsd_path="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/unfsd"
- if [ "${SDK_INCLUDE_TOOLCHAIN}" = "1" -a ! -e $unfsd_path ] ; then
- binrelpath=${@os.path.relpath(d.getVar('STAGING_BINDIR_NATIVE'), d.getVar('TMPDIR'))}
- lnr ${SDK_OUTPUT}/${SDKPATH}/tmp/$binrelpath/unfsd $unfsd_path
- fi
- touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
-
- # find latest buildtools-tarball and install it
- if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
- install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
- fi
-
- install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
-}
-do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
-
-sdk_ext_preinst() {
- # Since bitbake won't run as root it doesn't make sense to try and install
- # the extensible sdk as root.
- if [ "`id -u`" = "0" ]; then
- echo "ERROR: The extensible sdk cannot be installed as root."
- exit 1
- fi
- if ! command -v locale > /dev/null; then
- echo "ERROR: The installer requires the locale command, please install it first"
- exit 1
- fi
- # Check setting of LC_ALL set above
- canonicalised_locale=`echo $LC_ALL | sed 's/UTF-8/utf8/'`
- if ! locale -a | grep -q $canonicalised_locale ; then
- echo "ERROR: the installer requires the $LC_ALL locale to be installed (but not selected), please install it first"
- exit 1
- fi
- # The relocation script used by buildtools installer requires python
- if ! command -v python3 > /dev/null; then
- echo "ERROR: The installer requires python3, please install it first"
- exit 1
- fi
- missing_utils=""
- for util in ${SDK_REQUIRED_UTILITIES}; do
- if ! command -v $util > /dev/null; then
- missing_utils="$missing_utils $util"
- fi
- done
- if [ -n "$missing_utils" ] ; then
- echo "ERROR: the SDK requires the following missing utilities, please install them: $missing_utils"
- exit 1
- fi
- SDK_EXTENSIBLE="1"
- if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
- fi
-}
-SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
-
-# FIXME this preparation should be done as part of the SDK construction
-sdk_ext_postinst() {
- printf "\nExtracting buildtools...\n"
- cd $target_sdk_dir
- env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
- if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
- printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
-
- # Delete the buildtools tar file since it won't be used again
- rm -f ./${SDK_BUILDTOOLS_INSTALLER}
- # We don't need the log either since it succeeded
- rm -f buildtools.log
-
- # Make sure when the user sets up the environment, they also get
- # the buildtools-tarball tools in their path.
- echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
- fi
-
- # Allow bitbake environment setup to be run as part of this SDK.
- echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
- # Work around runqemu not knowing how to get this information within the eSDK
- echo "export DEPLOY_DIR_IMAGE=$target_sdk_dir/tmp/${@os.path.relpath(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('TMPDIR'))}" >> $env_setup_script
-
- # A bit of another hack, but we need this in the path only for devtool
- # so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
-
- echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
-
- # Warn if trying to use external bitbake and the ext SDK together
- echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
-
- if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
- printf "Preparing build system...\n"
- # dash, which is /bin/sh on Ubuntu, will not preserve the
- # current working directory when first run, nor will it set $1 when
- # sourcing a script. That is why this has to look so ugly.
- LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
- fi
- if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
- rm $target_sdk_dir/ext-sdk-prepare.py
- fi
- echo done
-}
-
-SDK_POST_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_postinst}"
-
-SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
-
-SDK_INSTALL_TARGETS = ""
-fakeroot python do_populate_sdk_ext() {
- # FIXME hopefully we can remove this restriction at some point, but uninative
- # currently forces this upon us
- if d.getVar('SDK_ARCH') != d.getVar('BUILD_ARCH'):
- bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
-
- d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
- if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
- buildtools_fn = get_current_buildtools(d)
- else:
- buildtools_fn = None
- d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
- d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
- d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
- # ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
- d.delVar('SDKIMAGE_LINGUAS')
- if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
- generate_nativesdk_lockedsigs(d)
- populate_sdk_common(d)
-}
-
-def generate_nativesdk_lockedsigs(d):
- import oe.copy_buildsystem
- sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
- oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
-
-def get_ext_sdk_depends(d):
- # Note: the deps varflag is a list not a string, so we need to specify expand=False
- deps = d.getVarFlag('do_image_complete', 'deps', False)
- pn = d.getVar('PN')
- deplist = ['%s:%s' % (pn, dep) for dep in deps]
- tasklist = bb.build.tasksbetween('do_image_complete', 'do_build', d)
- tasklist.append('do_rootfs')
- for task in tasklist:
- deplist.extend((d.getVarFlag(task, 'depends') or '').split())
- return ' '.join(deplist)
-
-python do_sdk_depends() {
- # We have to do this separately in its own task so we avoid recursing into
- # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
- # into the SDK's sstate-cache
- import oe.copy_buildsystem
- sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
- oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
-}
-addtask sdk_depends
-
-do_sdk_depends[dirs] = "${WORKDIR}"
-do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)} meta-extsdk-toolchain:do_populate_sysroot"
-do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
-do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
-do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
-
-def get_sdk_ext_rdepends(d):
- localdata = d.createCopy()
- localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
- return localdata.getVarFlag('do_populate_sdk', 'rdepends')
-
-do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
-
-do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
- ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
- ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
- ${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
-
-# We must avoid depending on do_build here if rm_work.bbclass is active,
-# because otherwise do_rm_work may run before do_populate_sdk_ext itself.
-# We can't mark do_populate_sdk_ext and do_sdk_depends as having to
-# run before do_rm_work, because then they would also run as part
-# of normal builds.
-do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':' + (d.getVar('RM_WORK_BUILD_WITHOUT') or 'do_build') for x in d.getVar('SDK_TARGETS').split()])}"
-
-# Make sure code changes can result in rebuild
-do_populate_sdk_ext[vardeps] += "copy_buildsystem \
- sdk_ext_postinst"
-
-# Since any change in the metadata of any layer should cause a rebuild of the
-# SDK (since the layers are put in the SDK), set the task to nostamp so it
-# always runs.
-do_populate_sdk_ext[nostamp] = "1"
-
-SDKEXTDEPLOYDIR = "${WORKDIR}/deploy-${PN}-populate-sdk-ext"
-
-SSTATETASKS += "do_populate_sdk_ext"
-SSTATE_SKIP_CREATION_task-populate-sdk-ext = '1'
-do_populate_sdk_ext[cleandirs] = "${SDKEXTDEPLOYDIR}"
-do_populate_sdk_ext[sstate-inputdirs] = "${SDKEXTDEPLOYDIR}"
-do_populate_sdk_ext[sstate-outputdirs] = "${SDK_DEPLOY}"
-do_populate_sdk_ext[stamp-extra-info] = "${MACHINE_ARCH}"
-
-addtask populate_sdk_ext after do_sdk_depends
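The sdk-conf-manifest written by copy_buildsystem() above records one sha256 checksum per conf file so that local modifications can be detected later. The following is an illustrative, standalone sketch (not part of the class) of checking an installed eSDK against that manifest; the directory argument is whatever location the SDK was installed into.

import hashlib
import os
import sys

def check_sdk_conf_manifest(sdk_dir):
    """Return the conf files whose checksum no longer matches the manifest."""
    manifest = os.path.join(sdk_dir, 'conf', 'sdk-conf-manifest')
    modified = []
    with open(manifest) as f:
        for line in f:
            # Each manifest line is "<sha256>\t<path relative to the SDK root>"
            chksum, relpath = line.rstrip('\n').split('\t', 1)
            with open(os.path.join(sdk_dir, relpath), 'rb') as conf:
                if hashlib.sha256(conf.read()).hexdigest() != chksum:
                    modified.append(relpath)
    return modified

if __name__ == '__main__':
    for relpath in check_sdk_conf_manifest(sys.argv[1]):
        print('modified since SDK creation: %s' % relpath)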
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
index 6dcf99e29f..e5098e3308 100644
--- a/meta/classes/prexport.bbclass
+++ b/meta/classes/prexport.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
PRSERV_DUMPOPT_PKGARCH = ""
PRSERV_DUMPOPT_CHECKSUM = ""
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
index 8ed45f03f0..00924174c1 100644
--- a/meta/classes/primport.bbclass
+++ b/meta/classes/primport.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
python primport_handler () {
import bb.event
if not e.data:
diff --git a/meta/classes/ptest-gnome.bbclass b/meta/classes/ptest-gnome.bbclass
deleted file mode 100644
index 478a33474d..0000000000
--- a/meta/classes/ptest-gnome.bbclass
+++ /dev/null
@@ -1,8 +0,0 @@
-inherit ptest
-
-EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
-
-FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
- ${datadir}/installed-tests/"
-
-RDEPENDS_${PN}-ptest += "gnome-desktop-testing"
diff --git a/meta/classes/ptest-perl.bbclass b/meta/classes/ptest-perl.bbclass
deleted file mode 100644
index a4bc40b51a..0000000000
--- a/meta/classes/ptest-perl.bbclass
+++ /dev/null
@@ -1,30 +0,0 @@
-inherit ptest
-
-FILESEXTRAPATHS_prepend := "${COREBASE}/meta/files:"
-
-SRC_URI += "file://ptest-perl/run-ptest"
-
-do_install_ptest_perl() {
- install -d ${D}${PTEST_PATH}
- if [ ! -f ${D}${PTEST_PATH}/run-ptest ]; then
- install -m 0755 ${WORKDIR}/ptest-perl/run-ptest ${D}${PTEST_PATH}
- fi
- cp -r ${B}/t ${D}${PTEST_PATH}
- chown -R root:root ${D}${PTEST_PATH}
-}
-
-FILES_${PN}-ptest_prepend = "${PTEST_PATH}/t/* ${PTEST_PATH}/run-ptest "
-
-RDEPENDS_${PN}-ptest_prepend = "perl "
-
-addtask install_ptest_perl after do_install_ptest_base before do_package
-
-python () {
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- d.setVarFlag('do_install_ptest_perl', 'fakeroot', '1')
-
- # Remove all '*ptest_perl' tasks when ptest is not enabled
- if not(d.getVar('PTEST_ENABLED') == "1"):
- for i in ['do_install_ptest_perl']:
- bb.build.deltask(i, d)
-}
diff --git a/meta/classes/ptest.bbclass b/meta/classes/ptest.bbclass
deleted file mode 100644
index fa4c36ec76..0000000000
--- a/meta/classes/ptest.bbclass
+++ /dev/null
@@ -1,119 +0,0 @@
-SUMMARY_${PN}-ptest ?= "${SUMMARY} - Package test files"
-DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
-This package contains a test directory ${PTEST_PATH} for package test purposes."
-
-PTEST_PATH ?= "${libdir}/${BPN}/ptest"
-PTEST_BUILD_HOST_FILES ?= "Makefile"
-PTEST_BUILD_HOST_PATTERN ?= ""
-
-FILES_${PN}-ptest = "${PTEST_PATH}"
-SECTION_${PN}-ptest = "devel"
-ALLOW_EMPTY_${PN}-ptest = "1"
-PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
-PTEST_ENABLED_class-native = ""
-PTEST_ENABLED_class-nativesdk = ""
-PTEST_ENABLED_class-cross-canadian = ""
-RDEPENDS_${PN}-ptest += "${PN}"
-RDEPENDS_${PN}-ptest_class-native = ""
-RDEPENDS_${PN}-ptest_class-nativesdk = ""
-RRECOMMENDS_${PN}-ptest += "ptest-runner"
-
-PACKAGES =+ "${@bb.utils.contains('PTEST_ENABLED', '1', '${PN}-ptest', '', d)}"
-
-do_configure_ptest() {
- :
-}
-
-do_configure_ptest_base() {
- do_configure_ptest
-}
-
-do_compile_ptest() {
- :
-}
-
-do_compile_ptest_base() {
- do_compile_ptest
-}
-
-do_install_ptest() {
- :
-}
-
-do_install_ptest_base() {
- if [ -f ${WORKDIR}/run-ptest ]; then
- install -D ${WORKDIR}/run-ptest ${D}${PTEST_PATH}/run-ptest
- fi
- if grep -q install-ptest: Makefile; then
- oe_runmake DESTDIR=${D}${PTEST_PATH} install-ptest
- fi
- do_install_ptest
- chown -R root:root ${D}${PTEST_PATH}
-
- # Strip build host paths from any installed Makefile
- for filename in ${PTEST_BUILD_HOST_FILES}; do
- for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
- bbnote "Stripping host paths from: $installed_ptest_file"
- sed -e 's#${HOSTTOOLS_DIR}/*##g' \
- -e 's#${WORKDIR}/*=#.=#g' \
- -e 's#${WORKDIR}/*##g' \
- -i $installed_ptest_file
- if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
- sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
- -i $installed_ptest_file
- fi
- done
- done
-}
-
-PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
-
-# This function needs to run after apply_update_alternative_renames because the
-# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
-# used here to make this function run as late as possible.
-PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
- bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
-
-python ptest_update_alternatives() {
- """
- This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
- to match the renamed binaries by update-alternatives.
- """
-
- if not bb.data.inherits_class('update-alternatives', d) \
- or not update_alternatives_enabled(d):
- return
-
- bb.note("Generating symlinks for ptest")
- bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
- d.getVar("sbindir"), d.getVar("base_sbindir") }
- ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
- os.mkdir(ptest_bindir)
- for pkg in (d.getVar('PACKAGES') or "").split():
- alternatives = update_alternatives_alt_targets(d, pkg)
- for alt_name, alt_link, alt_target, _ in alternatives:
- # Some alternatives are for man pages,
- # check if the alternative is in PATH
- if os.path.dirname(alt_link) in bin_paths:
- os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
-}
-
-do_configure_ptest_base[dirs] = "${B}"
-do_compile_ptest_base[dirs] = "${B}"
-do_install_ptest_base[dirs] = "${B}"
-do_install_ptest_base[cleandirs] = "${D}${PTEST_PATH}"
-
-addtask configure_ptest_base after do_configure before do_compile
-addtask compile_ptest_base after do_compile before do_install
-addtask install_ptest_base after do_install before do_package do_populate_sysroot
-
-python () {
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
- d.setVarFlag('do_install_ptest_base', 'umask', '022')
-
- # Remove all '*ptest_base' tasks when ptest is not enabled
- if not(d.getVar('PTEST_ENABLED') == "1"):
- for i in ['do_configure_ptest_base', 'do_compile_ptest_base', 'do_install_ptest_base']:
- bb.build.deltask(i, d)
-}
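For reference, the sed expressions in do_install_ptest_base() above strip build host paths out of installed Makefiles. A rough Python equivalent of those three substitutions is sketched below; the HOSTTOOLS_DIR and WORKDIR values are made-up examples, whereas in the class they come from the datastore.

import re

HOSTTOOLS_DIR = '/build/tmp/hosttools'                      # example value only
WORKDIR = '/build/tmp/work/core2-64-poky-linux/foo/1.0-r0'  # example value only

def strip_host_paths(text):
    # 's#${HOSTTOOLS_DIR}/*##g' - drop references to the host tools directory
    text = re.sub(re.escape(HOSTTOOLS_DIR) + r'/*', '', text)
    # 's#${WORKDIR}/*=#.=#g' - rewrite "<WORKDIR>[/]=" as ".="
    text = re.sub(re.escape(WORKDIR) + r'/*=', '.=', text)
    # 's#${WORKDIR}/*##g' - drop any remaining workdir prefixes
    text = re.sub(re.escape(WORKDIR) + r'/*', '', text)
    return text

print(strip_host_paths('CC := /build/tmp/hosttools/gcc\n'))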
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
deleted file mode 100644
index 87b4c85fc0..0000000000
--- a/meta/classes/pypi.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
-def pypi_package(d):
- bpn = d.getVar('BPN')
- if bpn.startswith('python-'):
- return bpn[7:]
- elif bpn.startswith('python3-'):
- return bpn[8:]
- return bpn
-
-PYPI_PACKAGE ?= "${@pypi_package(d)}"
-PYPI_PACKAGE_EXT ?= "tar.gz"
-
-def pypi_src_uri(d):
- package = d.getVar('PYPI_PACKAGE')
- package_ext = d.getVar('PYPI_PACKAGE_EXT')
- pv = d.getVar('PV')
- return 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (package[0], package, package, pv, package_ext)
-
-PYPI_SRC_URI ?= "${@pypi_src_uri(d)}"
-
-HOMEPAGE ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
-SECTION = "devel/python"
-SRC_URI += "${PYPI_SRC_URI}"
-S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
-
-UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
-UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
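As a worked example of the URL construction in pypi_src_uri() above (the values here are hypothetical, for illustration only): a recipe named python3-requests with PV = "2.22.0" gets PYPI_PACKAGE "requests", so the fetch URL is built from the package's first letter and name.

package, pv, package_ext = 'requests', '2.22.0', 'tar.gz'   # hypothetical values
src_uri = 'https://files.pythonhosted.org/packages/source/%s/%s/%s-%s.%s' % (
    package[0], package, package, pv, package_ext)
print(src_uri)
# -> https://files.pythonhosted.org/packages/source/r/requests/requests-2.22.0.tar.gz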
diff --git a/meta/classes/python3-dir.bbclass b/meta/classes/python3-dir.bbclass
deleted file mode 100644
index 036d7140d9..0000000000
--- a/meta/classes/python3-dir.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-PYTHON_BASEVERSION = "3.8"
-PYTHON_ABI = ""
-PYTHON_DIR = "python${PYTHON_BASEVERSION}"
-PYTHON_PN = "python3"
-PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/python3native.bbclass b/meta/classes/python3native.bbclass
deleted file mode 100644
index d98fb4c758..0000000000
--- a/meta/classes/python3native.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
-inherit python3-dir
-
-PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
-EXTRANATIVEPATH += "python3-native"
-DEPENDS_append = " python3-native "
-
-# python-config and other scripts are using distutils modules
-# which we patch to access these variables
-export STAGING_INCDIR
-export STAGING_LIBDIR
-
-# Packages can use
-# find_package(PythonInterp REQUIRED)
-# find_package(PythonLibs REQUIRED)
-# which ends up using libs/includes from build host
-# Therefore pre-empt that effort
-export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
-export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
-
-export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
-
-# suppress host user's site-packages dirs.
-export PYTHONNOUSERSITE = "1"
-
-# autoconf macros will use their internal default preference otherwise
-export PYTHON
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
deleted file mode 100644
index 55bdff816b..0000000000
--- a/meta/classes/qemu.bbclass
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# This class contains functions for recipes that need QEMU or test for its
-# existence.
-#
-
-def qemu_target_binary(data):
- package_arch = data.getVar("PACKAGE_ARCH")
- qemu_target_binary = (data.getVar("QEMU_TARGET_BINARY_%s" % package_arch) or "")
- if qemu_target_binary:
- return qemu_target_binary
-
- target_arch = data.getVar("TARGET_ARCH")
- if target_arch in ("i486", "i586", "i686"):
- target_arch = "i386"
- elif target_arch == "powerpc":
- target_arch = "ppc"
- elif target_arch == "powerpc64":
- target_arch = "ppc64"
- elif target_arch == "powerpc64le":
- target_arch = "ppc64le"
-
- return "qemu-" + target_arch
-
-def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
- import string
-
- qemu_binary = qemu_target_binary(data)
- if qemu_binary == "qemu-allarch":
- qemu_binary = "qemuwrapper"
-
- qemu_options = data.getVar("QEMU_OPTIONS")
-
- return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
- + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
-
-# The next function returns a string containing the command needed to run a
-# certain binary through qemu. For example, if a postinstall scriptlet must run
-# at do_rootfs time and is architecture dependent, we can run it through qemu.
-# In the postinstall scriptlet, we could use the following:
-#
-# ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
-#
-def qemu_run_binary(data, rootfs_path, binary):
- libdir = rootfs_path + data.getVar("libdir", False)
- base_libdir = rootfs_path + data.getVar("base_libdir", False)
-
- return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
-
-# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are
-# PACKAGE_ARCH, *NOT* overrides.
-# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
-# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
-# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
-# qemu-arm default CPU supports all required architecture levels.
-
-QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH')) or ""}"
-QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
-
-QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
-QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
-QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER8"
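To make the string composition in qemu_wrapper_cmdline()/qemu_run_binary() above easier to follow, here is a plain-Python sketch using placeholder arguments instead of the bitbake datastore; the resulting command runs a target binary under user-mode qemu with the rootfs as its library root.

def qemu_run_binary_sketch(qemu_binary, qemu_options, rootfs_path, binary):
    # Mirrors qemu_wrapper_cmdline() + qemu_run_binary(), with hard-coded libdirs
    library_paths = [rootfs_path + '/usr/lib', rootfs_path + '/lib']
    wrapper = ('PSEUDO_UNLOAD=1 ' + qemu_binary + ' ' + qemu_options +
               ' -L ' + rootfs_path +
               ' -E LD_LIBRARY_PATH=' + ':'.join(library_paths) + ' ')
    return wrapper + rootfs_path + binary

print(qemu_run_binary_sketch('qemu-arm', '-r 5.4', '/path/to/rootfs', '/usr/bin/test_app'))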
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
deleted file mode 100644
index 4162c4e790..0000000000
--- a/meta/classes/qemuboot.bbclass
+++ /dev/null
@@ -1,142 +0,0 @@
-# Help runqemu boot the target board. "QB" means Qemu Boot. The following
-# vars can be set in conf files, such as <bsp.conf>, so that the image can
-# be booted by runqemu:
-#
-# QB_SYSTEM_NAME: qemu name, e.g., "qemu-system-i386"
-#
-# QB_OPT_APPEND: options to append to qemu, e.g., "-device usb-mouse"
-#
-# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
-#
-# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
-#
-# QB_MEM: memory, e.g., "-m 512"
-#
-# QB_MACHINE: qemu machine, e.g., "-machine virt"
-#
-# QB_CPU: qemu cpu, e.g., "-cpu qemu32"
-#
-# QB_CPU_KVM: similar to QB_CPU, but used when KVM is enabled, e.g., '-cpu kvm64';
-# set it when KVM is supported.
-#
-# QB_KERNEL_CMDLINE_APPEND: options to append to kernel's -append
-# option, e.g., "console=ttyS0 console=tty"
-#
-# QB_DTB: qemu dtb name
-#
-# QB_AUDIO_DRV: qemu audio driver, e.g., "alsa"; set it when audio is supported
-#
-# QB_AUDIO_OPT: qemu audio option, e.g., "-soundhw ac97,es1370", used
-# when QB_AUDIO_DRV is set.
-#
-# QB_KERNEL_ROOT: kernel's root, e.g., /dev/vda
-#
-# QB_NETWORK_DEVICE: network device, e.g., "-device virtio-net-pci,netdev=net0,mac=@MAC@",
-# it needs to work together with QB_TAP_OPT and QB_SLIRP_OPT.
-# Note, runqemu will replace @MAC@ with a predefined mac, you can set
-# a custom one, but that may cause conflicts when multiple qemus are
-# running on the same host.
-# Note: If more than one interface of type -device virtio-net-device gets added,
-# QB_NETWORK_DEVICE_prepend might be used, since Qemu enumerates the eth*
-# devices in reverse order to -device arguments.
-#
-# QB_TAP_OPT: network option for 'tap' mode, e.g.,
-# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
-# Note, runqemu will replace "@TAP@" with the one which is used, such as tap0, tap1 ...
-#
-# QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0"
-#
-# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
-# ip= kernel command line argument needs to be changed accordingly. Details are documented
-# in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
-# Example to configure only the first interface: "ip=eth0:dhcp"
-# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
-# static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway
-# address of the qemu guest by runqemu.
-# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
-#
-# QB_ROOTFS_OPT: used as rootfs, e.g.,
-# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
-# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
-#
-# QB_SERIAL_OPT: serial port, e.g., "-serial mon:stdio"
-#
-# QB_TCPSERIAL_OPT: tcp serial port option, e.g.,
-# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
-# Note, runqemu will replace "@PORT@" with the port number which is used.
-#
-# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT.
-# Can be used to automatically determine the image from the other variables
-# but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
-# without the need to specify a dedicated qemu configuration
-# Usage:
-# IMAGE_CLASSES += "qemuboot"
-# See "runqemu help" for more info
-
-QB_MEM ?= "-m 256"
-QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
-QB_DEFAULT_FSTYPE ?= "ext4"
-QB_OPT_APPEND ?= ""
-QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
-QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
-QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
-QB_ROOTFS_EXTRA_OPT ?= ""
-
-# This should be kept aligned with ROOT_VM
-QB_DRIVE_TYPE ?= "/dev/sd"
-
-# Create qemuboot.conf
-addtask do_write_qemuboot_conf after do_rootfs before do_image
-
-def qemuboot_vars(d):
- build_vars = ['MACHINE', 'TUNE_ARCH', 'DEPLOY_DIR_IMAGE',
- 'KERNEL_IMAGETYPE', 'IMAGE_NAME', 'IMAGE_LINK_NAME',
- 'STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE',
- 'STAGING_DIR_HOST']
- return build_vars + [k for k in d.keys() if k.startswith('QB_')]
-
-do_write_qemuboot_conf[vardeps] += "${@' '.join(qemuboot_vars(d))}"
-do_write_qemuboot_conf[vardepsexclude] += "TOPDIR"
-python do_write_qemuboot_conf() {
- import configparser
-
- qemuboot = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_NAME'))
- qemuboot_link = "%s/%s.qemuboot.conf" % (d.getVar('IMGDEPLOYDIR'), d.getVar('IMAGE_LINK_NAME'))
- finalpath = d.getVar("DEPLOY_DIR_IMAGE")
- topdir = d.getVar('TOPDIR')
- cf = configparser.ConfigParser()
- cf.add_section('config_bsp')
- for k in sorted(qemuboot_vars(d)):
- # qemu-helper-native sysroot is not removed by rm_work and
- # contains all tools required by runqemu
- if k == 'STAGING_BINDIR_NATIVE':
- val = os.path.join(d.getVar('BASE_WORKDIR'), d.getVar('BUILD_SYS'),
- 'qemu-helper-native/1.0-r1/recipe-sysroot-native/usr/bin/')
- else:
- val = d.getVar(k)
- # we only want to write out relative paths so that we can relocate images
- # and still run them
- if val.startswith(topdir):
- val = os.path.relpath(val, finalpath)
- cf.set('config_bsp', k, '%s' % val)
-
- # QB_DEFAULT_KERNEL's value of KERNEL_IMAGETYPE is the name of a symlink
- # to the kernel file, which hinders relocatability of the qb conf.
- # Read the link and replace it with the full filename of the target.
- kernel_link = os.path.join(d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('QB_DEFAULT_KERNEL'))
- kernel = os.path.realpath(kernel_link)
- # we only want to write out relative paths so that we can relocate images
- # and still run them
- kernel = os.path.relpath(kernel, finalpath)
- cf.set('config_bsp', 'QB_DEFAULT_KERNEL', kernel)
-
- bb.utils.mkdirhier(os.path.dirname(qemuboot))
- with open(qemuboot, 'w') as f:
- cf.write(f)
-
- if qemuboot_link != qemuboot:
- if os.path.lexists(qemuboot_link):
- os.remove(qemuboot_link)
- os.symlink(os.path.basename(qemuboot), qemuboot_link)
-}
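The qemuboot.conf written by do_write_qemuboot_conf() above is a plain ini file with a single [config_bsp] section holding MACHINE and the QB_* values, which runqemu later reads back with configparser. A minimal reader (the file name below is hypothetical) looks like this:

import configparser

cf = configparser.ConfigParser()
cf.read('core-image-minimal-qemux86-64.qemuboot.conf')   # hypothetical file name
if cf.has_section('config_bsp'):
    for key, value in cf.items('config_bsp'):
        print('%s = %s' % (key, value))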
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
index 7fa4a849ea..a5cc4315fb 100644
--- a/meta/classes/recipe_sanity.bbclass
+++ b/meta/classes/recipe_sanity.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
def __note(msg, d):
bb.note("%s: recipe_sanity: %s" % (d.getVar("P"), msg))
@@ -10,7 +16,7 @@ def bad_runtime_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_badruntimevars").split():
val = d.getVar(var, False)
if val and val != cfgdata.get(var):
- __note("%s should be %s_${PN}" % (var, var), d)
+ __note("%s should be %s:${PN}" % (var, var), d)
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = ""
diff --git a/meta/classes/relative_symlinks.bbclass b/meta/classes/relative_symlinks.bbclass
index 3157737347..9ee20e0d09 100644
--- a/meta/classes/relative_symlinks.bbclass
+++ b/meta/classes/relative_symlinks.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
do_install[postfuncs] += "install_relative_symlinks"
python install_relative_symlinks () {
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
index af04be5cca..d0a623fb0a 100644
--- a/meta/classes/relocatable.bbclass
+++ b/meta/classes/relocatable.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
inherit chrpath
SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess relocatable_native_pcfiles"
diff --git a/meta/classes/remove-libtool.bbclass b/meta/classes/remove-libtool.bbclass
index 3fd0cd58f9..8e987388c8 100644
--- a/meta/classes/remove-libtool.bbclass
+++ b/meta/classes/remove-libtool.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# This class removes libtool .la files after do_install
REMOVE_LIBTOOL_LA ?= "1"
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 1a12db1206..1452513a66 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -4,7 +4,8 @@
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Brandusa Proca <andreea.b.proca@intel.com>
#
-# Licensed under the MIT license, see COPYING.MIT for details
+# SPDX-License-Identifier: MIT
+#
ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
@@ -38,6 +39,19 @@ def get_conf_data(e, filename):
jsonstring=jsonstring + line
return jsonstring
+def get_common_data(e):
+ data = {}
+ data['machine'] = e.data.getVar("MACHINE")
+ data['build_sys'] = e.data.getVar("BUILD_SYS")
+ data['distro'] = e.data.getVar("DISTRO")
+ data['target_sys'] = e.data.getVar("TARGET_SYS")
+ data['branch_commit'] = str(oe.buildcfg.detect_branch(e.data)) + ": " + str(oe.buildcfg.detect_revision(e.data))
+ data['bitbake_version'] = e.data.getVar("BB_VERSION")
+ data['layer_version'] = get_layers_branch_rev(e.data)
+ data['local_conf'] = get_conf_data(e, 'local.conf')
+ data['auto_conf'] = get_conf_data(e, 'auto.conf')
+ return data
+
python errorreport_handler () {
import json
import codecs
@@ -55,17 +69,10 @@ python errorreport_handler () {
if isinstance(e, bb.event.BuildStarted):
bb.utils.mkdirhier(logpath)
data = {}
- machine = e.data.getVar("MACHINE")
- data['machine'] = machine
- data['build_sys'] = e.data.getVar("BUILD_SYS")
+ data = get_common_data(e)
data['nativelsb'] = nativelsb()
- data['distro'] = e.data.getVar("DISTRO")
- data['target_sys'] = e.data.getVar("TARGET_SYS")
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
- data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
- data['local_conf'] = get_conf_data(e, 'local.conf')
- data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -104,6 +111,37 @@ python errorreport_handler () {
errorreport_savedata(e, jsondata, "error-report.txt")
bb.utils.unlockfile(lock)
+ elif isinstance(e, bb.event.NoProvider):
+ bb.utils.mkdirhier(logpath)
+ data = {}
+ data = get_common_data(e)
+ data['nativelsb'] = nativelsb()
+ data['failures'] = []
+ data['component'] = str(e._item)
+ taskdata={}
+ taskdata['log'] = str(e)
+ taskdata['package'] = str(e._item)
+ taskdata['task'] = "Nothing provides " + "'" + str(e._item) + "'"
+ data['failures'].append(taskdata)
+ lock = bb.utils.lockfile(datafile + '.lock')
+ errorreport_savedata(e, data, "error-report.txt")
+ bb.utils.unlockfile(lock)
+
+ elif isinstance(e, bb.event.ParseError):
+ bb.utils.mkdirhier(logpath)
+ data = {}
+ data = get_common_data(e)
+ data['nativelsb'] = nativelsb()
+ data['failures'] = []
+ data['component'] = "parse"
+ taskdata={}
+ taskdata['log'] = str(e._msg)
+ taskdata['task'] = str(e._msg)
+ data['failures'].append(taskdata)
+ lock = bb.utils.lockfile(datafile + '.lock')
+ errorreport_savedata(e, data, "error-report.txt")
+ bb.utils.unlockfile(lock)
+
elif isinstance(e, bb.event.BuildCompleted):
lock = bb.utils.lockfile(datafile + '.lock')
jsondata = json.loads(errorreport_getdata(e))
@@ -117,4 +155,4 @@ python errorreport_handler () {
}
addhandler errorreport_handler
-errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed"
+errorreport_handler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted bb.build.TaskFailed bb.event.NoProvider bb.event.ParseError"
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
deleted file mode 100644
index 8da40f656a..0000000000
--- a/meta/classes/reproducible_build.bbclass
+++ /dev/null
@@ -1,202 +0,0 @@
-# reproducible_build.bbclass
-#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
-# Upstream components (generally) respect this environment variable,
-# using it in place of the "current" date and time.
-# See https://reproducible-builds.org/specs/source-date-epoch/
-#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
-#
-# There are five ways we determine SOURCE_DATE_EPOCH:
-#
-# 1. Use the value from the __source_date_epoch.txt file if it exists.
-# This file was most likely created in a previous build by one of methods 2, 3 or 4 below.
-# Alternatively, it can be provided by a recipe via SRC_URI.
-#
-# If the file does not exist:
-#
-# 2. If there is a git checkout, use the last git commit timestamp.
-# Git does not preserve file timestamps on checkout.
-#
-# 3. Use the mtime of "known" files such as NEWS, ChangeLog, ...
-# This works for well-kept repositories distributed via tarball.
-#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
-# This will be the newest file from the distribution tarball, if any.
-#
-# 5. Fall back to a fixed timestamp.
-#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
-
-BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
-
-SDE_DIR ="${WORKDIR}/source-date-epoch"
-SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
-SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
-
-SSTATETASKS += "do_deploy_source_date_epoch"
-
-do_deploy_source_date_epoch () {
- mkdir -p ${SDE_DEPLOYDIR}
- if [ -e ${SDE_FILE} ]; then
- echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
- cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
- else
- echo "${SDE_FILE} not found!"
- fi
-}
-
-python do_deploy_source_date_epoch_setscene () {
- sstate_setscene(d)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
- if os.path.exists(sde_file):
- target = d.getVar('SDE_FILE')
- bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
- os.rename(sde_file, target)
- else:
- bb.debug(1, "%s not found!" % sde_file)
-}
-
-do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
-addtask do_deploy_source_date_epoch_setscene
-addtask do_deploy_source_date_epoch before do_configure after do_patch
-
-def get_source_date_epoch_from_known_files(d, sourcedir):
- source_date_epoch = None
- newest_file = None
- known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
- for file in known_files:
- filepath = os.path.join(sourcedir, file)
- if os.path.isfile(filepath):
- mtime = int(os.lstat(filepath).st_mtime)
- # There may be more than one "known_file" present, if so, use the youngest one
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filepath
- if newest_file:
- bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
- return source_date_epoch
-
-def find_git_folder(d, sourcedir):
- # First guess: WORKDIR/git
- # This is the default git fetcher unpack path
- workdir = d.getVar('WORKDIR')
- gitpath = os.path.join(workdir, "git/.git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Second guess: ${S}
- gitpath = os.path.join(sourcedir, ".git")
- if os.path.isdir(gitpath):
- return gitpath
-
- # Perhaps there was a subpath or destsuffix specified.
- # Go looking in the WORKDIR
- exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
- "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
- for root, dirs, files in os.walk(workdir, topdown=True):
- dirs[:] = [d for d in dirs if d not in exclude]
- if '.git' in dirs:
- return root
-
- bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
- return None
-
-def get_source_date_epoch_from_git(d, sourcedir):
- source_date_epoch = None
- if "git://" in d.getVar('SRC_URI'):
- gitpath = find_git_folder(d, sourcedir)
- if gitpath:
- import subprocess
- source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
- bb.debug(1, "git repository: %s" % gitpath)
- return source_date_epoch
-
-def get_source_date_epoch_from_youngest_file(d, sourcedir):
- if sourcedir == d.getVar('WORKDIR'):
- # These sources are almost certainly not from a tarball
- return None
-
- # Do it the hard way: check all files and find the youngest one...
- source_date_epoch = None
- newest_file = None
- for root, dirs, files in os.walk(sourcedir, topdown=True):
- files = [f for f in files if not f[0] == '.']
-
- for fname in files:
- filename = os.path.join(root, fname)
- try:
- mtime = int(os.lstat(filename).st_mtime)
- except ValueError:
- mtime = 0
- if not source_date_epoch or mtime > source_date_epoch:
- source_date_epoch = mtime
- newest_file = filename
-
- if newest_file:
- bb.debug(1, "Newest file found: %s" % newest_file)
- return source_date_epoch
-
-def fixed_source_date_epoch():
- bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
- return 0
-
-python create_source_date_epoch_stamp() {
- epochfile = d.getVar('SDE_FILE')
- # If it exists we need to regenerate as the sources may have changed
- if os.path.isfile(epochfile):
- bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
- os.remove(epochfile)
-
- sourcedir = d.getVar('S')
- source_date_epoch = (
- get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
- get_source_date_epoch_from_youngest_file(d, sourcedir) or
- fixed_source_date_epoch() # Last resort
- )
-
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(epochfile, 'w') as f:
- f.write(str(source_date_epoch))
-}
-
-def get_source_date_epoch_value(d):
- cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
- if cached:
- return cached
-
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = 0
- if os.path.isfile(epochfile):
- with open(epochfile, 'r') as f:
- s = f.read()
- try:
- source_date_epoch = int(s)
- except ValueError:
- bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
- source_date_epoch = 0
- bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- else:
- bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
-
- d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
- return str(source_date_epoch)
-
-export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
-BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
-}
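For clarity, the fallback order described in the header of reproducible_build.bbclass above can be summarised as a small standalone sketch (this is not the class code, just the same logic outside bitbake): cached value, then git, then "known" files, then the youngest file, then a fixed 0.

import os
import subprocess

def source_date_epoch_sketch(sourcedir, sde_file):
    if os.path.isfile(sde_file):                        # 1. previously cached value
        with open(sde_file) as f:
            return int(f.read())
    if os.path.isdir(os.path.join(sourcedir, '.git')):  # 2. last git commit timestamp
        out = subprocess.check_output(['git', 'log', '-1', '--pretty=%ct'], cwd=sourcedir)
        return int(out)
    mtimes = []                                         # 3. "known" files (NEWS, ChangeLog, ...)
    for name in ('NEWS', 'ChangeLog', 'Changelog', 'CHANGES'):
        path = os.path.join(sourcedir, name)
        if os.path.isfile(path):
            mtimes.append(int(os.lstat(path).st_mtime))
    if mtimes:
        return max(mtimes)
    for root, _dirs, files in os.walk(sourcedir):       # 4. youngest file in the tree
        mtimes.extend(int(os.lstat(os.path.join(root, f)).st_mtime) for f in files)
    if mtimes:
        return max(mtimes)
    return 0                                            # 5. fixed fallback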
diff --git a/meta/classes/reproducible_build_simple.bbclass b/meta/classes/reproducible_build_simple.bbclass
deleted file mode 100644
index 393372993d..0000000000
--- a/meta/classes/reproducible_build_simple.bbclass
+++ /dev/null
@@ -1,9 +0,0 @@
-# Setup default environment for reproducible builds.
-
-BUILD_REPRODUCIBLE_BINARIES = "1"
-
-export PYTHONHASHSEED = "0"
-export PERL_HASH_SEED = "0"
-export SOURCE_DATE_EPOCH ??= "1520598896"
-
-REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..52ecfafb72 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -1,4 +1,10 @@
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
# Removes source after build
#
# To use it add that line to conf/local.conf:
@@ -13,7 +19,7 @@
# Recipes can also configure which entries in their ${WORKDIR}
# are preserved besides temp, which already gets excluded by default
# because it contains logs:
-# do_install_append () {
+# do_install:append () {
# echo "bar" >${WORKDIR}/foo
# }
# RM_WORK_EXCLUDE_ITEMS += "foo"
@@ -24,9 +30,16 @@ RM_WORK_EXCLUDE_ITEMS = "temp"
BB_SCHEDULER ?= "completion"
# Run the rm_work task in the idle scheduling class
-BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
+BB_TASK_IONICE_LEVEL:task-rm_work = "3.0"
do_rm_work () {
+ # Force using the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
+ # Avoids race-condition accessing 'rm' when deleting WORKDIR folders at the end of this function
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -44,55 +57,58 @@ do_rm_work () {
# Change normal stamps into setscene stamps as they better reflect the
# fact WORKDIR is now empty
# Also leave noexec stamps since setscene stamps don't cover them
- cd `dirname ${STAMP}`
- for i in `basename ${STAMP}`*
- do
- case $i in
- *sigdata*|*sigbasedata*)
- # Save/skip anything that looks like a signature data file.
- ;;
- *do_image_complete_setscene*|*do_image_qa_setscene*)
- # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
- ;;
- *do_image_complete*)
- # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
- mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
- ;;
- *do_image_qa*)
- # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
- mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
- ;;
- *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
- ;;
- *do_addto_recipe_sysroot*)
- # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
- excludes="$excludes recipe-sysroot-native"
- ;;
- *do_package|*do_package.*|*do_package_setscene.*)
- # We remove do_package entirely, including any
- # sstate version since otherwise we'd need to leave 'plaindirs' around
- # such as 'packages' and 'packages-split' and these can be large. No end
- # of chain tasks depend directly on do_package anymore.
- rm -f $i;
- ;;
- *_setscene*)
- # Skip stamps which are already setscene versions
- ;;
- *)
- # For everything else: if suitable, promote the stamp to a setscene
- # version, otherwise remove it
- for j in ${SSTATETASKS} do_shared_workdir
- do
- case $i in
- *$j|*$j.*)
- mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
- break
- ;;
- esac
- done
- rm -f $i
- esac
- done
+ STAMPDIR=`dirname ${STAMP}`
+ if test -d $STAMPDIR; then
+ cd $STAMPDIR
+ for i in `basename ${STAMP}`*
+ do
+ case $i in
+ *sigdata*|*sigbasedata*)
+ # Save/skip anything that looks like a signature data file.
+ ;;
+ *do_image_complete_setscene*|*do_image_qa_setscene*)
+ # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
+ ;;
+ *do_image_complete*)
+ # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
+ ;;
+ *do_image_qa*)
+ # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
+ ;;
+ *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
+ ;;
+ *do_addto_recipe_sysroot*)
+ # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
+ excludes="$excludes recipe-sysroot-native"
+ ;;
+ *do_package|*do_package.*|*do_package_setscene.*)
+ # We remove do_package entirely, including any
+ # sstate version since otherwise we'd need to leave 'plaindirs' around
+ # such as 'packages' and 'packages-split' and these can be large. No end
+ # of chain tasks depend directly on do_package anymore.
+ "${RM_BIN}" -f -- $i;
+ ;;
+ *_setscene*)
+ # Skip stamps which are already setscene versions
+ ;;
+ *)
+ # For everything else: if suitable, promote the stamp to a setscene
+ # version, otherwise remove it
+ for j in ${SSTATETASKS} do_shared_workdir
+ do
+ case $i in
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ break
+ ;;
+ esac
+ done
+ "${RM_BIN}" -f -- $i
+ esac
+ done
+ fi
cd ${WORKDIR}
for dir in *
@@ -100,12 +116,14 @@ do_rm_work () {
# Retain only logs and other files in temp, safely ignore
# failures of removing pseudo folders on an NFS2/3 server.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
+do_rm_work[vardepsexclude] += "SSTATETASKS"
+
do_rm_work_all () {
:
}
@@ -172,7 +190,7 @@ python inject_rm_work() {
# other recipes and thus will typically run much later than completion of
# work in the recipe itself.
# In practice, addtask() here merely updates the dependencies.
- bb.build.addtask('do_rm_work', 'do_build', ' '.join(deps), d)
+ bb.build.addtask('do_rm_work', 'do_rm_work_all do_build', ' '.join(deps), d)
# Always update do_build_without_rm_work dependencies.
bb.build.addtask('do_build_without_rm_work', '', ' '.join(deps), d)
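
Putting the pieces above together, the class is normally driven entirely from configuration; a minimal sketch using only the variables shown in this diff (recipe names are illustrative), with RM_WORK_EXCLUDE set in local.conf and RM_WORK_EXCLUDE_ITEMS set per recipe as the header comment describes:

    # conf/local.conf
    INHERIT += "rm_work"
    # Keep the complete WORKDIR of recipes that are being actively debugged.
    RM_WORK_EXCLUDE += "busybox linux-yocto"
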
diff --git a/meta/classes/rm_work_and_downloads.bbclass b/meta/classes/rm_work_and_downloads.bbclass
index 7c00bea597..2695a3807f 100644
--- a/meta/classes/rm_work_and_downloads.bbclass
+++ b/meta/classes/rm_work_and_downloads.bbclass
@@ -1,8 +1,7 @@
# Author: Patrick Ohly <patrick.ohly@intel.com>
# Copyright: Copyright (C) 2015 Intel Corporation
#
-# This file is licensed under the MIT license, see COPYING.MIT in
-# this source distribution for the terms.
+# SPDX-License-Identifier: MIT
# This class is used like rm_work:
# INHERIT += "rm_work_and_downloads"
@@ -28,6 +27,6 @@ inherit rm_work
# Instead go up one level and remove ourself.
DL_DIR = "${BASE_WORKDIR}/${MULTIMACH_TARGET_SYS}/${PN}/downloads"
-do_rm_work_append () {
+do_rm_work:append () {
rm -rf ${DL_DIR}
}
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
deleted file mode 100644
index c43b9a9823..0000000000
--- a/meta/classes/rootfs-postcommands.bbclass
+++ /dev/null
@@ -1,373 +0,0 @@
-
-# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
-
-# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
-
-# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
-
-# Enable postinst logging if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
-
-# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
-
-# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
-
-# We also need to do the same for the kernel boot parameters,
-# otherwise kernel or initramfs end up mounting the rootfs read/write
-# (the default) if supported by the underlying storage.
-#
-# We do this with _append because the default value might get set later with ?=
-# and we don't want to disable such a default by setting a value here.
-APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
-
-# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
-
-# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
-ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
-# Set default postinst log file
-POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
-# Set default target for systemd images
-SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
-
-ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
-
-# Sort the user and group entries in /etc by ID in order to make the content
-# deterministic. Package installs are not deterministic, causing the ordering
-# of entries to change between builds. In case this isn't desired,
-# the command can be overridden.
-#
-# Note that useradd-staticids.bbclass has to be used to ensure that
-# the numeric IDs of dynamically created entries remain stable.
-#
-# We want this to run as late as possible, in particular after
-# systemd_sysusers_create and set_user_group. Using _append is not
-# enough for that, set_user_group is added that way and would end
-# up running after us.
-SORT_PASSWD_POSTPROCESS_COMMAND ??= " sort_passwd; "
-python () {
- d.appendVar('ROOTFS_POSTPROCESS_COMMAND', '${SORT_PASSWD_POSTPROCESS_COMMAND}')
- d.appendVar('ROOTFS_POSTPROCESS_COMMAND', 'rootfs_reproducible;')
-}
-
-systemd_create_users () {
- for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
- [ -e $conffile ] || continue
- grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
- if [ "$type" = "u" ]; then
- useradd_params="--shell /sbin/nologin"
- [ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
- [ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
- useradd_params="$useradd_params --system $name"
- eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
- elif [ "$type" = "g" ]; then
- groupadd_params=""
- [ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
- groupadd_params="$groupadd_params --system $name"
- eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
- elif [ "$type" = "m" ]; then
- group=$id
- if [ ! `grep -q "^${group}:" ${IMAGE_ROOTFS}${sysconfdir}/group` ]; then
- eval groupadd --root ${IMAGE_ROOTFS} --system $group
- fi
- if [ ! `grep -q "^${name}:" ${IMAGE_ROOTFS}${sysconfdir}/passwd` ]; then
- eval useradd --root ${IMAGE_ROOTFS} --shell /sbin/nologin --system $name
- fi
- eval usermod --root ${IMAGE_ROOTFS} -a -G $group $name
- fi
- done
- done
-}
-
-#
-# A hook function to support read-only-rootfs IMAGE_FEATURES
-#
-read_only_rootfs_hook () {
- # Tweak the mount option and fs_passno for rootfs in fstab
- if [ -f ${IMAGE_ROOTFS}/etc/fstab ]; then
- sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
- fi
-
- # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
- if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
- sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
- fi
-
- # If we're using openssh and the /etc/ssh directory has no pre-generated keys,
- # we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
- # and the keys under /var/run/ssh.
- if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
- if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
- echo "SYSCONFDIR=\${SYSCONFDIR:-/etc/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
- else
- echo "SYSCONFDIR=\${SYSCONFDIR:-/var/run/ssh}" >> ${IMAGE_ROOTFS}/etc/default/ssh
- echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
- fi
- fi
-
- # Also tweak the key location for dropbear in the same way.
- if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
- if [ ! -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
- echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
- fi
- fi
-
- if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
- # Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
- if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
- sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
- fi
- # Run populate-volatile.sh at rootfs time to set up basic files
- # and directories to support read-only rootfs.
- if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
- ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
- fi
- fi
-
- if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
- # Create machine-id
- # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
- touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
- fi
-}
-
-#
-# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
-#
-zap_empty_root_password () {
- if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
- sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
- fi
- if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
- sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
- fi
-}
-
-#
-# allow dropbear/openssh to accept logins from accounts with an empty password string
-#
-ssh_allow_empty_password () {
- for config in sshd_config sshd_config_readonly; do
- if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
- sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
- fi
- done
-
- if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
- if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
- if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
- sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
- fi
- else
- printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
- fi
- fi
-
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
- for f in `find ${IMAGE_ROOTFS}${sysconfdir}/pam.d/* -type f -exec test -e {} \; -print`
- do
- sed -i 's/nullok_secure/nullok/' $f
- done
- fi
-}
-
-#
-# allow dropbear/openssh to accept root logins
-#
-ssh_allow_root_login () {
- for config in sshd_config sshd_config_readonly; do
- if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
- sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
- fi
- done
-
- if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
- if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
- sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
- fi
- fi
-}
-
-python sort_passwd () {
- import rootfspostcommands
- rootfspostcommands.sort_passwd(d.expand('${IMAGE_ROOTFS}${sysconfdir}'))
-}
-
-#
-# Enable postinst logging if debug-tweaks is enabled
-#
-postinst_enable_logging () {
- mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
- echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
- echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
-}
-
-#
-# Modify systemd default target
-#
-set_systemd_default_target () {
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
- ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
- fi
-}
-
-# If /var/volatile is not empty, we have seen problems where programs such as the
-# journal make assumptions based on the contents of /var/volatile. The journal
-# would then write to /var/volatile before it was mounted, thus hiding the
-# items previously written.
-#
-# This change is to attempt to fix those types of issues in a way that doesn't
-# affect users that may not be using /var/volatile.
-empty_var_volatile () {
- if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
- match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
- if [ -n "$match" ]; then
- find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
- fi
- fi
-}
-
-# Turn any symbolic /sbin/init link into a file
-remove_init_link () {
- if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
- LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
- rm ${IMAGE_ROOTFS}/sbin/init
- cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
- fi
-}
-
-make_zimage_symlink_relative () {
- if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
- (cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
- fi
-}
-
-python write_image_manifest () {
- from oe.rootfs import image_list_installed_packages
- from oe.utils import format_pkg_list
-
- deploy_dir = d.getVar('IMGDEPLOYDIR')
- link_name = d.getVar('IMAGE_LINK_NAME')
- manifest_name = d.getVar('IMAGE_MANIFEST')
-
- if not manifest_name:
- return
-
- pkgs = image_list_installed_packages(d)
- with open(manifest_name, 'w+') as image_manifest:
- image_manifest.write(format_pkg_list(pkgs, "ver"))
-
- if os.path.exists(manifest_name) and link_name:
- manifest_link = deploy_dir + "/" + link_name + ".manifest"
- if os.path.lexists(manifest_link):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
-}
-
-# Can be used to create /etc/timestamp during image construction to give a reasonably
-# sane default time setting
-rootfs_update_timestamp () {
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
- # Convert UTC into %4Y%2m%2d%2H%2M%2S
- sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
- else
- sformatted=`date -u +%4Y%2m%2d%2H%2M%2S`
- fi
- echo $sformatted > ${IMAGE_ROOTFS}/etc/timestamp
- bbnote "rootfs_update_timestamp: set /etc/timestamp to $sformatted"
-}
-
-# Prevent X from being started
-rootfs_no_x_startup () {
- if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
- chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
- fi
-}
-
-rootfs_trim_schemas () {
- for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
- do
- # Need this in case no files exist
- if [ -e $schema ]; then
- oe-trim-schemas $schema > $schema.new
- mv $schema.new $schema
- fi
- done
-}
-
-rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
- HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
- HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
-
- find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
- -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
-
- sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
- bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
- done
-
- if [ -s "$contaminated" ]; then
- bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
- bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
- fi
-}
-
-# Make any absolute links in a sysroot relative
-rootfs_sysroot_relativelinks () {
- sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
-}
-
-# Generate the test data JSON file
-python write_image_test_data() {
- from oe.data import export2json
-
- deploy_dir = d.getVar('IMGDEPLOYDIR')
- link_name = d.getVar('IMAGE_LINK_NAME')
- testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
-
- searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
- export2json(d, testdata_name, searchString=searchString, replaceString="")
-
- if os.path.exists(testdata_name) and link_name:
- testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
- if os.path.lexists(testdata_link):
- os.remove(testdata_link)
- os.symlink(os.path.basename(testdata_name), testdata_link)
-}
-write_image_test_data[vardepsexclude] += "TOPDIR"
-
-# Check for unsatisfied recommendations (RRECOMMENDS)
-python rootfs_log_check_recommends() {
- log_path = d.expand("${T}/log.do_rootfs")
- with open(log_path, 'r') as log:
- for line in log:
- if 'log_check' in line:
- continue
-
- if 'unsatisfied recommendation for' in line:
- bb.warn('[log_check] %s: %s' % (d.getVar('PN'), line))
-}
-
-# Perform any additional adjustments needed to make the rootfs binary reproducible
-rootfs_reproducible () {
- if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" != "" ]; then
- # Convert UTC into %4Y%2m%2d%2H%2M%2S
- sformatted=`date -u -d @${REPRODUCIBLE_TIMESTAMP_ROOTFS} +%4Y%2m%2d%2H%2M%2S`
- echo $sformatted > ${IMAGE_ROOTFS}/etc/version
- bbnote "rootfs_reproducible: set /etc/version to $sformatted"
-
- if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
- find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
- sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
- fi
- fi
-}
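
All of the hooks removed above are registered through the same semicolon-separated ROOTFS_POSTPROCESS_COMMAND mechanism; a hedged sketch of how an image recipe would add its own hook (the function name and target file are hypothetical):

    ROOTFS_POSTPROCESS_COMMAND += "write_build_note; "
    write_build_note () {
        # Operates on the staged rootfs via ${IMAGE_ROOTFS}, like the hooks above.
        echo "custom image tweak" > ${IMAGE_ROOTFS}${sysconfdir}/build-note
    }
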
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
deleted file mode 100644
index 2b93796a76..0000000000
--- a/meta/classes/rootfs_deb.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Copyright 2006-2007 Openedhand Ltd.
-#
-
-ROOTFS_PKGMANAGE = "dpkg apt"
-
-do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
-do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
-
-do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
-do_populate_sdk[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
-do_populate_sdk_ext[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
-
-python rootfs_deb_bad_recommendations() {
- if d.getVar("BAD_RECOMMENDATIONS"):
- bb.warn("Debian package install does not support BAD_RECOMMENDATIONS")
-}
-do_rootfs[prefuncs] += "rootfs_deb_bad_recommendations"
-
-DEB_POSTPROCESS_COMMANDS = ""
-
-opkglibdir = "${localstatedir}/lib/opkg"
-
-python () {
- # Map TARGET_ARCH to Debian's ideas about architectures
- darch = d.getVar('SDK_ARCH')
- if darch in ["x86", "i486", "i586", "i686", "pentium"]:
- d.setVar('DEB_SDK_ARCH', 'i386')
- elif darch == "x86_64":
- d.setVar('DEB_SDK_ARCH', 'amd64')
- elif darch == "arm":
- d.setVar('DEB_SDK_ARCH', 'armel')
-}
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
deleted file mode 100644
index e73d2bfdae..0000000000
--- a/meta/classes/rootfs_ipk.bbclass
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Creates a root filesystem out of IPKs
-#
-# This rootfs can be mounted via root-nfs or it can be put into a cramfs/jffs etc.
-# See image.bbclass for a usage of this.
-#
-
-EXTRAOPKGCONFIG ?= ""
-ROOTFS_PKGMANAGE = "opkg ${EXTRAOPKGCONFIG}"
-
-do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
-do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
-
-do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
-do_populate_sdk_ext[lockfiles] += "${WORKDIR}/ipk.lock"
-
-OPKG_PREPROCESS_COMMANDS = ""
-
-OPKG_POSTPROCESS_COMMANDS = ""
-
-OPKGLIBDIR ??= "${localstatedir}/lib"
-
-MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
-
-python () {
-
- if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask')
- flags = flags.replace("do_package_write_ipk", "")
- flags = flags.replace("do_deploy", "")
- flags = flags.replace("do_populate_sysroot", "")
- d.setVarFlag('do_rootfs', 'recrdeptask', flags)
- d.setVar('OPKG_PREPROCESS_COMMANDS', "")
- d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
-}
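
The anonymous Python above only trims the package-write dependencies when images are assembled from pre-built feeds; a hedged local.conf sketch (the feed URI is a placeholder, and PACKAGE_FEED_URIS is simply the feed variable this class already tracks in do_rootfs[vardeps]):

    BUILD_IMAGES_FROM_FEEDS = "1"
    PACKAGE_FEED_URIS = "http://example.com/ipk-feed"
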
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
deleted file mode 100644
index 51f89ea990..0000000000
--- a/meta/classes/rootfs_rpm.bbclass
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Creates a root filesystem out of rpm packages
-#
-
-ROOTFS_PKGMANAGE = "rpm dnf"
-
-# dnf is using our custom distutils, and so will fail without these
-export STAGING_INCDIR
-export STAGING_LIBDIR
-
-# Add 100Meg of extra space for dnf
-IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "dnf", " + 102400", "" ,d)}"
-
-# Dnf is python based, so be sure python3-native is available to us.
-EXTRANATIVEPATH += "python3-native"
-
-# opkg is needed for update-alternatives
-RPMROOTFSDEPENDS = "rpm-native:do_populate_sysroot \
- dnf-native:do_populate_sysroot \
- createrepo-c-native:do_populate_sysroot \
- opkg-native:do_populate_sysroot"
-
-do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
-do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
-
-do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
-do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
-
-python () {
- if d.getVar('BUILD_IMAGES_FROM_FEEDS'):
- flags = d.getVarFlag('do_rootfs', 'recrdeptask')
- flags = flags.replace("do_package_write_rpm", "")
- flags = flags.replace("do_deploy", "")
- flags = flags.replace("do_populate_sysroot", "")
- d.setVarFlag('do_rootfs', 'recrdeptask', flags)
- d.setVar('RPM_PREPROCESS_COMMANDS', '')
- d.setVar('RPM_POSTPROCESS_COMMANDS', '')
-
-}
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
deleted file mode 100644
index e2ba4e3647..0000000000
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ /dev/null
@@ -1,41 +0,0 @@
-# This class installs additional files found on the build host
-# directly into the rootfs.
-#
-# One use case is to install a constant ssh host key in
-# an image that gets created for just one machine. This
-# solves two issues:
-# - host key generation on the device can stall when the
-# kernel has not gathered enough entropy yet (seen in practice
-# under qemu)
-# - ssh complains by default when the host key changes
-#
-# For dropbear, with the ssh host key stored alongside local.conf:
-# 1. Extend local.conf:
-# INHERIT += "rootfsdebugfiles"
-# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ;"
-# 2. Boot the image once, copy the dropbear_rsa_host_key from
-# the device into your build conf directory.
-# 3. An optional parameter can be used to set the file mode
-# of the copied target, for instance:
-# ROOTFS_DEBUG_FILES += "${TOPDIR}/conf/dropbear_rsa_host_key ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key 0600;"
-# in case they might be required to have a specific mode. (Shouldn't be too open, for example.)
-#
-# Do not use for production images! It bypasses several
-# core build mechanisms (updating the image when one
-# of the files changes, license tracking in the image
-# manifest, ...).
-
-ROOTFS_DEBUG_FILES ?= ""
-ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
-rootfs_debug_files () {
- #!/bin/sh -e
- echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
- if [ -e "$source" ]; then
- mkdir -p $(dirname $target)
- cp -a $source $target
- [ -n "$mode" ] && chmod $mode $target
- fi
- done
-}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
deleted file mode 100644
index e021b9d240..0000000000
--- a/meta/classes/sanity.bbclass
+++ /dev/null
@@ -1,1029 +0,0 @@
-#
-# Sanity check the users setup for common misconfigurations
-#
-
-SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
- gzip gawk chrpath wget cpio perl file which"
-
-def bblayers_conf_file(d):
- return os.path.join(d.getVar('TOPDIR'), 'conf/bblayers.conf')
-
-def sanity_conf_read(fn):
- with open(fn, 'r') as f:
- lines = f.readlines()
- return lines
-
-def sanity_conf_find_line(pattern, lines):
- import re
- return next(((index, line)
- for index, line in enumerate(lines)
- if re.search(pattern, line)), (None, None))
-
-def sanity_conf_update(fn, lines, version_var_name, new_version):
- index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
- lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
- with open(fn, "w") as f:
- f.write(''.join(lines))
-
-# Functions added to this variable MUST throw a NotImplementedError exception unless
-# they successfully changed the config version in the config file. Exceptions
-# are used since exec_func doesn't handle return values.
-BBLAYERS_CONF_UPDATE_FUNCS += " \
- conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
- conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
- conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
-"
-
-SANITY_DIFF_TOOL ?= "meld"
-
-SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
-python oecore_update_localconf() {
- # Check we are using a valid local.conf
- current_conf = d.getVar('CONF_VERSION')
- conf_version = d.getVar('LOCALCONF_VERSION')
-
- failmsg = """Your version of local.conf was generated from an older/newer version of
-local.conf.sample and there have been updates made to this file. Please compare the two
-files and merge any changes before continuing.
-
-Matching the version numbers will remove this message.
-
-\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\"
-
-is a good way to visualise the changes."""
- failmsg = d.expand(failmsg)
-
- raise NotImplementedError(failmsg)
-}
-
-SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
-python oecore_update_siteconf() {
- # If we have a site.conf, check it's valid
- current_sconf = d.getVar('SCONF_VERSION')
- sconf_version = d.getVar('SITE_CONF_VERSION')
-
- failmsg = """Your version of site.conf was generated from an older version of
-site.conf.sample and there have been updates made to this file. Please compare the two
-files and merge any changes before continuing.
-
-Matching the version numbers will remove this message.
-
-\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\"
-
-is a good way to visualise the changes."""
- failmsg = d.expand(failmsg)
-
- raise NotImplementedError(failmsg)
-}
-
-SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
-python oecore_update_bblayers() {
- # bblayers.conf is out of date, so see if we can resolve that
-
- current_lconf = int(d.getVar('LCONF_VERSION'))
- lconf_version = int(d.getVar('LAYER_CONF_VERSION'))
-
- failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
-Please compare your file against bblayers.conf.sample and merge any changes before continuing.
-"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}"
-
-is a good way to visualise the changes."""
- failmsg = d.expand(failmsg)
-
- if not current_lconf:
- raise NotImplementedError(failmsg)
-
- lines = []
-
- if current_lconf < 4:
- raise NotImplementedError(failmsg)
-
- bblayers_fn = bblayers_conf_file(d)
- lines = sanity_conf_read(bblayers_fn)
-
- if current_lconf == 4 and lconf_version > 4:
- topdir_var = '$' + '{TOPDIR}'
- index, bbpath_line = sanity_conf_find_line('BBPATH', lines)
- if bbpath_line:
- start = bbpath_line.find('"')
- if start != -1 and (len(bbpath_line) != (start + 1)):
- if bbpath_line[start + 1] == '"':
- lines[index] = (bbpath_line[:start + 1] +
- topdir_var + bbpath_line[start + 1:])
- else:
- if not topdir_var in bbpath_line:
- lines[index] = (bbpath_line[:start + 1] +
- topdir_var + ':' + bbpath_line[start + 1:])
- else:
- raise NotImplementedError(failmsg)
- else:
- index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
- if bbfiles_line:
- lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
- else:
- raise NotImplementedError(failmsg)
-
- current_lconf += 1
- sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
- bb.note("Your conf/bblayers.conf has been automatically updated.")
- return
-
- elif current_lconf == 5 and lconf_version > 5:
- # Null update, to avoid issues with people switching between poky and other distros
- current_lconf = 6
- sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
- bb.note("Your conf/bblayers.conf has been automatically updated.")
- return
-
- status.addresult()
-
- elif current_lconf == 6 and lconf_version > 6:
- # Handle rename of meta-yocto -> meta-poky
- # This marks the start of separate version numbers but code is needed in OE-Core
- # for the migration, one last time.
- layers = d.getVar('BBLAYERS').split()
- layers = [ os.path.basename(path) for path in layers ]
- if 'meta-yocto' in layers:
- found = False
- while True:
- index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
- if meta_yocto_line:
- lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
- found = True
- else:
- break
- if not found:
- raise NotImplementedError(failmsg)
- index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
- if meta_yocto_line:
- lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
- else:
- raise NotImplementedError(failmsg)
- with open(bblayers_fn, "w") as f:
- f.write(''.join(lines))
- bb.note("Your conf/bblayers.conf has been automatically updated.")
- return
- current_lconf += 1
- sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
- bb.note("Your conf/bblayers.conf has been automatically updated.")
- return
-
- raise NotImplementedError(failmsg)
-}
-
-def raise_sanity_error(msg, d, network_error=False):
- if d.getVar("SANITY_USE_EVENTS") == "1":
- try:
- bb.event.fire(bb.event.SanityCheckFailed(msg, network_error), d)
- except TypeError:
- bb.event.fire(bb.event.SanityCheckFailed(msg), d)
- return
-
- bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
- Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
- Following is the list of potential problems / advisories:
-
- %s""" % msg)
-
-# Check flags associated with a tuning.
-def check_toolchain_tune_args(data, tune, multilib, errs):
- found_errors = False
- if check_toolchain_args_present(data, tune, multilib, errs, 'CCARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'ASARGS'):
- found_errors = True
- if check_toolchain_args_present(data, tune, multilib, errs, 'LDARGS'):
- found_errors = True
-
- return found_errors
-
-def check_toolchain_args_present(data, tune, multilib, tune_errors, which):
- args_set = (data.getVar("TUNE_%s" % which) or "").split()
- args_wanted = (data.getVar("TUNEABI_REQUIRED_%s_tune-%s" % (which, tune)) or "").split()
- args_missing = []
-
- # If no args are listed/required, we are done.
- if not args_wanted:
- return
- for arg in args_wanted:
- if arg not in args_set:
- args_missing.append(arg)
-
- found_errors = False
- if args_missing:
- found_errors = True
- tune_errors.append("TUNEABI for %s requires '%s' in TUNE_%s (%s)." %
- (tune, ' '.join(args_missing), which, ' '.join(args_set)))
- return found_errors
-
-# Check a single tune for validity.
-def check_toolchain_tune(data, tune, multilib):
- tune_errors = []
- if not tune:
- return "No tuning found for %s multilib." % multilib
- localdata = bb.data.createCopy(data)
- if multilib != "default":
- # Apply the overrides so we can look at the details.
- overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + multilib
- localdata.setVar("OVERRIDES", overrides)
- bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
- features = (localdata.getVar("TUNE_FEATURES_tune-%s" % tune) or "").split()
- if not features:
- return "Tuning '%s' has no defined features, and cannot be used." % tune
- valid_tunes = localdata.getVarFlags('TUNEVALID') or {}
- conflicts = localdata.getVarFlags('TUNECONFLICTS') or {}
- # [doc] is the documentation for the variable, not a real feature
- if 'doc' in valid_tunes:
- del valid_tunes['doc']
- if 'doc' in conflicts:
- del conflicts['doc']
- for feature in features:
- if feature in conflicts:
- for conflict in conflicts[feature].split():
- if conflict in features:
- tune_errors.append("Feature '%s' conflicts with '%s'." %
- (feature, conflict))
- if feature in valid_tunes:
- bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
- else:
- tune_errors.append("Feature '%s' is not defined." % feature)
- whitelist = localdata.getVar("TUNEABI_WHITELIST")
- if whitelist:
- tuneabi = localdata.getVar("TUNEABI_tune-%s" % tune)
- if not tuneabi:
- tuneabi = tune
- if True not in [x in whitelist.split() for x in tuneabi.split()]:
- tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
- (tune, tuneabi))
- else:
- if not check_toolchain_tune_args(localdata, tuneabi, multilib, tune_errors):
- bb.debug(2, "Sanity check: Compiler args OK for %s." % tune)
- if tune_errors:
- return "Tuning '%s' has the following errors:\n" % tune + '\n'.join(tune_errors)
-
-def check_toolchain(data):
- tune_error_set = []
- deftune = data.getVar("DEFAULTTUNE")
- tune_errors = check_toolchain_tune(data, deftune, 'default')
- if tune_errors:
- tune_error_set.append(tune_errors)
-
- multilibs = (data.getVar("MULTILIB_VARIANTS") or "").split()
- global_multilibs = (data.getVar("MULTILIB_GLOBAL_VARIANTS") or "").split()
-
- if multilibs:
- seen_libs = []
- seen_tunes = []
- for lib in multilibs:
- if lib in seen_libs:
- tune_error_set.append("The multilib '%s' appears more than once." % lib)
- else:
- seen_libs.append(lib)
- if not lib in global_multilibs:
- tune_error_set.append("Multilib %s is not present in MULTILIB_GLOBAL_VARIANTS" % lib)
- tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib)
- if tune in seen_tunes:
- tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
- else:
- seen_libs.append(tune)
- if tune == deftune:
- tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
- else:
- tune_errors = check_toolchain_tune(data, tune, lib)
- if tune_errors:
- tune_error_set.append(tune_errors)
- if tune_error_set:
- return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set) + "\n"
-
- return ""
-
-def check_conf_exists(fn, data):
- bbpath = []
- fn = data.expand(fn)
- vbbpath = data.getVar("BBPATH", False)
- if vbbpath:
- bbpath += vbbpath.split(":")
- for p in bbpath:
- currname = os.path.join(data.expand(p), fn)
- if os.access(currname, os.R_OK):
- return True
- return False
-
-def check_create_long_filename(filepath, pathname):
- import string, random
- testfile = os.path.join(filepath, ''.join(random.choice(string.ascii_letters) for x in range(200)))
- try:
- if not os.path.exists(filepath):
- bb.utils.mkdirhier(filepath)
- f = open(testfile, "w")
- f.close()
- os.remove(testfile)
- except IOError as e:
- import errno
- err, strerror = e.args
- if err == errno.ENAMETOOLONG:
- return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
- else:
- return "Failed to create a file in %s: %s.\n" % (pathname, strerror)
- except OSError as e:
- errno, strerror = e.args
- return "Failed to create %s directory in which to run long name sanity check: %s.\n" % (pathname, strerror)
- return ""
-
-def check_path_length(filepath, pathname, limit):
- if len(filepath) > limit:
- return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
- return ""
-
-def get_filesystem_id(path):
- import subprocess
- try:
- return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
- except subprocess.CalledProcessError:
- bb.warn("Can't get filesystem id of: %s" % path)
- return None
-
-# Check that the path isn't located on nfs.
-def check_not_nfs(path, name):
- # The nfs' filesystem id is 6969
- if get_filesystem_id(path) == "6969":
- return "The %s: %s can't be located on nfs.\n" % (name, path)
- return ""
-
-# Check that the path is on a case-sensitive file system
-def check_case_sensitive(path, name):
- import tempfile
- with tempfile.NamedTemporaryFile(prefix='TmP', dir=path) as tmp_file:
- if os.path.exists(tmp_file.name.lower()):
- return "The %s (%s) can't be on a case-insensitive file system.\n" % (name, path)
- return ""
-
-# Check that path isn't a broken symlink
-def check_symlink(lnk, data):
- if os.path.islink(lnk) and not os.path.exists(lnk):
- raise_sanity_error("%s is a broken symlink." % lnk, data)
-
-def check_connectivity(d):
- # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
- # using the same syntax as for SRC_URI. If the variable is not set
- # the check is skipped
- test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS') or "").split()
- retval = ""
-
- bbn = d.getVar('BB_NO_NETWORK')
- if bbn not in (None, '0', '1'):
- return 'BB_NO_NETWORK should be "0" or "1", but it is "%s"' % bbn
-
- # Only check connectivity if network enabled and the
- # CONNECTIVITY_CHECK_URIS are set
- network_enabled = not (bbn == '1')
- check_enabled = len(test_uris)
- if check_enabled and network_enabled:
- # Take a copy of the data store and unset MIRRORS and PREMIRRORS
- data = bb.data.createCopy(d)
- data.delVar('PREMIRRORS')
- data.delVar('MIRRORS')
- try:
- fetcher = bb.fetch2.Fetch(test_uris, data)
- fetcher.checkstatus()
- except Exception as err:
- # Allow the message to be configured so that users can be
- # pointed to a support mechanism.
- msg = data.getVar('CONNECTIVITY_CHECK_MSG') or ""
- if len(msg) == 0:
- msg = "%s.\n" % err
- msg += " Please ensure your host's network is configured correctly,\n"
- msg += " or set BB_NO_NETWORK = \"1\" to disable network access if\n"
- msg += " all required sources are on local disk.\n"
- retval = msg
-
- return retval
-
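
The connectivity check above is opt-in; a minimal local.conf sketch using only the variables the function reads (the URI and message text are placeholders, not project defaults):

    CONNECTIVITY_CHECK_URIS = "https://example.com/connectivity-check"
    CONNECTIVITY_CHECK_MSG = "Network check failed; verify your proxy settings."
    # Alternatively, set BB_NO_NETWORK = "1" to skip the check on fully offline hosts.
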
-def check_supported_distro(sanity_data):
- from fnmatch import fnmatch
-
- tested_distros = sanity_data.getVar('SANITY_TESTED_DISTROS')
- if not tested_distros:
- return
-
- try:
- distro = oe.lsb.distro_identifier()
- except Exception:
- distro = None
-
- if not distro:
- bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
-
- for supported in [x.strip() for x in tested_distros.split('\\n')]:
- if fnmatch(distro, supported):
- return
-
- bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
-
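
check_supported_distro matches the host identifier against glob patterns separated by literal "\n" sequences; a hedged distro-configuration sketch (the distro names are illustrative):

    SANITY_TESTED_DISTROS ?= " \
        ubuntu-22.04 \n \
        fedora-38 \n \
        debian-12 \
        "
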
-# Checks we should only make if MACHINE is set correctly
-def check_sanity_validmachine(sanity_data):
- messages = ""
-
- # Check TUNE_ARCH is set
- if sanity_data.getVar('TUNE_ARCH') == 'INVALID':
- messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
-
- # Check TARGET_OS is set
- if sanity_data.getVar('TARGET_OS') == 'INVALID':
- messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
-
- # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
- pkgarchs = sanity_data.getVar('PACKAGE_ARCHS')
- tunepkg = sanity_data.getVar('TUNE_PKGARCH')
- defaulttune = sanity_data.getVar('DEFAULTTUNE')
- tunefound = False
- seen = {}
- dups = []
-
- for pa in pkgarchs.split():
- if seen.get(pa, 0) == 1:
- dups.append(pa)
- else:
- seen[pa] = 1
- if pa == tunepkg:
- tunefound = True
-
- if len(dups):
- messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
-
- if tunefound == False:
- messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
-
- return messages
-
-# Patch before 2.7 can't handle all the features in git-style diffs. Some
-# patches may incorrectly apply, and others won't apply at all.
-def check_patch_version(sanity_data):
- from distutils.version import LooseVersion
- import re, subprocess
-
- try:
- result = subprocess.check_output(["patch", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
- version = re.search(r"[0-9.]+", result.splitlines()[0]).group()
- if LooseVersion(version) < LooseVersion("2.7"):
- return "Your version of patch is older than 2.7 and has bugs which will break builds. Please install a newer version of patch.\n"
- else:
- return None
- except subprocess.CalledProcessError as e:
- return "Unable to execute patch --version, exit code %d:\n%s\n" % (e.returncode, e.output)
-
-# Unpatched versions of make 3.82 are known to be broken. See GNU Savannah Bug 30612.
-# Use a modified reproducer from http://savannah.gnu.org/bugs/?30612 to validate.
-def check_make_version(sanity_data):
- from distutils.version import LooseVersion
- import subprocess
-
- try:
- result = subprocess.check_output(['make', '--version'], stderr=subprocess.STDOUT).decode('utf-8')
- except subprocess.CalledProcessError as e:
- return "Unable to execute make --version, exit code %d\n%s\n" % (e.returncode, e.output)
- version = result.split()[2]
- if LooseVersion(version) == LooseVersion("3.82"):
- # Construct a test file
- f = open("makefile_test", "w")
- f.write("makefile_test.a: makefile_test_a.c makefile_test_b.c makefile_test.a( makefile_test_a.c makefile_test_b.c)\n")
- f.write("\n")
- f.write("makefile_test_a.c:\n")
- f.write(" touch $@\n")
- f.write("\n")
- f.write("makefile_test_b.c:\n")
- f.write(" touch $@\n")
- f.close()
-
- # Check if make 3.82 has been patched
- try:
- subprocess.check_call(['make', '-f', 'makefile_test'])
- except subprocess.CalledProcessError as e:
- return "Your version of make 3.82 is broken. Please revert to 3.81 or install a patched version.\n"
- finally:
- os.remove("makefile_test")
- if os.path.exists("makefile_test_a.c"):
- os.remove("makefile_test_a.c")
- if os.path.exists("makefile_test_b.c"):
- os.remove("makefile_test_b.c")
- if os.path.exists("makefile_test.a"):
- os.remove("makefile_test.a")
- return None
-
-
-# Check if we're running on WSL (Windows Subsystem for Linux).
-# WSLv1 is known not to work but WSLv2 should work properly as
-# long as the VHDX file is optimized often; let the user know
-# upfront.
-# More information on installing WSLv2 at:
-# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
-def check_wsl(d):
- with open("/proc/version", "r") as f:
- verdata = f.readlines()
- for l in verdata:
- if "Microsoft" in l:
- return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
- elif "microsoft" in l:
- bb.warn("You are running bitbake under WSLv2; this works properly, but you should optimize your VHDX file periodically to avoid running out of storage space")
- return None
-
-# Require at least gcc version 6.0.
-#
-# This can be fixed on CentOS-7 with devtoolset-6+
-# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
-#
-# A less invasive fix is with scripts/install-buildtools (or with user
-# built buildtools-extended-tarball)
-#
-def check_gcc_version(sanity_data):
- from distutils.version import LooseVersion
- import subprocess
-
- build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
- if build_cc.strip() == "gcc":
- if LooseVersion(version) < LooseVersion("6.0"):
- return "Your version of gcc is older than 6.0 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
- return None
-
-# Tar version 1.24 and onwards handle overwriting symlinks correctly
-# but earlier versions do not; this needs to work properly for sstate
-# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
-def check_tar_version(sanity_data):
- from distutils.version import LooseVersion
- import subprocess
- try:
- result = subprocess.check_output(["tar", "--version"], stderr=subprocess.STDOUT).decode('utf-8')
- except subprocess.CalledProcessError as e:
- return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
- version = result.split()[3]
- if LooseVersion(version) < LooseVersion("1.28"):
- return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
- return None
-
-# We use git parameters and functionality only found in 1.7.8 or later
-# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162
-# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
-def check_git_version(sanity_data):
- from distutils.version import LooseVersion
- import subprocess
- try:
- result = subprocess.check_output(["git", "--version"], stderr=subprocess.DEVNULL).decode('utf-8')
- except subprocess.CalledProcessError as e:
- return "Unable to execute git --version, exit code %d\n%s\n" % (e.returncode, e.output)
- version = result.split()[2]
- if LooseVersion(version) < LooseVersion("1.8.3.1"):
- return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
- return None
-
-# Check the required perl modules which may not be installed by default
-def check_perl_modules(sanity_data):
- import subprocess
- ret = ""
- modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
- errresult = ''
- for m in modules:
- try:
- subprocess.check_output(["perl", "-e", "use %s" % m])
- except subprocess.CalledProcessError as e:
- errresult += bytes.decode(e.output)
- ret += "%s " % m
- if ret:
- return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
- return None
-
-def sanity_check_conffiles(d):
- funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS').split()
- for func in funcs:
- conffile, current_version, required_version, func = func.split(":")
- if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
- d.getVar(current_version) != d.getVar(required_version):
- try:
- bb.build.exec_func(func, d)
- except NotImplementedError as e:
- bb.fatal(str(e))
- d.setVar("BB_INVALIDCONF", True)
-
-def sanity_handle_abichanges(status, d):
- #
- # Check the 'ABI' of TMPDIR
- #
- import subprocess
-
- current_abi = d.getVar('OELAYOUT_ABI')
- abifile = d.getVar('SANITY_ABIFILE')
- if os.path.exists(abifile):
- with open(abifile, "r") as f:
- abi = f.read().strip()
- if not abi.isdigit():
- with open(abifile, "w") as f:
- f.write(current_abi)
- elif int(abi) <= 11 and current_abi == "12":
- status.addresult("The layout of TMPDIR changed for Recipe Specific Sysroots.\nConversion doesn't make sense and this change will rebuild everything so please delete TMPDIR (%s).\n" % d.getVar("TMPDIR"))
- elif (abi != current_abi):
- # Code to convert from one ABI to another could go here if possible.
- status.addresult("Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi))
- else:
- with open(abifile, "w") as f:
- f.write(current_abi)
-
-def check_sanity_sstate_dir_change(sstate_dir, data):
- # Sanity checks to be done when the value of SSTATE_DIR changes
-
- # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
- testmsg = ""
- if sstate_dir != "":
- testmsg = check_create_long_filename(sstate_dir, "SSTATE_DIR")
- # If we don't have permissions to SSTATE_DIR, suggest the user set it as an SSTATE_MIRRORS
- try:
- err = testmsg.split(': ')[1].strip()
- if err == "Permission denied.":
- testmsg = testmsg + "You could try using %s in SSTATE_MIRRORS rather than as an SSTATE_CACHE.\n" % (sstate_dir)
- except IndexError:
- pass
- return testmsg
-
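
As the message above suggests, a cache directory without write access is better consumed as a mirror; a hedged local.conf sketch (the directory is a placeholder, and the literal PATH token is left for substitution by the sstate code):

    SSTATE_MIRRORS ?= "file://.* file:///path/to/shared-sstate/PATH"
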
-def check_sanity_version_change(status, d):
- # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
- # In other words, these tests run once in a given build directory and then
- # never again until the sanity version or host distribution id/version changes.
-
- # Check the python install is complete. Examples that are often removed in
- # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
- # requires distutils.sysconfig.
- try:
- import xml.parsers.expat
- import distutils.sysconfig
- except ImportError as e:
- status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
-
- status.addresult(check_gcc_version(d))
- status.addresult(check_make_version(d))
- status.addresult(check_patch_version(d))
- status.addresult(check_tar_version(d))
- status.addresult(check_git_version(d))
- status.addresult(check_perl_modules(d))
- status.addresult(check_wsl(d))
-
- missing = ""
-
- if not check_app_exists("${MAKE}", d):
- missing = missing + "GNU make,"
-
- if not check_app_exists('${BUILD_CC}', d):
- missing = missing + "C Compiler (%s)," % d.getVar("BUILD_CC")
-
- if not check_app_exists('${BUILD_CXX}', d):
- missing = missing + "C++ Compiler (%s)," % d.getVar("BUILD_CXX")
-
- required_utilities = d.getVar('SANITY_REQUIRED_UTILITIES')
-
- for util in required_utilities.split():
- if not check_app_exists(util, d):
- missing = missing + "%s," % util
-
- if missing:
- missing = missing.rstrip(',')
- status.addresult("Please install the following missing utilities: %s\n" % missing)
-
- assume_provided = d.getVar('ASSUME_PROVIDED').split()
- # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
- if "diffstat-native" not in assume_provided:
- status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
-
- # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
- import stat
- tmpdir = d.getVar('TMPDIR')
- status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
- tmpdirmode = os.stat(tmpdir).st_mode
- if (tmpdirmode & stat.S_ISGID):
- status.addresult("TMPDIR is setgid, please don't build in a setgid directory")
- if (tmpdirmode & stat.S_ISUID):
- status.addresult("TMPDIR is setuid, please don't build in a setuid directory")
-
- # Some third-party software apparently relies on chmod etc. being suid root (!!)
- import stat
- suid_check_bins = "chown chmod mknod".split()
- for bin_cmd in suid_check_bins:
- bin_path = bb.utils.which(os.environ["PATH"], bin_cmd)
- if bin_path:
- bin_stat = os.stat(bin_path)
- if bin_stat.st_uid == 0 and bin_stat.st_mode & stat.S_ISUID:
- status.addresult('%s has the setuid bit set. This interferes with pseudo and may cause other issues that break the build process.\n' % bin_path)
-
- # Check that we can fetch from various network transports
- netcheck = check_connectivity(d)
- status.addresult(netcheck)
- if netcheck:
- status.network_error = True
-
- nolibs = d.getVar('NO32LIBS')
- if not nolibs:
- lib32path = '/lib'
- if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
- lib32path = '/lib32'
-
- if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
- status.addresult("You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n")
-
- bbpaths = d.getVar('BBPATH').split(":")
- if ("." in bbpaths or "./" in bbpaths or "" in bbpaths):
- status.addresult("BBPATH references the current directory, either through " \
- "an empty entry, a './' or a '.'.\n\t This is unsafe and means your " \
- "layer configuration is adding empty elements to BBPATH.\n\t " \
- "Please check your layer.conf files and other BBPATH " \
- "settings to remove the current working directory " \
- "references.\n" \
- "Parsed BBPATH is " + str(bbpaths))
-
- oes_bb_conf = d.getVar( 'OES_BITBAKE_CONF')
- if not oes_bb_conf:
- status.addresult('You are not using the OpenEmbedded version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n')
-
- # The length of TMPDIR can't be longer than 410 characters
- status.addresult(check_path_length(tmpdir, "TMPDIR", 410))
-
- # Check that TMPDIR isn't located on nfs
- status.addresult(check_not_nfs(tmpdir, "TMPDIR"))
-
- # Check for case-insensitive file systems (such as Linux in Docker on
- # macOS with default HFS+ file system)
- status.addresult(check_case_sensitive(tmpdir, "TMPDIR"))
-
-def sanity_check_locale(d):
- """
- Currently bitbake switches locale to en_US.UTF-8 so check that this locale actually exists.
- """
- import locale
- try:
- locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
- except locale.Error:
- raise_sanity_error("Your system needs to support the en_US.UTF-8 locale.", d)
-
-def check_sanity_everybuild(status, d):
- import os, stat
- # Sanity tests which test the user's environment, so they need to run at each build (or are so cheap
- # it makes sense to always run them).
-
- if 0 == os.getuid():
- raise_sanity_error("Do not use Bitbake as root.", d)
-
- # Check the Python version, we now have a minimum of Python 3.4
- import sys
- if sys.hexversion < 0x03040000:
- status.addresult('The system requires at least Python 3.4 to run. Please update your Python interpreter.\n')
-
- # Check the bitbake version meets minimum requirements
- from distutils.version import LooseVersion
- minversion = d.getVar('BB_MIN_VERSION')
- if (LooseVersion(bb.__version__) < LooseVersion(minversion)):
- status.addresult('Bitbake version %s is required and version %s was found\n' % (minversion, bb.__version__))
-
- sanity_check_locale(d)
-
- paths = d.getVar('PATH').split(":")
- if "." in paths or "./" in paths or "" in paths:
- status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
-
- # Check whether the 'inherit' directive is used in a conf file
- # (where the uppercase INHERIT variable should be used instead)
- inherit = d.getVar('inherit')
- if inherit:
- status.addresult("Please don't use inherit directive in your local.conf. The directive is supposed to be used in classes and recipes only to inherit of bbclasses. Here INHERIT should be used.\n")
-
- # Check that the DISTRO is valid, if set, taking into account that
- # the distro configuration may itself rename DISTRO
- distro = d.getVar('DISTRO')
- if distro and distro != "nodistro":
- if not ( check_conf_exists("conf/distro/${DISTRO}.conf", d) or check_conf_exists("conf/distro/include/${DISTRO}.inc", d) ):
- status.addresult("DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % d.getVar("DISTRO"))
-
- # Check that these variables don't use tilde-expansion as we don't do that
- for v in ("TMPDIR", "DL_DIR", "SSTATE_DIR"):
- if d.getVar(v).startswith("~"):
- status.addresult("%s uses ~ but Bitbake will not expand this, use an absolute path or variables." % v)
-
- # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
- # set, since so much relies on it being set.
- dldir = d.getVar('DL_DIR')
- if not dldir:
- status.addresult("DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n")
- if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
- status.addresult("DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir)
- check_symlink(dldir, d)
-
- # Check that the MACHINE is valid, if it is set
- machinevalid = True
- if d.getVar('MACHINE'):
- if not check_conf_exists("conf/machine/${MACHINE}.conf", d):
- status.addresult('MACHINE=%s is invalid. Please set a valid MACHINE in your local.conf, environment or other configuration file.\n' % (d.getVar('MACHINE')))
- machinevalid = False
- else:
- status.addresult(check_sanity_validmachine(d))
- else:
- status.addresult('Please set a MACHINE in your local.conf or environment\n')
- machinevalid = False
- if machinevalid:
- status.addresult(check_toolchain(d))
-
- # Check that the SDKMACHINE is valid, if it is set
- if d.getVar('SDKMACHINE'):
- if not check_conf_exists("conf/machine-sdk/${SDKMACHINE}.conf", d):
- status.addresult('Specified SDKMACHINE value is not valid\n')
- elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
- status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
-
- # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
- sdkvendor = d.getVar("SDK_VENDOR")
- if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
- status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
-
- check_supported_distro(d)
-
- omask = os.umask(0o022)
- if omask & 0o755:
- status.addresult("Please use a umask which allows a+rx and u+rwx\n")
- os.umask(omask)
-
- if d.getVar('TARGET_ARCH') == "arm":
- # This path is no longer user-readable in modern (very recent) Linux
- try:
- if os.path.exists("/proc/sys/vm/mmap_min_addr"):
- f = open("/proc/sys/vm/mmap_min_addr", "r")
- try:
- if (int(f.read().strip()) > 65536):
- status.addresult("/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n")
- finally:
- f.close()
- except:
- pass
-
- oeroot = d.getVar('COREBASE')
- if oeroot.find('+') != -1:
- status.addresult("Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include any + characters.")
- if oeroot.find('@') != -1:
- status.addresult("Error, you have an invalid character (@) in your COREBASE directory path. Please move the installation to a directory which doesn't include any @ characters.")
- if oeroot.find(' ') != -1:
- status.addresult("Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space since autotools doesn't support this.")
-
- # Check the format of MIRRORS, PREMIRRORS and SSTATE_MIRRORS
- import re
- mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
- protocols = ['http', 'ftp', 'file', 'https', \
- 'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
- 'bzr', 'cvs', 'npm', 'sftp', 'ssh', 's3' ]
- for mirror_var in mirror_vars:
- mirrors = (d.getVar(mirror_var) or '').replace('\\n', ' ').split()
-
- # Split into pairs
- if len(mirrors) % 2 != 0:
- bb.warn('Invalid mirror variable value for %s: %s, should contain paired members.' % (mirror_var, str(mirrors)))
- continue
- mirrors = list(zip(*[iter(mirrors)]*2))
-
- for mirror_entry in mirrors:
- pattern, mirror = mirror_entry
-
- decoded = bb.fetch2.decodeurl(pattern)
- try:
- pattern_scheme = re.compile(decoded[0])
- except re.error as exc:
- bb.warn('Invalid scheme regex (%s) in %s; %s' % (pattern, mirror_var, mirror_entry))
- continue
-
- if not any(pattern_scheme.match(protocol) for protocol in protocols):
- bb.warn('Invalid protocol (%s) in %s: %s' % (decoded[0], mirror_var, mirror_entry))
- continue
-
- if not any(mirror.startswith(protocol + '://') for protocol in protocols):
- bb.warn('Invalid protocol in %s: %s' % (mirror_var, mirror_entry))
- continue
-
- if mirror.startswith('file://'):
- import urllib
- check_symlink(urllib.parse.urlparse(mirror).path, d)
- # SSTATE_MIRRORS entries end with a /PATH string
- if mirror.endswith('/PATH'):
- # remove /PATH$ from SSTATE_MIRROR to get a working
- # base directory path
- mirror_base = urllib.parse.urlparse(mirror[:-1*len('/PATH')]).path
- check_symlink(mirror_base, d)
-
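As an aside, the pairing above uses the zip(*[iter(seq)]*2) idiom to turn the flat, whitespace-split mirror value into (pattern, mirror) tuples. A minimal standalone sketch of that shape (the URLs and the scheme extraction are illustrative stand-ins, not the class's own helpers):

    import re

    # Flat MIRRORS-style value: alternating (scheme regex, mirror URL) entries.
    mirrors = ["git://.*/.*", "http://downloads.example.com/mirror/",
               "https?://.*/.*", "http://downloads.example.com/mirror/"]

    assert len(mirrors) % 2 == 0, "mirror variables must contain paired members"
    pairs = list(zip(*[iter(mirrors)] * 2))  # [(pattern, mirror), ...]

    for pattern, mirror in pairs:
        scheme = pattern.split("://", 1)[0]  # crude stand-in for bb.fetch2.decodeurl()[0]
        re.compile(scheme)                   # raises re.error for an invalid scheme regex
        print(scheme, "->", mirror)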
- # Check that TMPDIR hasn't changed location since the last time we were run
- tmpdir = d.getVar('TMPDIR')
- checkfile = os.path.join(tmpdir, "saved_tmpdir")
- if os.path.exists(checkfile):
- with open(checkfile, "r") as f:
- saved_tmpdir = f.read().strip()
- if (saved_tmpdir != tmpdir):
- status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
- else:
- bb.utils.mkdirhier(tmpdir)
- # Remove setuid, setgid and sticky bits from TMPDIR
- try:
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISUID)
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISGID)
- os.chmod(tmpdir, os.stat(tmpdir).st_mode & ~ stat.S_ISVTX)
- except OSError as exc:
- bb.warn("Unable to chmod TMPDIR: %s" % exc)
- with open(checkfile, "w") as f:
- f.write(tmpdir)
-
- # If /bin/sh is a symlink, check that it points to dash or bash
- if os.path.islink('/bin/sh'):
- real_sh = os.path.realpath('/bin/sh')
- # Due to update-alternatives, the shell name may take various
- # forms, such as /bin/dash, /bin/bash, /bin/bash.bash ...
- if '/dash' not in real_sh and '/bash' not in real_sh:
- status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh)
-
-def check_sanity(sanity_data):
- class SanityStatus(object):
- def __init__(self):
- self.messages = ""
- self.network_error = False
-
- def addresult(self, message):
- if message:
- self.messages = self.messages + message
-
- status = SanityStatus()
-
- tmpdir = sanity_data.getVar('TMPDIR')
- sstate_dir = sanity_data.getVar('SSTATE_DIR')
-
- check_symlink(sstate_dir, sanity_data)
-
- # Check saved sanity info
- last_sanity_version = 0
- last_tmpdir = ""
- last_sstate_dir = ""
- last_nativelsbstr = ""
- sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
- if os.path.exists(sanityverfile):
- with open(sanityverfile, 'r') as f:
- for line in f:
- if line.startswith('SANITY_VERSION'):
- last_sanity_version = int(line.split()[1])
- if line.startswith('TMPDIR'):
- last_tmpdir = line.split()[1]
- if line.startswith('SSTATE_DIR'):
- last_sstate_dir = line.split()[1]
- if line.startswith('NATIVELSBSTRING'):
- last_nativelsbstr = line.split()[1]
-
- check_sanity_everybuild(status, sanity_data)
-
- sanity_version = int(sanity_data.getVar('SANITY_VERSION') or 1)
- network_error = False
- # NATIVELSBSTRING var may have been overridden with "universal", so
- # get actual host distribution id and version
- nativelsbstr = lsb_distro_identifier(sanity_data)
- if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr:
- check_sanity_version_change(status, sanity_data)
- status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
- else:
- if last_sstate_dir != sstate_dir:
- status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
-
- if os.path.exists(os.path.dirname(sanityverfile)) and not status.messages:
- with open(sanityverfile, 'w') as f:
- f.write("SANITY_VERSION %s\n" % sanity_version)
- f.write("TMPDIR %s\n" % tmpdir)
- f.write("SSTATE_DIR %s\n" % sstate_dir)
- f.write("NATIVELSBSTRING %s\n" % nativelsbstr)
-
- sanity_handle_abichanges(status, sanity_data)
-
- if status.messages != "":
- raise_sanity_error(sanity_data.expand(status.messages), sanity_data, status.network_error)
-
-# Create a copy of the datastore and finalise it to ensure appends and
-# overrides are set - the datastore has yet to be finalised at ConfigParsed
-def copy_data(e):
- sanity_data = bb.data.createCopy(e.data)
- sanity_data.finalize()
- return sanity_data
-
-addhandler config_reparse_eventhandler
-config_reparse_eventhandler[eventmask] = "bb.event.ConfigParsed"
-python config_reparse_eventhandler() {
- sanity_check_conffiles(e.data)
-}
-
-addhandler check_sanity_eventhandler
-check_sanity_eventhandler[eventmask] = "bb.event.SanityCheck bb.event.NetworkTest"
-python check_sanity_eventhandler() {
- if bb.event.getName(e) == "SanityCheck":
- sanity_data = copy_data(e)
- check_sanity(sanity_data)
- if e.generateevents:
- sanity_data.setVar("SANITY_USE_EVENTS", "1")
- bb.event.fire(bb.event.SanityCheckPassed(), e.data)
- elif bb.event.getName(e) == "NetworkTest":
- sanity_data = copy_data(e)
- if e.generateevents:
- sanity_data.setVar("SANITY_USE_EVENTS", "1")
- bb.event.fire(bb.event.NetworkTestFailed() if check_connectivity(sanity_data) else bb.event.NetworkTestPassed(), e.data)
-
- return
-}
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
deleted file mode 100644
index 6b171ca8df..0000000000
--- a/meta/classes/scons.bbclass
+++ /dev/null
@@ -1,31 +0,0 @@
-inherit python3native
-
-DEPENDS += "python3-scons-native"
-
-EXTRA_OESCONS ?= ""
-
-do_configure() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- if [ -n "${CONFIGURESTAMPFILE}" ]; then
- if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
- ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
- fi
-
- mkdir -p `dirname ${CONFIGURESTAMPFILE}`
- echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
- fi
-}
-
-scons_do_compile() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
- die "scons build execution failed."
-}
-
-scons_do_install() {
- unset _PYTHON_SYSCONFIGDATA_NAME
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
- die "scons install execution failed."
-}
-
-EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/setuptools3.bbclass b/meta/classes/setuptools3.bbclass
deleted file mode 100644
index 8ca66ee708..0000000000
--- a/meta/classes/setuptools3.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-inherit distutils3
-
-DEPENDS += "python3-setuptools-native"
-
diff --git a/meta/classes/sign_ipk.bbclass b/meta/classes/sign_ipk.bbclass
index e5057b7799..51c24b38b2 100644
--- a/meta/classes/sign_ipk.bbclass
+++ b/meta/classes/sign_ipk.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Class for generating signed IPK packages.
#
# Configuration variables used by this class:
diff --git a/meta/classes/sign_package_feed.bbclass b/meta/classes/sign_package_feed.bbclass
index 7ff3a35a2f..e9d664750c 100644
--- a/meta/classes/sign_package_feed.bbclass
+++ b/meta/classes/sign_package_feed.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Class for signing package feeds
#
# Related configuration variables that will be used after this class is
@@ -27,9 +33,10 @@ inherit sanity
PACKAGE_FEED_SIGN = '1'
PACKAGE_FEED_GPG_BACKEND ?= 'local'
PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
+PACKAGEINDEXDEPS += "gnupg-native:do_populate_sysroot"
# Make feed signing key to be present in rootfs
-FEATURE_PACKAGES_package-management_append = " signing-keys-packagefeed"
+FEATURE_PACKAGES_package-management:append = " signing-keys-packagefeed"
python () {
# Check sanity of configuration
diff --git a/meta/classes/sign_rpm.bbclass b/meta/classes/sign_rpm.bbclass
index 73a55a512d..ee0c4808fa 100644
--- a/meta/classes/sign_rpm.bbclass
+++ b/meta/classes/sign_rpm.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Class for generating signed RPM packages.
#
# Configuration variables used by this class:
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
index 0cfa5a6834..953cafd285 100644
--- a/meta/classes/siteconfig.bbclass
+++ b/meta/classes/siteconfig.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
python siteconfig_do_siteconfig () {
shared_state = sstate_state_fromvars(d)
if shared_state['task'] != 'populate_sysroot':
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
deleted file mode 100644
index 1a048c053f..0000000000
--- a/meta/classes/siteinfo.bbclass
+++ /dev/null
@@ -1,199 +0,0 @@
-# This class exists to provide information about the targets that
-# may be needed by other classes and/or recipes. If you add a new
-# target this will probably need to be updated.
-
-#
-# siteinfo_data_for_machine() below returns a list of site data elements for the
-# target "<arch>-<os>", describing among other things:
-# * endianness: "endian-little" for little endian targets, "endian-big" for big endian
-# * bit size: "bit-32" or "bit-64"
-# * the C library / OS family used by the target (e.g. "common-glibc", "common-musl")
-#
-# The list always ends with the target name itself and "common".
-# Architectures or OSes not listed in the tables simply contribute no extra elements.
-#
-def siteinfo_data_for_machine(arch, os, d):
- archinfo = {
- "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
- "aarch64": "endian-little bit-64 arm-common arm-64",
- "aarch64_be": "endian-big bit-64 arm-common arm-64",
- "arc": "endian-little bit-32 arc-common",
- "arceb": "endian-big bit-32 arc-common",
- "arm": "endian-little bit-32 arm-common arm-32",
- "armeb": "endian-big bit-32 arm-common arm-32",
- "avr32": "endian-big bit-32 avr32-common",
- "bfin": "endian-little bit-32 bfin-common",
- "epiphany": "endian-little bit-32",
- "i386": "endian-little bit-32 ix86-common",
- "i486": "endian-little bit-32 ix86-common",
- "i586": "endian-little bit-32 ix86-common",
- "i686": "endian-little bit-32 ix86-common",
- "ia64": "endian-little bit-64",
- "lm32": "endian-big bit-32",
- "m68k": "endian-big bit-32",
- "microblaze": "endian-big bit-32 microblaze-common",
- "microblazeel": "endian-little bit-32 microblaze-common",
- "mips": "endian-big bit-32 mips-common",
- "mips64": "endian-big bit-64 mips-common",
- "mips64el": "endian-little bit-64 mips-common",
- "mipsisa64r6": "endian-big bit-64 mips-common",
- "mipsisa64r6el": "endian-little bit-64 mips-common",
- "mipsel": "endian-little bit-32 mips-common",
- "mipsisa32r6": "endian-big bit-32 mips-common",
- "mipsisa32r6el": "endian-little bit-32 mips-common",
- "powerpc": "endian-big bit-32 powerpc-common",
- "nios2": "endian-little bit-32 nios2-common",
- "powerpc64": "endian-big bit-64 powerpc-common",
- "powerpc64le": "endian-little bit-64 powerpc-common",
- "ppc": "endian-big bit-32 powerpc-common",
- "ppc64": "endian-big bit-64 powerpc-common",
- "ppc64le" : "endian-little bit-64 powerpc-common",
- "riscv32": "endian-little bit-32 riscv-common",
- "riscv64": "endian-little bit-64 riscv-common",
- "sh3": "endian-little bit-32 sh-common",
- "sh4": "endian-little bit-32 sh-common",
- "sparc": "endian-big bit-32",
- "viac3": "endian-little bit-32 ix86-common",
- "x86_64": "endian-little", # bitinfo specified in targetinfo
- }
- osinfo = {
- "darwin": "common-darwin",
- "darwin9": "common-darwin",
- "linux": "common-linux common-glibc",
- "linux-gnu": "common-linux common-glibc",
- "linux-gnu_ilp32": "common-linux common-glibc",
- "linux-gnux32": "common-linux common-glibc",
- "linux-gnun32": "common-linux common-glibc",
- "linux-gnueabi": "common-linux common-glibc",
- "linux-gnuspe": "common-linux common-glibc",
- "linux-musl": "common-linux common-musl",
- "linux-muslx32": "common-linux common-musl",
- "linux-musleabi": "common-linux common-musl",
- "linux-muslspe": "common-linux common-musl",
- "uclinux-uclibc": "common-uclibc",
- "cygwin": "common-cygwin",
- "mingw32": "common-mingw",
- }
- targetinfo = {
- "aarch64-linux-gnu": "aarch64-linux",
- "aarch64_be-linux-gnu": "aarch64_be-linux",
- "aarch64-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
- "aarch64_be-linux-gnu_ilp32": "bit-32 aarch64_be-linux arm-32",
- "aarch64-linux-musl": "aarch64-linux",
- "aarch64_be-linux-musl": "aarch64_be-linux",
- "arm-linux-gnueabi": "arm-linux",
- "arm-linux-musleabi": "arm-linux",
- "armeb-linux-gnueabi": "armeb-linux",
- "armeb-linux-musleabi": "armeb-linux",
- "microblazeel-linux" : "microblaze-linux",
- "microblazeel-linux-musl" : "microblaze-linux",
- "mips-linux-musl": "mips-linux",
- "mipsel-linux-musl": "mipsel-linux",
- "mips64-linux-musl": "mips64-linux",
- "mips64el-linux-musl": "mips64el-linux",
- "mips64-linux-gnun32": "mips-linux bit-32",
- "mips64el-linux-gnun32": "mipsel-linux bit-32",
- "mipsisa64r6-linux-gnun32": "mipsisa32r6-linux bit-32",
- "mipsisa64r6el-linux-gnun32": "mipsisa32r6el-linux bit-32",
- "powerpc-linux": "powerpc32-linux",
- "powerpc-linux-musl": "powerpc-linux powerpc32-linux",
- "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
- "powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
- "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux": "powerpc-linux powerpc64-linux",
- "powerpc64-linux-musl": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux": "powerpc-linux powerpc64-linux",
- "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux",
- "riscv32-linux": "riscv32-linux",
- "riscv32-linux-musl": "riscv32-linux",
- "riscv64-linux": "riscv64-linux",
- "riscv64-linux-musl": "riscv64-linux",
- "x86_64-cygwin": "bit-64",
- "x86_64-darwin": "bit-64",
- "x86_64-darwin9": "bit-64",
- "x86_64-linux": "bit-64",
- "x86_64-linux-musl": "x86_64-linux bit-64",
- "x86_64-linux-muslx32": "bit-32 ix86-common x32-linux",
- "x86_64-elf": "bit-64",
- "x86_64-linux-gnu": "bit-64 x86_64-linux",
- "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
- "x86_64-mingw32": "bit-64",
- }
-
- # Add in any extra user supplied data which may come from a BSP layer, removing the
- # need to always change this class directly
- extra_siteinfo = (d.getVar("SITEINFO_EXTRA_DATAFUNCS") or "").split()
- for m in extra_siteinfo:
- call = m + "(archinfo, osinfo, targetinfo, d)"
- locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
- archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
-
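Each function named in SITEINFO_EXTRA_DATAFUNCS is therefore called with the three tables and the datastore and must return them (possibly extended). A hedged sketch of such a hook, using a made-up architecture purely for illustration:

    # Hypothetical helper provided by a BSP layer (e.g. in its lib/ directory):
    def my_bsp_siteinfo(archinfo, osinfo, targetinfo, d):
        # Teach siteinfo about a fictional "myarch" architecture.
        archinfo["myarch"] = "endian-little bit-32"
        targetinfo["myarch-linux-musl"] = "myarch-linux"
        return archinfo, osinfo, targetinfo

    # Enabled from configuration with, for example:
    #   SITEINFO_EXTRA_DATAFUNCS += "my_bsp_siteinfo"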
- target = "%s-%s" % (arch, os)
-
- sitedata = []
- if arch in archinfo:
- sitedata.extend(archinfo[arch].split())
- if os in osinfo:
- sitedata.extend(osinfo[os].split())
- if target in targetinfo:
- sitedata.extend(targetinfo[target].split())
- sitedata.append(target)
- sitedata.append("common")
-
- bb.debug(1, "SITE files %s" % sitedata);
- return sitedata
-
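To make the result concrete, here is roughly what the list looks like for aarch64-linux-musl, assembled the same way from cut-down copies of the tables above (a standalone sketch, not part of the class):

    archinfo = {"aarch64": "endian-little bit-64 arm-common arm-64"}
    osinfo = {"linux-musl": "common-linux common-musl"}
    targetinfo = {"aarch64-linux-musl": "aarch64-linux"}

    arch, target_os = "aarch64", "linux-musl"
    target = "%s-%s" % (arch, target_os)
    sitedata = (archinfo[arch].split() + osinfo[target_os].split()
                + targetinfo[target].split() + [target, "common"])
    print(sitedata)
    # ['endian-little', 'bit-64', 'arm-common', 'arm-64', 'common-linux',
    #  'common-musl', 'aarch64-linux', 'aarch64-linux-musl', 'common']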
-def siteinfo_data(d):
- return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
-
-python () {
- sitedata = set(siteinfo_data(d))
- if "endian-little" in sitedata:
- d.setVar("SITEINFO_ENDIANNESS", "le")
- elif "endian-big" in sitedata:
- d.setVar("SITEINFO_ENDIANNESS", "be")
- else:
- bb.error("Unable to determine endianness for architecture '%s'" %
- d.getVar("HOST_ARCH"))
- bb.fatal("Please add your architecture to siteinfo.bbclass")
-
- if "bit-32" in sitedata:
- d.setVar("SITEINFO_BITS", "32")
- elif "bit-64" in sitedata:
- d.setVar("SITEINFO_BITS", "64")
- else:
- bb.error("Unable to determine bit size for architecture '%s'" %
- d.getVar("HOST_ARCH"))
- bb.fatal("Please add your architecture to siteinfo.bbclass")
-}
-
-def siteinfo_get_files(d, sysrootcache = False):
- sitedata = siteinfo_data(d)
- sitefiles = ""
- for path in d.getVar("BBPATH").split(":"):
- for element in sitedata:
- filename = os.path.join(path, "site", element)
- if os.path.exists(filename):
- sitefiles += filename + " "
-
- if not sysrootcache:
- return sitefiles
-
- # Now check for siteconfig cache files in sysroots
- path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE')
- if path_siteconfig and os.path.isdir(path_siteconfig):
- for i in os.listdir(path_siteconfig):
- if not i.endswith("_config"):
- continue
- filename = os.path.join(path_siteconfig, i)
- sitefiles += filename + " "
- return sitefiles
-
-#
-# Make some information available via variables
-#
-SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
diff --git a/meta/classes/spdx.bbclass b/meta/classes/spdx.bbclass
deleted file mode 100644
index fb78e274a8..0000000000
--- a/meta/classes/spdx.bbclass
+++ /dev/null
@@ -1,360 +0,0 @@
-# This class integrates real-time license scanning, generation of SPDX standard
-# output and verifying license info during the build process.
-# It is a combination of efforts from the OE-Core, SPDX and Fossology projects.
-#
-# For more information on FOSSology:
-# http://www.fossology.org
-#
-# For more information on FOSSologySPDX commandline:
-# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API
-#
-# For more information on SPDX:
-# http://www.spdx.org
-#
-
-# The SPDX file will be output to the path defined as [SPDX_MANIFEST_DIR]
-# in ./meta/conf/licenses.conf.
-
-SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir"
-
-# If ${S} isn't actually the top-level source directory, set SPDX_S to point at
-# the real top-level directory.
-SPDX_S ?= "${S}"
-
-python do_spdx () {
- import os, sys
- import json, shutil
-
- info = {}
- info['workdir'] = d.getVar('WORKDIR')
- info['sourcedir'] = d.getVar('SPDX_S')
- info['pn'] = d.getVar('PN')
- info['pv'] = d.getVar('PV')
- info['spdx_version'] = d.getVar('SPDX_VERSION')
- info['data_license'] = d.getVar('DATA_LICENSE')
-
- sstatedir = d.getVar('SPDXSSTATEDIR')
- sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx")
-
- manifest_dir = d.getVar('SPDX_MANIFEST_DIR')
- info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" )
-
- info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR')
- info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" )
-
- # Make sure important dirs exist
- try:
- bb.utils.mkdirhier(manifest_dir)
- bb.utils.mkdirhier(sstatedir)
- bb.utils.mkdirhier(info['spdx_temp_dir'])
- except OSError as e:
- bb.error("SPDX: Could not set up required directories: " + str(e))
- return
-
- ## get everything from cache. use it to decide if
- ## something needs to be rerun
- cur_ver_code = get_ver_code(info['sourcedir'])
- cache_cur = False
- if os.path.exists(sstatefile):
- ## cache for this package exists. read it in
- cached_spdx = get_cached_spdx(sstatefile)
-
- if cached_spdx['PackageVerificationCode'] == cur_ver_code:
- bb.warn("SPDX: Verification code for " + info['pn']
- + "is same as cache's. do nothing")
- cache_cur = True
- else:
- local_file_info = setup_foss_scan(info, True, cached_spdx['Files'])
- else:
- local_file_info = setup_foss_scan(info, False, None)
-
- if cache_cur:
- spdx_file_info = cached_spdx['Files']
- foss_package_info = cached_spdx['Package']
- foss_license_info = cached_spdx['Licenses']
- else:
- ## setup fossology command
- foss_server = d.getVar('FOSS_SERVER')
- foss_flags = d.getVar('FOSS_WGET_FLAGS')
- foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False
- foss_command = "wget %s --post-file=%s %s"\
- % (foss_flags, info['tar_file'], foss_server)
-
- foss_result = run_fossology(foss_command, foss_full_spdx)
- if foss_result is not None:
- (foss_package_info, foss_file_info, foss_license_info) = foss_result
- spdx_file_info = create_spdx_doc(local_file_info, foss_file_info)
- ## write to cache
- write_cached_spdx(sstatefile, cur_ver_code, foss_package_info,
- spdx_file_info, foss_license_info)
- else:
- bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command)
- return
-
- ## Get document and package level information
- spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info)
-
- ## CREATE MANIFEST
- create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info)
-
- ## clean up the temp stuff
- shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True)
- if os.path.exists(info['tar_file']):
- remove_file(info['tar_file'])
-}
-addtask spdx after do_patch before do_configure
-
-def create_manifest(info, header, files, licenses):
- import codecs
- with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f:
- # Write header
- f.write(header + '\n')
-
- # Write file data
- for chksum, block in files.items():
- f.write("FileName: " + block['FileName'] + '\n')
- for key, value in block.items():
- if not key == 'FileName':
- f.write(key + ": " + value + '\n')
- f.write('\n')
-
- # Write license data
- for id, block in licenses.items():
- f.write("LicenseID: " + id + '\n')
- for key, value in block.items():
- f.write(key + ": " + value + '\n')
- f.write('\n')
-
-def get_cached_spdx(sstatefile):
- import json
- import codecs
- cached_spdx_info = {}
- with codecs.open(sstatefile, mode='r', encoding='utf-8') as f:
- try:
- cached_spdx_info = json.load(f)
- except ValueError as e:
- cached_spdx_info = None
- return cached_spdx_info
-
-def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info):
- import json
- import codecs
- spdx_doc = {}
- spdx_doc['PackageVerificationCode'] = ver_code
- spdx_doc['Files'] = {}
- spdx_doc['Files'] = files
- spdx_doc['Package'] = {}
- spdx_doc['Package'] = package_info
- spdx_doc['Licenses'] = {}
- spdx_doc['Licenses'] = license_info
- with codecs.open(sstatefile, mode='w', encoding='utf-8') as f:
- f.write(json.dumps(spdx_doc))
-
-def setup_foss_scan(info, cache, cached_files):
- import errno, shutil
- import tarfile
- file_info = {}
- cache_dict = {}
-
- for f_dir, f in list_files(info['sourcedir']):
- full_path = os.path.join(f_dir, f)
- abs_path = os.path.join(info['sourcedir'], full_path)
- dest_dir = os.path.join(info['spdx_temp_dir'], f_dir)
- dest_path = os.path.join(info['spdx_temp_dir'], full_path)
-
- checksum = hash_file(abs_path)
- if not checksum is None:
- file_info[checksum] = {}
- ## retain cache information if it exists
- if cache and checksum in cached_files:
- file_info[checksum] = cached_files[checksum]
- ## have the file included in what's sent to the FOSSology server
- else:
- file_info[checksum]['FileName'] = full_path
- try:
- bb.utils.mkdirhier(dest_dir)
- shutil.copyfile(abs_path, dest_path)
- except OSError as e:
- bb.warn("SPDX: mkdirhier failed: " + str(e))
- except shutil.Error as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- except IOError as e:
- bb.warn("SPDX: copyfile failed: " + str(e))
- else:
- bb.warn("SPDX: Could not get checksum for file: " + f)
-
- with tarfile.open(info['tar_file'], "w:gz") as tar:
- tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir']))
-
- return file_info
-
-def remove_file(file_name):
- try:
- os.remove(file_name)
- except OSError as e:
- pass
-
-def list_files(dir):
- for root, subFolders, files in os.walk(dir):
- for f in files:
- rel_root = os.path.relpath(root, dir)
- yield rel_root, f
- return
-
-def hash_file(file_name):
- from bb.utils import sha1_file
- return sha1_file(file_name)
-
-def hash_string(data):
- import hashlib
- sha1 = hashlib.sha1()
- sha1.update(data.encode('utf-8'))
- return sha1.hexdigest()
-
-def run_fossology(foss_command, full_spdx):
- import string, re
- import subprocess
-
- try:
- foss_output = subprocess.check_output(foss_command.split(),
- stderr=subprocess.STDOUT).decode('utf-8')
- except subprocess.CalledProcessError as e:
- return None
-
- foss_output = foss_output.replace('\r', '')
-
- # Package info
- package_info = {}
- if full_spdx:
- # All mandatory, only one occurrence
- package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
- package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
- package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
- # These may be more than one
- package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output)
- else:
- DEFAULT = "NOASSERTION"
- package_info['PackageCopyrightText'] = "<text>" + DEFAULT + "</text>"
- package_info['PackageLicenseDeclared'] = DEFAULT
- package_info['PackageLicenseConcluded'] = DEFAULT
- package_info['PackageLicenseInfoFromFiles'] = []
-
- # File info
- file_info = {}
- records = []
- # FileName is also in PackageFileName, so we match on FileType as well.
- records = re.findall('FileName:.*?FileType:.*?</text>', foss_output, re.S)
- for rec in records:
- chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0]
- file_info[chksum] = {}
- file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: '
- + '(.*?</text>)', rec, re.S )[0]
- fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile']
- for field in fields:
- file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0]
-
- # Licenses
- license_info = {}
- licenses = []
- licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S)
- for lic in licenses:
- license_id = re.findall('LicenseID: (.*)\n', lic)[0]
- license_info[license_id] = {}
- license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?</text>)', lic, re.S)[0]
- license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0]
-
- return (package_info, file_info, license_info)
-
-def create_spdx_doc(file_info, scanned_files):
- import json
- ## push foss changes back into cache
- for chksum, lic_info in scanned_files.items():
- if chksum in file_info:
- file_info[chksum]['FileType'] = lic_info['FileType']
- file_info[chksum]['FileChecksum: SHA1'] = chksum
- file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile']
- file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded']
- file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText']
- else:
- bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum
- + " : is not in the local file info: "
- + json.dumps(lic_info, indent=1))
- return file_info
-
-def get_ver_code(dirname):
- chksums = []
- for f_dir, f in list_files(dirname):
- path = os.path.join(dirname, f_dir, f)
- hash = hash_file(path)
- if not hash is None:
- chksums.append(hash)
- else:
- bb.warn("SPDX: Could not hash file: " + path)
- ver_code_string = ''.join(chksums).lower()
- ver_code = hash_string(ver_code_string)
- return ver_code
-
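In other words, the package verification code is a SHA-1 over the concatenation of the individual file SHA-1s, mirroring the logic above; a self-contained sketch with invented file contents:

    import hashlib

    file_hashes = [
        hashlib.sha1(b"contents of file one").hexdigest(),
        hashlib.sha1(b"contents of file two").hexdigest(),
    ]
    ver_code = hashlib.sha1("".join(file_hashes).lower().encode("utf-8")).hexdigest()
    print(ver_code)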
-def get_header_info(info, spdx_verification_code, package_info):
- """
- Put together the header SPDX information.
- Eventually this needs to become a lot less
- of a hardcoded thing.
- """
- from datetime import datetime
- import os
- head = []
- DEFAULT = "NOASSERTION"
-
- package_checksum = hash_file(info['tar_file'])
- if package_checksum is None:
- package_checksum = DEFAULT
-
- ## document level information
- head.append("## SPDX Document Information")
- head.append("SPDXVersion: " + info['spdx_version'])
- head.append("DataLicense: " + info['data_license'])
- head.append("DocumentComment: <text>SPDX for "
- + info['pn'] + " version " + info['pv'] + "</text>")
- head.append("")
-
- ## Creator information
- ## Note that this does not give time in UTC.
- now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
- head.append("## Creation Information")
- ## Tools are supposed to have a version, but FOSSology+SPDX provides none.
- head.append("Creator: Tool: FOSSology+SPDX")
- head.append("Created: " + now)
- head.append("CreatorComment: <text>UNO</text>")
- head.append("")
-
- ## package level information
- head.append("## Package Information")
- head.append("PackageName: " + info['pn'])
- head.append("PackageVersion: " + info['pv'])
- head.append("PackageFileName: " + os.path.basename(info['tar_file']))
- head.append("PackageSupplier: Person:" + DEFAULT)
- head.append("PackageDownloadLocation: " + DEFAULT)
- head.append("PackageSummary: <text></text>")
- head.append("PackageOriginator: Person:" + DEFAULT)
- head.append("PackageChecksum: SHA1: " + package_checksum)
- head.append("PackageVerificationCode: " + spdx_verification_code)
- head.append("PackageDescription: <text>" + info['pn']
- + " version " + info['pv'] + "</text>")
- head.append("")
- head.append("PackageCopyrightText: "
- + package_info['PackageCopyrightText'])
- head.append("")
- head.append("PackageLicenseDeclared: "
- + package_info['PackageLicenseDeclared'])
- head.append("PackageLicenseConcluded: "
- + package_info['PackageLicenseConcluded'])
-
- for licref in package_info['PackageLicenseInfoFromFiles']:
- head.append("PackageLicenseInfoFromFiles: " + licref)
- head.append("")
-
- ## header for file level
- head.append("## File Information")
- head.append("")
-
- return '\n'.join(head)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
deleted file mode 100644
index 375196ef21..0000000000
--- a/meta/classes/sstate.bbclass
+++ /dev/null
@@ -1,1217 +0,0 @@
-SSTATE_VERSION = "3"
-
-SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
-SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
-
-def generate_sstatefn(spec, hash, taskname, siginfo, d):
- if taskname is None:
- return ""
- extension = ".tgz"
- # 8 chars reserved for siginfo
- limit = 254 - 8
- if siginfo:
- limit = 254
- extension = ".tgz.siginfo"
- if not hash:
- hash = "INVALID"
- fn = spec + hash + "_" + taskname + extension
- # If the filename is too long, attempt to reduce it
- if len(fn) > limit:
- components = spec.split(":")
- # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
- # 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
- components[2] = components[2][:avail]
- components[3] = components[3][:avail]
- components[4] = components[4][:avail]
- spec = ":".join(components)
- fn = spec + hash + "_" + taskname + extension
- if len(fn) > limit:
- bb.fatal("Unable to reduce sstate name to less than 255 chararacters")
- return hash[:2] + "/" + hash[2:4] + "/" + fn
-
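To make the naming concrete, the final path has the form <hash[0:2]>/<hash[2:4]>/<spec><hash>_<task>.tgz, shortened only when it would exceed the filename length limit. A plain-Python sketch with an invented spec and hash:

    fake_hash = "0123456789abcdef" * 4  # stand-in for BB_UNIHASH
    spec = "sstate:zlib:core2-64-poky-linux:1.2.13:r0:core2-64:3:"
    taskname = "populate_sysroot"

    fn = spec + fake_hash + "_" + taskname + ".tgz"
    path = fake_hash[:2] + "/" + fake_hash[2:4] + "/" + fn  # two-level fan-out under SSTATE_DIR
    print(len(fn), path)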
-SSTATE_PKGARCH = "${PACKAGE_ARCH}"
-SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
-SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
-SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
-SSTATE_EXTRAPATH = ""
-SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
-
-# explicitly make PV to depend on evaluated value of PV variable
-PV[vardepvalue] = "${PV}"
-
-# We don't want the sstate to depend on things like the distro string
-# of the system, we let the sstate paths take care of this.
-SSTATE_EXTRAPATH[vardepvalue] = ""
-SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
-
-# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
-# Avoid docbook/sgml catalog warnings for now
-SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
-# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
-# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
-SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
-# Archive the sources for many architectures in one deploy folder
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
-# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
-
-SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
-SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
-SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
-
-BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
-
-SSTATE_ARCHS = " \
- ${BUILD_ARCH} \
- ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
- ${BUILD_ARCH}_${TARGET_ARCH} \
- ${SDK_ARCH}_${SDK_OS} \
- ${SDK_ARCH}_${PACKAGE_ARCH} \
- allarch \
- ${PACKAGE_ARCH} \
- ${PACKAGE_EXTRA_ARCHS} \
- ${MACHINE_ARCH}"
-
-SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-
-SSTATECREATEFUNCS = "sstate_hardcode_path"
-SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
-SSTATEPOSTCREATEFUNCS = ""
-SSTATEPREINSTFUNCS = ""
-SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
-SSTATEPOSTINSTFUNCS = ""
-EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
-
-# Check whether sstate exists for tasks that support sstate and are in the
-# locked signatures file.
-SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
-
-# Check whether the task's computed hash matches the task's hash in the
-# locked signatures file.
-SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
-
-# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
-# not sign)
-SSTATE_SIG_KEY ?= ""
-SSTATE_SIG_PASSPHRASE ?= ""
-# Whether to verify the GnuPG signatures when extracting sstate archives
-SSTATE_VERIFY_SIG ?= "0"
-
-SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
-SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
- the output hash for a task, which in turn is used to determine equivalency. \
- "
-
-SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
-SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
- hash equivalency server, such as PN, PV, taskname, etc. This information \
- is very useful for developers looking at task data, but may leak sensitive \
- data if the equivalence server is public. \
- "
-
-python () {
- if bb.data.inherits_class('native', d):
- d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
- elif bb.data.inherits_class('crosssdk', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
- elif bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
- elif bb.data.inherits_class('nativesdk', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
- elif bb.data.inherits_class('cross-canadian', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
- elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
- d.setVar('SSTATE_PKGARCH', "allarch")
- else:
- d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
-
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
- d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
- d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
-
- unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
- d.setVar('SSTATETASKS', " ".join(unique_tasks))
- for task in unique_tasks:
- d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
- d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
-}
-
-def sstate_init(task, d):
- ss = {}
- ss['task'] = task
- ss['dirs'] = []
- ss['plaindirs'] = []
- ss['lockfiles'] = []
- ss['lockfiles-shared'] = []
- return ss
-
-def sstate_state_fromvars(d, task = None):
- if task is None:
- task = d.getVar('BB_CURRENTTASK')
- if not task:
- bb.fatal("sstate code running without task context?!")
- task = task.replace("_setscene", "")
-
- if task.startswith("do_"):
- task = task[3:]
- inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
- outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
- plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
- lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
- lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
- interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
- fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
- if not task or len(inputs) != len(outputs):
- bb.fatal("sstate variables not setup correctly?!")
-
- if task == "populate_lic":
- d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
- d.setVar("SSTATE_EXTRAPATH", "")
- d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
-
- ss = sstate_init(task, d)
- for i in range(len(inputs)):
- sstate_add(ss, inputs[i], outputs[i], d)
- ss['lockfiles'] = lockfiles
- ss['lockfiles-shared'] = lockfilesshared
- ss['plaindirs'] = plaindirs
- ss['interceptfuncs'] = interceptfuncs
- ss['fixmedir'] = fixmedir
- return ss
-
-def sstate_add(ss, source, dest, d):
- if not source.endswith("/"):
- source = source + "/"
- if not dest.endswith("/"):
- dest = dest + "/"
- source = os.path.normpath(source)
- dest = os.path.normpath(dest)
- srcbase = os.path.basename(source)
- ss['dirs'].append([srcbase, source, dest])
- return ss
-
-def sstate_install(ss, d):
- import oe.path
- import oe.sstatesig
- import subprocess
-
- sharedfiles = []
- shareddirs = []
- bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
-
- sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
-
- manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
-
- if os.access(manifest, os.R_OK):
- bb.fatal("Package already staged (%s)?!" % manifest)
-
- d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
-
- locks = []
- for lock in ss['lockfiles-shared']:
- locks.append(bb.utils.lockfile(lock, True))
- for lock in ss['lockfiles']:
- locks.append(bb.utils.lockfile(lock))
-
- for state in ss['dirs']:
- bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
- for walkroot, dirs, files in os.walk(state[1]):
- for file in files:
- srcpath = os.path.join(walkroot, file)
- dstpath = srcpath.replace(state[1], state[2])
- #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
- sharedfiles.append(dstpath)
- for dir in dirs:
- srcdir = os.path.join(walkroot, dir)
- dstdir = srcdir.replace(state[1], state[2])
- #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
- if os.path.islink(srcdir):
- sharedfiles.append(dstdir)
- continue
- if not dstdir.endswith("/"):
- dstdir = dstdir + "/"
- shareddirs.append(dstdir)
-
- # Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
- match = []
- for f in sharedfiles:
- if os.path.exists(f) and not os.path.islink(f):
- f = os.path.normpath(f)
- realmatch = True
- for w in whitelist:
- w = os.path.normpath(w)
- if f.startswith(w):
- realmatch = False
- break
- if realmatch:
- match.append(f)
- sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
- search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
- if search_output:
- match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
- else:
- match.append(" (not matched to any task)")
- if match:
- bb.error("The recipe %s is trying to install files into a shared " \
- "area when those files already exist. Those files and their manifest " \
- "location are:\n %s\nPlease verify which recipe should provide the " \
- "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
- "break things - if not now, possibly in the future (we've seen builds fail " \
- "several months later). If the system knew how to recover from this " \
- "automatically it would, however there are several different scenarios " \
- "which can result in this and we don't know which one this is. It may be " \
- "you have switched providers of something like virtual/kernel (e.g. from " \
- "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
- "clean task for both recipes and it will resolve this error. It may be " \
- "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
- "those recipes should again resolve this error, however switching " \
- "DISTRO_FEATURES on an existing build directory is not supported - you " \
- "should really clean out tmp and rebuild (reusing sstate should be safe). " \
- "It could be the overlapping files detected are harmless in which case " \
- "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
- "also be your build is including two different conflicting versions of " \
- "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
- "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
- "sharing the error and filelist above." % \
- (d.getVar('PN'), "\n ".join(match)))
- bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
-
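The whitelist comparison driving this error is a simple normalized-prefix test; a standalone sketch with placeholder paths:

    import os

    whitelist = ["/deploy/licenses/", "/deploy/images/ovmf"]
    candidate = "/deploy/licenses/zlib/COPYING"

    normalized = os.path.normpath(candidate)
    allowed = any(normalized.startswith(os.path.normpath(w)) for w in whitelist)
    print("overlap allowed" if allowed else "overlap reported")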
- if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
- sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
- sharedfiles.append(ss['fixmedir'] + "/fixmepath")
-
- # Write out the manifest
- f = open(manifest, "w")
- for file in sharedfiles:
- f.write(file + "\n")
-
- # We want to ensure that directories appear at the end of the manifest
- # so that when we test to see if they should be deleted any contents
- # added by the task will have been removed first.
- dirs = sorted(shareddirs, key=len)
- # Must remove children first, which will have a longer path than the parent
- for di in reversed(dirs):
- f.write(di + "\n")
- f.close()
-
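The length sort plus reversed() walk ensures child directories are written (and later removed) before their parents; a tiny sketch with made-up paths:

    shareddirs = ["/sysroot/usr/", "/sysroot/usr/include/", "/sysroot/usr/include/zlib/"]

    for dirpath in reversed(sorted(shareddirs, key=len)):
        print("would remove (if empty):", dirpath)
    # Deepest directory prints first, so a later rmdir of its parent can succeed.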
- # Append to the list of manifests for this PACKAGE_ARCH
-
- i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
- l = bb.utils.lockfile(i + ".lock")
- filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
- manifests = []
- if os.path.exists(i):
- with open(i, "r") as f:
- manifests = f.readlines()
- if filedata not in manifests:
- with open(i, "a+") as f:
- f.write(filedata)
- bb.utils.unlockfile(l)
-
- # Run the actual file install
- for state in ss['dirs']:
- if os.path.exists(state[1]):
- oe.path.copyhardlinktree(state[1], state[2])
-
- for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
- # All hooks should run in the SSTATE_INSTDIR
- bb.build.exec_func(postinst, d, (sstateinst,))
-
- for lock in locks:
- bb.utils.unlockfile(lock)
-
-sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
-sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
-
-def sstate_installpkg(ss, d):
- from oe.gpg_sign import get_signer
-
- sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- d.setVar("SSTATE_CURRTASK", ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME')
- sstatepkg = d.getVar('SSTATE_PKG')
-
- if not os.path.exists(sstatepkg):
- pstaging_fetch(sstatefetch, d)
-
- if not os.path.isfile(sstatepkg):
- bb.note("Sstate package %s does not exist" % sstatepkg)
- return False
-
- sstate_clean(ss, d)
-
- d.setVar('SSTATE_INSTDIR', sstateinst)
-
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
- if not os.path.isfile(sstatepkg + '.sig'):
- bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
- return False
- signer = get_signer(d, 'local')
- if not signer.verify(sstatepkg + '.sig'):
- bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
- return False
-
- # Empty the sstateinst directory to ensure it's clean
- if os.path.exists(sstateinst):
- oe.path.remove(sstateinst)
- bb.utils.mkdirhier(sstateinst)
-
- sstateinst = d.getVar("SSTATE_INSTDIR")
- d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
-
- for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
- # All hooks should run in the SSTATE_INSTDIR
- bb.build.exec_func(f, d, (sstateinst,))
-
- return sstate_installpkgdir(ss, d)
-
-def sstate_installpkgdir(ss, d):
- import oe.path
- import subprocess
-
- sstateinst = d.getVar("SSTATE_INSTDIR")
- d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
-
- for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
- # All hooks should run in the SSTATE_INSTDIR
- bb.build.exec_func(f, d, (sstateinst,))
-
- def prepdir(dir):
- # remove dir if it exists, ensure any parent directories do exist
- if os.path.exists(dir):
- oe.path.remove(dir)
- bb.utils.mkdirhier(dir)
- oe.path.remove(dir)
-
- for state in ss['dirs']:
- prepdir(state[1])
- os.rename(sstateinst + state[0], state[1])
- sstate_install(ss, d)
-
- for plain in ss['plaindirs']:
- workdir = d.getVar('WORKDIR')
- sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
- src = sstateinst + "/" + plain.replace(workdir, '')
- if sharedworkdir in plain:
- src = sstateinst + "/" + plain.replace(sharedworkdir, '')
- dest = plain
- bb.utils.mkdirhier(src)
- prepdir(dest)
- os.rename(src, dest)
-
- return True
-
-python sstate_hardcode_path_unpack () {
- # Fixup hardcoded paths
- #
- # Note: The logic below must match the reverse logic in
- # sstate_hardcode_path(d)
- import subprocess
-
- sstateinst = d.getVar('SSTATE_INSTDIR')
- sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
- fixmefn = sstateinst + "fixmepath"
- if os.path.isfile(fixmefn):
- staging_target = d.getVar('RECIPE_SYSROOT')
- staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
-
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
- elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
- else:
- sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
-
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
- for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar)
- sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
-
- # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
- sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
-
- # Defer do_populate_sysroot relocation command
- if sstatefixmedir:
- bb.utils.mkdirhier(sstatefixmedir)
- with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
- sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
- sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
- sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
- sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
- f.write(sstate_hardcode_cmd)
- bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
- return
-
- bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
- subprocess.check_call(sstate_hardcode_cmd, shell=True)
-
- # Need to remove this or we'd copy it into the target directory and may
- # conflict with another writer
- os.remove(fixmefn)
-}
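Packing replaces absolute sysroot paths with FIXMESTAGINGDIR* placeholders and unpacking reverses that; a toy round trip of the substitution (the path is a placeholder):

    staging_target = "/build/tmp/work/recipe/recipe-sysroot"  # example RECIPE_SYSROOT

    packed = "prefix=FIXMESTAGINGDIRTARGET/usr"  # as stored inside the sstate archive
    restored = packed.replace("FIXMESTAGINGDIRTARGET", staging_target)
    print(restored)  # prefix=/build/tmp/work/recipe/recipe-sysroot/usr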
-
-def sstate_clean_cachefile(ss, d):
- import oe.path
-
- if d.getVarFlag('do_%s' % ss['task'], 'task'):
- d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
- bb.note("Removing %s" % sstatepkgfile)
- oe.path.remove(sstatepkgfile)
-
-def sstate_clean_cachefiles(d):
- for task in (d.getVar('SSTATETASKS') or "").split():
- ld = d.createCopy()
- ss = sstate_state_fromvars(ld, task)
- sstate_clean_cachefile(ss, ld)
-
-def sstate_clean_manifest(manifest, d, prefix=None):
- import oe.path
-
- mfile = open(manifest)
- entries = mfile.readlines()
- mfile.close()
-
- for entry in entries:
- entry = entry.strip()
- if prefix and not entry.startswith("/"):
- entry = prefix + "/" + entry
- bb.debug(2, "Removing manifest: %s" % entry)
- # We can race against another package populating directories as we're removing them
- # so we ignore errors here.
- try:
- if entry.endswith("/"):
- if os.path.islink(entry[:-1]):
- os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
- os.rmdir(entry[:-1])
- else:
- os.remove(entry)
- except OSError:
- pass
-
- postrm = manifest + ".postrm"
- if os.path.exists(manifest + ".postrm"):
- import subprocess
- os.chmod(postrm, 0o755)
- subprocess.check_call(postrm, shell=True)
- oe.path.remove(postrm)
-
- oe.path.remove(manifest)
-
-def sstate_clean(ss, d):
- import oe.path
- import glob
-
- d2 = d.createCopy()
- stamp_clean = d.getVar("STAMPCLEAN")
- extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
- if extrainf:
- d2.setVar("SSTATE_MANMACH", extrainf)
- wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
- else:
- wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
-
- manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
-
- if os.path.exists(manifest):
- locks = []
- for lock in ss['lockfiles-shared']:
- locks.append(bb.utils.lockfile(lock))
- for lock in ss['lockfiles']:
- locks.append(bb.utils.lockfile(lock))
-
- sstate_clean_manifest(manifest, d)
-
- for lock in locks:
- bb.utils.unlockfile(lock)
-
- # Remove the current and previous stamps, but keep the sigdata.
- #
- # The glob() matches do_task* which may match multiple tasks, for
- # example: do_package and do_package_write_ipk, so we need to
- # exactly match *.do_task.* and *.do_task_setscene.*
- rm_stamp = '.do_%s.' % ss['task']
- rm_setscene = '.do_%s_setscene.' % ss['task']
- # For BB_SIGNATURE_HANDLER = "noop"
- rm_nohash = ".do_%s" % ss['task']
- for stfile in glob.glob(wildcard_stfile):
- # Keep the sigdata
- if ".sigdata." in stfile or ".sigbasedata." in stfile:
- continue
- # Preserve taint files in the stamps directory
- if stfile.endswith('.taint'):
- continue
- if rm_stamp in stfile or rm_setscene in stfile or \
- stfile.endswith(rm_nohash):
- oe.path.remove(stfile)
-
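As an aside, the exact-substring checks above are what narrow the do_task* glob back down to the intended task while preserving signature data and taint files; a minimal illustration with invented stamp names:

    task = "package"
    stamps = [
        "stamp.do_package.1234",
        "stamp.do_package_setscene.1234",
        "stamp.do_package_write_ipk.5678",     # matched by the glob but must be kept
        "stamp.do_package.1234.sigdata.abcd",  # signature data, always kept
    ]

    rm_stamp = ".do_%s." % task
    rm_setscene = ".do_%s_setscene." % task
    for stfile in stamps:
        keep = ".sigdata." in stfile or (rm_stamp not in stfile and rm_setscene not in stfile)
        print(("keep  " if keep else "remove") + " " + stfile)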
-sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
-
-CLEANFUNCS += "sstate_cleanall"
-
-python sstate_cleanall() {
- bb.note("Removing shared state for package %s" % d.getVar('PN'))
-
- manifest_dir = d.getVar('SSTATE_MANIFESTS')
- if not os.path.exists(manifest_dir):
- return
-
- tasks = d.getVar('SSTATETASKS').split()
- for name in tasks:
- ld = d.createCopy()
- shared_state = sstate_state_fromvars(ld, name)
- sstate_clean(shared_state, ld)
-}
-
-python sstate_hardcode_path () {
- import subprocess, platform
-
- # Need to remove hardcoded paths and fix these when we install the
- # staging packages.
- #
- # Note: the logic in this function needs to match the reverse logic
- # in sstate_installpkg(ss, d)
-
- staging_target = d.getVar('RECIPE_SYSROOT')
- staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
- sstate_builddir = d.getVar('SSTATE_BUILDDIR')
-
- sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
- sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
- elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
- sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
- sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
- else:
- sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
- sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
-
- extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
- for fixmevar in extra_staging_fixmes.split():
- fixme_path = d.getVar(fixmevar)
- sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
- sstate_grep_cmd += " -e '%s'" % (fixme_path)
-
- fixmefn = sstate_builddir + "fixmepath"
-
- sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
- sstate_filelist_cmd = "tee %s" % (fixmefn)
-
- # fixmepath file needs relative paths, drop sstate_builddir prefix
- sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
-
- xargs_no_empty_run_cmd = '--no-run-if-empty'
- if platform.system() == 'Darwin':
- xargs_no_empty_run_cmd = ''
-
- # Limit the fixpaths and sed operations based on the initial grep search
- # This has the side effect of making sure the vfs cache is hot
- sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
-
- bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
- subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
-
-    # If the fixmefn is empty, remove it.
- if os.stat(fixmefn).st_size == 0:
- os.remove(fixmefn)
- else:
- bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
- subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
-}
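
For readers unfamiliar with the fixmepath mechanism, the following hypothetical
Python sketch performs the same substitution as the grep/sed pipeline above; the
real class shells out so that SSTATE_SCAN_CMD can pre-filter candidate files, and
all paths shown here are invented:

    import os

    # Hypothetical values; in the class these come from RECIPE_SYSROOT,
    # RECIPE_SYSROOT_NATIVE and SSTATE_BUILDDIR.
    staging_target = "/build/tmp/work/recipe/recipe-sysroot"
    staging_host = "/build/tmp/work/recipe/recipe-sysroot-native"
    sstate_builddir = "/build/tmp/work/recipe/sstate-build-populate_sysroot/"

    fixmepath = []
    for root, dirs, files in os.walk(sstate_builddir):
        for name in files:
            path = os.path.join(root, name)
            if os.path.islink(path):
                continue
            with open(path, "rb") as f:
                data = f.read()
            if staging_target.encode() not in data and staging_host.encode() not in data:
                continue
            # Host paths first, mirroring the sed expression order above
            data = data.replace(staging_host.encode(), b"FIXMESTAGINGDIRHOST")
            data = data.replace(staging_target.encode(), b"FIXMESTAGINGDIRTARGET")
            with open(path, "wb") as f:
                f.write(data)
            # Record the path relative to the build dir, as the fixmepath file does
            fixmepath.append(path[len(sstate_builddir):])
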
-
-def sstate_package(ss, d):
- import oe.path
-
- tmpdir = d.getVar('TMPDIR')
-
- sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- d.setVar("SSTATE_CURRTASK", ss['task'])
- bb.utils.remove(sstatebuild, recurse=True)
- bb.utils.mkdirhier(sstatebuild)
- for state in ss['dirs']:
- if not os.path.exists(state[1]):
- continue
- srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
-        # Find absolute symlinks and error out on them. We could attempt to relocate them,
-        # but it's not clear what the symlink should be relative to in this context. We could
-        # add that markup to sstate tasks, but there aren't many of these, so it's better to
-        # avoid them entirely.
- for walkroot, dirs, files in os.walk(state[1]):
- for file in files + dirs:
- srcpath = os.path.join(walkroot, file)
- if not os.path.islink(srcpath):
- continue
- link = os.readlink(srcpath)
- if not os.path.isabs(link):
- continue
- if not link.startswith(tmpdir):
- continue
- bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
- bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- os.rename(state[1], sstatebuild + state[0])
-
- workdir = d.getVar('WORKDIR')
- sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
- for plain in ss['plaindirs']:
- pdir = plain.replace(workdir, sstatebuild)
- if sharedworkdir in plain:
- pdir = plain.replace(sharedworkdir, sstatebuild)
- bb.utils.mkdirhier(plain)
- bb.utils.mkdirhier(pdir)
- os.rename(plain, pdir)
-
- d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_INSTDIR', sstatebuild)
-
- if d.getVar('SSTATE_SKIP_CREATION') == '1':
- return
-
- sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
- if d.getVar('SSTATE_SIG_KEY'):
- sstate_create_package.append('sstate_sign_package')
-
- for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
- sstate_create_package + \
- (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
- # All hooks should run in SSTATE_BUILDDIR.
- bb.build.exec_func(f, d, (sstatebuild,))
-
- # SSTATE_PKG may have been changed by sstate_report_unihash
- siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
- if not os.path.exists(siginfo):
- bb.siggen.dump_this_task(siginfo, d)
- else:
- try:
- os.utime(siginfo, None)
- except PermissionError:
- pass
-
- return
-
-def pstaging_fetch(sstatefetch, d):
- import bb.fetch2
-
-    # Only try to fetch if the user has configured a mirror
- mirrors = d.getVar('SSTATE_MIRRORS')
- if not mirrors:
- return
-
- # Copy the data object and override DL_DIR and SRC_URI
- localdata = bb.data.createCopy(d)
-
- dldir = localdata.expand("${SSTATE_DIR}")
- bb.utils.mkdirhier(dldir)
-
- localdata.delVar('MIRRORS')
- localdata.setVar('FILESPATH', dldir)
- localdata.setVar('DL_DIR', dldir)
- localdata.setVar('PREMIRRORS', mirrors)
-
- # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
- # we'll want to allow network access for the current set of fetches.
- if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
- bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
- localdata.delVar('BB_NO_NETWORK')
-
-    # Try a fetch from the sstate mirror; if it fails, just return and
-    # we will build the package ourselves
- uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
- 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
- if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
- uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
-
- for srcuri in uris:
- localdata.setVar('SRC_URI', srcuri)
- try:
- fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
- fetcher.checkstatus()
- fetcher.download()
-
- except bb.fetch2.BBFetchException:
- pass
-
-def sstate_setscene(d):
- shared_state = sstate_state_fromvars(d)
- accelerate = sstate_installpkg(shared_state, d)
- if not accelerate:
- bb.fatal("No suitable staging package found")
-
-python sstate_task_prefunc () {
- shared_state = sstate_state_fromvars(d)
- sstate_clean(shared_state, d)
-}
-sstate_task_prefunc[dirs] = "${WORKDIR}"
-
-python sstate_task_postfunc () {
- shared_state = sstate_state_fromvars(d)
-
- for intercept in shared_state['interceptfuncs']:
- bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
-
- omask = os.umask(0o002)
- if omask != 0o002:
- bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
- sstate_package(shared_state, d)
- os.umask(omask)
-
- sstateinst = d.getVar("SSTATE_INSTDIR")
- d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
-
- sstate_installpkgdir(shared_state, d)
-
- bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
-}
-sstate_task_postfunc[dirs] = "${WORKDIR}"
-
-
-#
-# Shell function to generate an sstate package from a directory
-# set as SSTATE_BUILDDIR. It will be run from within SSTATE_BUILDDIR.
-#
-sstate_create_package () {
- # Exit early if it already exists
- if [ -e ${SSTATE_PKG} ]; then
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
- return
- fi
-
- mkdir -p `dirname ${SSTATE_PKG}`
- TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
-
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
- fi
-
- # Need to handle empty directories
- if [ "$(ls -A)" ]; then
- set +e
- tar $OPT -f $TFILE *
- ret=$?
- if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
- exit 1
- fi
- set -e
- else
- tar $OPT --file=$TFILE --files-from=/dev/null
- fi
- chmod 0664 $TFILE
- # Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
- # Move into place using ln to attempt an atomic op.
- # Abort if it already exists
- ln $TFILE ${SSTATE_PKG} && rm $TFILE
- else
- rm $TFILE
- fi
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
-}
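
The ln-then-rm dance above is what makes publishing into a shared SSTATE_DIR safe
when several builders race. A minimal Python rendering of the same idea, with
invented names and no claim to be the class's implementation:

    import os
    import tempfile

    def publish_atomically(content, final_path):
        # Write to a temporary file next to the destination, then hardlink it
        # into place. os.link() fails if final_path already exists, so whichever
        # builder wins the race publishes its file and the others just clean up.
        fd, tmp = tempfile.mkstemp(prefix=os.path.basename(final_path) + ".",
                                   dir=os.path.dirname(final_path))
        with os.fdopen(fd, "wb") as f:
            f.write(content)
        os.chmod(tmp, 0o664)
        try:
            os.link(tmp, final_path)
        except FileExistsError:
            pass  # another process already published an equivalent package
        finally:
            os.remove(tmp)
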
-
-python sstate_sign_package () {
- from oe.gpg_sign import get_signer
-
-
- signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG')
- if os.path.exists(sstate_pkg + '.sig'):
- os.unlink(sstate_pkg + '.sig')
- signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
-}
-
-python sstate_report_unihash() {
- report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
-
- if report_unihash:
- ss = sstate_state_fromvars(d)
- report_unihash(os.getcwd(), ss['task'], d)
-}
-
-#
-# Shell function to decompress and prepare a package for installation
-# Will be run from within SSTATE_INSTDIR.
-#
-sstate_unpack_package () {
- tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
-}
-
-BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-
-def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
- found = set()
- missed = set()
-
- def gethash(task):
- return sq_data['unihash'][task]
-
- def getpathcomponents(task, d):
- # Magic data from BB_HASHFILENAME
- splithashfn = sq_data['hashfn'][task].split(" ")
- spec = splithashfn[1]
- if splithashfn[0] == "True":
- extrapath = d.getVar("NATIVELSBSTRING") + "/"
- else:
- extrapath = ""
-
- tname = bb.runqueue.taskname_from_tid(task)[3:]
-
- if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
- spec = splithashfn[2]
- extrapath = ""
-
- return spec, extrapath, tname
-
-
- for tid in sq_data['hash']:
-
- spec, extrapath, tname = getpathcomponents(tid, d)
-
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
-
- if os.path.exists(sstatefile):
- bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
- found.add(tid)
- continue
- else:
- missed.add(tid)
- bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
-
- mirrors = d.getVar("SSTATE_MIRRORS")
- if mirrors:
- # Copy the data object and override DL_DIR and SRC_URI
- localdata = bb.data.createCopy(d)
-
- dldir = localdata.expand("${SSTATE_DIR}")
- localdata.delVar('MIRRORS')
- localdata.setVar('FILESPATH', dldir)
- localdata.setVar('DL_DIR', dldir)
- localdata.setVar('PREMIRRORS', mirrors)
-
- bb.debug(2, "SState using premirror of: %s" % mirrors)
-
- # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
- # we'll want to allow network access for the current set of fetches.
- if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
- bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
- localdata.delVar('BB_NO_NETWORK')
-
- from bb.fetch2 import FetchConnectionCache
- def checkstatus_init(thread_worker):
- thread_worker.connection_cache = FetchConnectionCache()
-
- def checkstatus_end(thread_worker):
- thread_worker.connection_cache.close_connections()
-
- def checkstatus(thread_worker, arg):
- (tid, sstatefile) = arg
-
- localdata2 = bb.data.createCopy(localdata)
- srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
- bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
-
- try:
- fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
- connection_cache=thread_worker.connection_cache)
- fetcher.checkstatus()
- bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
- found.add(tid)
- if tid in missed:
- missed.remove(tid)
- except:
- missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
- if len(tasklist) >= min_tasks:
- bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
-
- tasklist = []
- min_tasks = 100
- for tid in sq_data['hash']:
- if tid in found:
- continue
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
- tasklist.append((tid, sstatefile))
-
- if tasklist:
- if len(tasklist) >= min_tasks:
- msg = "Checking sstate mirror object availability"
- bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
-
- import multiprocessing
- nproc = min(multiprocessing.cpu_count(), len(tasklist))
-
- bb.event.enable_threadlock()
- pool = oe.utils.ThreadedPool(nproc, len(tasklist),
- worker_init=checkstatus_init, worker_end=checkstatus_end)
- for t in tasklist:
- pool.add_task(checkstatus, t)
- pool.start()
- pool.wait_completion()
- bb.event.disable_threadlock()
-
- if len(tasklist) >= min_tasks:
- bb.event.fire(bb.event.ProcessFinished(msg), d)
-
- inheritlist = d.getVar("INHERIT")
- if "toaster" in inheritlist:
- evdata = {'missed': [], 'found': []};
- for tid in missed:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
- evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
- for tid in found:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
- evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
- bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
-
- if summary:
- # Print some summary statistics about the current task completion and how much sstate
- # reuse there was. Avoid divide by zero errors.
- total = len(sq_data['hash'])
- complete = 0
- if currentcount:
- complete = (len(found) + currentcount) / (total + currentcount) * 100
- match = 0
- if total:
- match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
-
- if hasattr(bb.parse.siggen, "checkhashes"):
- bb.parse.siggen.checkhashes(sq_data, missed, found, d)
-
- return found
-
-BB_SETSCENE_DEPVALID = "setscene_depvalid"
-
-def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
- # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
- # task is included in taskdependees too
- # Return - False - We need this dependency
- # - True - We can skip this dependency
- import re
-
- def logit(msg, log):
- if log is not None:
- log.append(msg)
- else:
- bb.debug(2, msg)
-
- logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
-
- def isNativeCross(x):
- return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
-
- # We only need to trigger populate_lic through direct dependencies
- if taskdependees[task][1] == "do_populate_lic":
- return True
-
- # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
- if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
- return True
-
- # We only need to trigger packagedata through direct dependencies
- # but need to preserve packagedata on packagedata links
- if taskdependees[task][1] == "do_packagedata":
- for dep in taskdependees:
- if taskdependees[dep][1] == "do_packagedata":
- return False
- return True
-
- for dep in taskdependees:
- logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
- if task == dep:
- continue
- if dep in notneeded:
- continue
-        # do_package_write_* and do_package don't need do_package
- if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
- continue
- # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
- return False
- # do_package/packagedata/package_qa don't need do_populate_sysroot
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
- continue
- # Native/Cross packages don't exist and are noexec anyway
- if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
- continue
-
- # This is due to the [depends] in useradd.bbclass complicating matters
- # The logic *is* reversed here due to the way hard setscene dependencies are injected
- if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
- continue
-
- # Consider sysroot depending on sysroot tasks
- if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
-            # Allow excluding certain recursive dependencies. If a recipe needs one, it should
-            # add a specific dependency itself, rather than relying on one of its dependees to
-            # pull it in.
- # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
- not_needed = False
- excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
- if excludedeps is None:
- # Cache the regular expressions for speed
- excludedeps = []
- for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
- excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
- d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
- for excl in excludedeps:
- if excl[0].match(taskdependees[dep][0]):
- if excl[1].match(taskdependees[task][0]):
- not_needed = True
- break
- if not_needed:
- continue
- # For meta-extsdk-toolchain we want all sysroot dependencies
- if taskdependees[dep][0] == 'meta-extsdk-toolchain':
- return False
- # Native/Cross populate_sysroot need their dependencies
- if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
- return False
- # Target populate_sysroot depended on by cross tools need to be installed
- if isNativeCross(taskdependees[dep][0]):
- return False
- # Native/cross tools depended upon by target sysroot are not needed
- # Add an exception for shadow-native as required by useradd.bbclass
- if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
- continue
- # Target populate_sysroot need their dependencies
- return False
-
- if taskdependees[task][1] == 'do_shared_workdir':
- continue
-
- if taskdependees[dep][1] == "do_populate_lic":
- continue
-
-
- # Safe fallthrough default
- logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
- return False
- return True
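
setscene_depvalid() is also called directly from extend_recipe_sysroot() in
staging.bbclass, one dependency edge at a time. A hypothetical call showing the
return-value semantics (True means the edge can be skipped, False means the
dependency must be restored); the task identifiers are invented, and d is not
consulted on this particular path so None is enough for the sketch:

    # Hypothetical dependency edge: does example's do_image need example's
    # do_populate_lic restored from sstate? Values are [PN, TASKNAME, FILENAME].
    taskdeps = {
        "example.bb:do_populate_lic": ["example", "do_populate_lic", "example.bb"],
        "example.bb:do_image":        ["example", "do_image", "example.bb"],
    }
    log = []
    skippable = setscene_depvalid("example.bb:do_populate_lic", taskdeps, [], None, log)
    # skippable is True: populate_lic output is only needed through direct
    # dependencies, so the setscene machinery may omit it here.
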
-
-addhandler sstate_eventhandler
-sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
-python sstate_eventhandler() {
- d = e.data
- writtensstate = d.getVar('SSTATE_CURRTASK')
- if not writtensstate:
- taskname = d.getVar("BB_RUNTASK")[3:]
- spec = d.getVar('SSTATE_PKGSPEC')
- swspec = d.getVar('SSTATE_SWSPEC')
- if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
- d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
- d.setVar("SSTATE_EXTRAPATH", "")
- d.setVar("SSTATE_CURRTASK", taskname)
- siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
- if not os.path.exists(siginfo):
- bb.siggen.dump_this_task(siginfo, d)
- else:
- try:
- os.utime(siginfo, None)
- except PermissionError:
- pass
-
-}
-
-SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
-
-# Event handler which removes manifests and stamp files for
-# recipes which are no longer reachable in a build where they
-# once were.
-# It also optionally removes the workdir of those tasks/recipes.
-#
-addhandler sstate_eventhandler2
-sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
-python sstate_eventhandler2() {
- import glob
- d = e.data
- stamps = e.stamps.values()
- removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
- preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
- preservestamps = []
- if os.path.exists(preservestampfile):
- with open(preservestampfile, 'r') as f:
- preservestamps = f.readlines()
- seen = []
-
- # The machine index contains all the stamps this machine has ever seen in this build directory.
- # We should only remove things which this machine once accessed but no longer does.
- machineindex = set()
- bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
- mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
- if os.path.exists(mi):
- with open(mi, "r") as f:
- machineindex = set(line.strip() for line in f.readlines())
-
- for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
- toremove = []
- i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
- if not os.path.exists(i):
- continue
- with open(i, "r") as f:
- lines = f.readlines()
- for l in lines:
- try:
- (stamp, manifest, workdir) = l.split()
- if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
- toremove.append(l)
- if stamp not in seen:
- bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
- seen.append(stamp)
- except ValueError:
- bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
-
- if toremove:
- msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
- bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
-
- removed = 0
- for r in toremove:
- (stamp, manifest, workdir) = r.split()
- for m in glob.glob(manifest + ".*"):
- if m.endswith(".postrm"):
- continue
- sstate_clean_manifest(m, d)
- bb.utils.remove(stamp + "*")
- if removeworkdir:
- bb.utils.remove(workdir, recurse = True)
- lines.remove(r)
- removed = removed + 1
- bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
-
- bb.event.fire(bb.event.ProcessFinished(msg), d)
-
- with open(i, "w") as f:
- for l in lines:
- f.write(l)
- machineindex |= set(stamps)
- with open(mi, "w") as f:
- for l in machineindex:
- f.write(l + "\n")
-
- if preservestamps:
- os.remove(preservestampfile)
-}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
deleted file mode 100644
index de3a19815a..0000000000
--- a/meta/classes/staging.bbclass
+++ /dev/null
@@ -1,621 +0,0 @@
-# These directories will be staged in the sysroot
-SYSROOT_DIRS = " \
- ${includedir} \
- ${libdir} \
- ${base_libdir} \
- ${nonarch_base_libdir} \
- ${datadir} \
-"
-
-# These directories are also staged in the sysroot when they contain files that
-# are usable on the build system
-SYSROOT_DIRS_NATIVE = " \
- ${bindir} \
- ${sbindir} \
- ${base_bindir} \
- ${base_sbindir} \
- ${libexecdir} \
- ${sysconfdir} \
- ${localstatedir} \
-"
-SYSROOT_DIRS_append_class-native = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-cross = " ${SYSROOT_DIRS_NATIVE}"
-SYSROOT_DIRS_append_class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"
-
-# These directories will not be staged in the sysroot
-SYSROOT_DIRS_BLACKLIST = " \
- ${mandir} \
- ${docdir} \
- ${infodir} \
- ${datadir}/applications \
- ${datadir}/fonts \
- ${datadir}/gtk-doc/html \
- ${datadir}/locale \
- ${datadir}/pixmaps \
- ${libdir}/${BPN}/ptest \
-"
-
-sysroot_stage_dir() {
- src="$1"
- dest="$2"
- # if the src doesn't exist don't do anything
- if [ ! -d "$src" ]; then
- return
- fi
-
- mkdir -p "$dest"
- (
- cd $src
- find . -print0 | cpio --null -pdlu $dest
- )
-}
-
-sysroot_stage_dirs() {
- from="$1"
- to="$2"
-
- for dir in ${SYSROOT_DIRS}; do
- sysroot_stage_dir "$from$dir" "$to$dir"
- done
-
- # Remove directories we do not care about
- for dir in ${SYSROOT_DIRS_BLACKLIST}; do
- rm -rf "$to$dir"
- done
-}
-
-sysroot_stage_all() {
- sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
-}
-
-python sysroot_strip () {
- inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
- if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
- return
-
- dstdir = d.getVar('SYSROOT_DESTDIR')
- pn = d.getVar('PN')
- libdir = d.getVar("libdir")
- base_libdir = d.getVar("base_libdir")
- qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
- strip_cmd = d.getVar("STRIP")
-
- oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
- qa_already_stripped=qa_already_stripped)
-}
-
-do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[umask] = "022"
-
-addtask populate_sysroot after do_install
-
-SYSROOT_PREPROCESS_FUNCS ?= ""
-SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
-
-python do_populate_sysroot () {
- # SYSROOT 'version' 2
- bb.build.exec_func("sysroot_stage_all", d)
- bb.build.exec_func("sysroot_strip", d)
- for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
- bb.build.exec_func(f, d)
- pn = d.getVar("PN")
- multiprov = d.getVar("MULTI_PROVIDER_WHITELIST").split()
- provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
- bb.utils.mkdirhier(provdir)
- for p in d.getVar("PROVIDES").split():
- if p in multiprov:
- continue
- p = p.replace("/", "_")
- with open(provdir + p, "w") as f:
- f.write(pn)
-}
-
-do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
-do_populate_sysroot[vardepsexclude] += "MULTI_PROVIDER_WHITELIST"
-
-POPULATESYSROOTDEPS = ""
-POPULATESYSROOTDEPS_class-target = "virtual/${MLPREFIX}${TARGET_PREFIX}binutils:do_populate_sysroot"
-POPULATESYSROOTDEPS_class-nativesdk = "virtual/${TARGET_PREFIX}binutils-crosssdk:do_populate_sysroot"
-do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"
-
-SSTATETASKS += "do_populate_sysroot"
-do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
-do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
-do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
-
-python do_populate_sysroot_setscene () {
- sstate_setscene(d)
-}
-addtask do_populate_sysroot_setscene
-
-def staging_copyfile(c, target, dest, postinsts, seendirs):
- import errno
-
- destdir = os.path.dirname(dest)
- if destdir not in seendirs:
- bb.utils.mkdirhier(destdir)
- seendirs.add(destdir)
- if "/usr/bin/postinst-" in c:
- postinsts.append(dest)
- if os.path.islink(c):
- linkto = os.readlink(c)
- if os.path.lexists(dest):
- if not os.path.islink(dest):
- raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
- if os.readlink(dest) == linkto:
- return dest
- raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
- os.symlink(linkto, dest)
- #bb.warn(c)
- else:
- try:
- os.link(c, dest)
- except OSError as err:
- if err.errno == errno.EXDEV:
- bb.utils.copyfile(c, dest)
- else:
- raise
- return dest
-
-def staging_copydir(c, target, dest, seendirs):
- if dest not in seendirs:
- bb.utils.mkdirhier(dest)
- seendirs.add(dest)
-
-def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
- import subprocess
-
- if not fixme:
- return
- cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
- for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
- fixme_path = d.getVar(fixmevar)
- cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
- bb.debug(2, cmd)
- subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
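
staging_processfixme() is the inverse of the substitution performed by
sstate_hardcode_path(): it rewrites the placeholder tokens back into the
recipe-specific sysroot paths. A hypothetical per-file sketch of that rewrite in
pure Python (the class batches the work through sed for speed):

    def process_fixme_file(path, recipesysroot, recipesysrootnative):
        # Rewrite the placeholders left by sstate_hardcode_path() back into
        # real paths for this recipe's sysroots (illustrative only; the class
        # also substitutes FIXME_<VAR> tokens for PSEUDO_SYSROOT and friends).
        with open(path, "rb") as f:
            data = f.read()
        data = data.replace(b"FIXMESTAGINGDIRTARGET", recipesysroot.encode())
        data = data.replace(b"FIXMESTAGINGDIRHOST", recipesysrootnative.encode())
        with open(path, "wb") as f:
            f.write(data)
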
-
-
-def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
- import glob
- import subprocess
- import errno
-
- fixme = []
- postinsts = []
- seendirs = set()
- stagingdir = d.getVar("STAGING_DIR")
- if native:
- pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
- targetdir = nativesysroot
- else:
- pkgarchs = ['${MACHINE_ARCH}']
- pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
- pkgarchs.append('allarch')
- targetdir = targetsysroot
-
- bb.utils.mkdirhier(targetdir)
- for pkgarch in pkgarchs:
- for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
- if manifest.endswith("-initial.populate_sysroot"):
- # skip libgcc-initial due to file overlap
- continue
- if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
- continue
- if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
- continue
- tmanifest = targetdir + "/" + os.path.basename(manifest)
- if os.path.exists(tmanifest):
- continue
- try:
- os.link(manifest, tmanifest)
- except OSError as err:
- if err.errno == errno.EXDEV:
- bb.utils.copyfile(manifest, tmanifest)
- else:
- raise
- with open(manifest, "r") as f:
- for l in f:
- l = l.strip()
- if l.endswith("/fixmepath"):
- fixme.append(l)
- continue
- if l.endswith("/fixmepath.cmd"):
- continue
- dest = l.replace(stagingdir, "")
- dest = targetdir + "/" + "/".join(dest.split("/")[3:])
- if l.endswith("/"):
- staging_copydir(l, targetdir, dest, seendirs)
- continue
- try:
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
- except FileExistsError:
- continue
-
- staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
- for p in postinsts:
- subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
-
-#
-# Manifests here are complicated. The main sysroot area has the unpacked sstate,
-# which is unrelocated and tracked by the main sstate manifests. Each
-# recipe-specific sysroot has manifests for each dependency that is installed there.
-# The task hash is used to tell whether the data needs to be reinstalled. We
-# use a symlink to point to the currently installed hash. There is also a
-# "complete" stamp file which is used to mark if installation completed. If
-# something fails (e.g. a postinst), this won't get written and we would
-# remove and reinstall the dependency. This also means partially installed
-# dependencies should get cleaned up correctly.
-#
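
As a concrete reading of the comment above, the "installed and current" test that
extend_recipe_sysroot() below applies to each dependency can be sketched as
follows; the helper name is invented and only restates the symlink/.complete
convention just described:

    import os

    def dependency_is_current(depdir, recipe, taskhash):
        # A dependency counts as installed and up to date when the "<recipe>"
        # symlink points at the manifest for the current task hash and the
        # matching ".complete" stamp exists.
        link = os.path.join(depdir, recipe)
        return (os.path.islink(link)
                and os.readlink(link) == recipe + "." + taskhash
                and os.path.exists(link + ".complete"))
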
-
-python extend_recipe_sysroot() {
- import copy
- import subprocess
- import errno
- import collections
- import glob
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- mytaskname = d.getVar("BB_RUNTASK")
- if mytaskname.endswith("_setscene"):
- mytaskname = mytaskname.replace("_setscene", "")
- workdir = d.getVar("WORKDIR")
- #bb.warn(str(taskdepdata))
- pn = d.getVar("PN")
- stagingdir = d.getVar("STAGING_DIR")
- sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
- recipesysroot = d.getVar("RECIPE_SYSROOT")
- recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
-
- # Detect bitbake -b usage
- nodeps = d.getVar("BB_LIMITEDDEPS") or False
- if nodeps:
- lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
- staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
- staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
- bb.utils.unlockfile(lock)
- return
-
- start = None
- configuredeps = []
- owntaskdeps = []
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == mytaskname and data[0] == pn:
- start = dep
- elif data[0] == pn:
- owntaskdeps.append(data[1])
- if start is None:
- bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
-
- # We need to figure out which sysroot files we need to expose to this task.
- # This needs to match what would get restored from sstate, which is controlled
- # ultimately by calls from bitbake to setscene_depvalid().
- # That function expects a setscene dependency tree. We build a dependency tree
- # condensed to inter-sstate task dependencies, similar to that used by setscene
- # tasks. We can then call into setscene_depvalid() and decide
- # which dependencies we can "see" and should expose in the recipe specific sysroot.
- setscenedeps = copy.deepcopy(taskdepdata)
-
- start = set([start])
-
- sstatetasks = d.getVar("SSTATETASKS").split()
- # Add recipe specific tasks referenced by setscene_depvalid()
- sstatetasks.append("do_stash_locale")
-
- def print_dep_tree(deptree):
- data = ""
- for dep in deptree:
- deps = " " + "\n ".join(deptree[dep][3]) + "\n"
- data = data + "%s:\n %s\n %s\n%s %s\n %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
- return data
-
- #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
-
- #bb.note(" start2 is %s" % str(start))
-
- # If start is an sstate task (like do_package) we need to add in its direct dependencies
- # else the code below won't recurse into them.
- for dep in set(start):
- for dep2 in setscenedeps[dep][3]:
- start.add(dep2)
- start.remove(dep)
-
- #bb.note(" start3 is %s" % str(start))
-
- # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
- for dep in taskdepdata:
- data = setscenedeps[dep]
- if data[1] not in sstatetasks:
- for dep2 in setscenedeps:
- data2 = setscenedeps[dep2]
- if dep in data2[3]:
- data2[3].update(setscenedeps[dep][3])
- data2[3].remove(dep)
- if dep in start:
- start.update(setscenedeps[dep][3])
- start.remove(dep)
- del setscenedeps[dep]
-
- # Remove circular references
- for dep in setscenedeps:
- if dep in setscenedeps[dep][3]:
- setscenedeps[dep][3].remove(dep)
-
- #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
- #bb.note(" start is %s" % str(start))
-
- # Direct dependencies should be present and can be depended upon
- for dep in set(start):
- if setscenedeps[dep][1] == "do_populate_sysroot":
- if dep not in configuredeps:
- configuredeps.append(dep)
- bb.note("Direct dependencies are %s" % str(configuredeps))
- #bb.note(" or %s" % str(start))
-
- msgbuf = []
- # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
- # for ones that would be restored from sstate.
- done = list(start)
- next = list(start)
- while next:
- new = []
- for dep in next:
- data = setscenedeps[dep]
- for datadep in data[3]:
- if datadep in done:
- continue
- taskdeps = {}
- taskdeps[dep] = setscenedeps[dep][:2]
- taskdeps[datadep] = setscenedeps[datadep][:2]
- retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
- if retval:
- msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
- continue
- done.append(datadep)
- new.append(datadep)
- if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
- configuredeps.append(datadep)
- msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
- else:
- msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
- next = new
-
- # This logging is too verbose for day to day use sadly
- #bb.debug(2, "\n".join(msgbuf))
-
- depdir = recipesysrootnative + "/installeddeps"
- bb.utils.mkdirhier(depdir)
- bb.utils.mkdirhier(sharedmanifests)
-
- lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
-
- fixme = {}
- seendirs = set()
- postinsts = []
- multilibs = {}
- manifests = {}
- # All files that we're going to be installing, to find conflicts.
- fileset = {}
-
- for f in os.listdir(depdir):
- if not f.endswith(".complete"):
- continue
- f = depdir + "/" + f
- if os.path.islink(f) and not os.path.exists(f):
- bb.note("%s no longer exists, removing from sysroot" % f)
- lnk = os.readlink(f.replace(".complete", ""))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
- os.unlink(f)
- os.unlink(f.replace(".complete", ""))
-
- installed = []
- for dep in configuredeps:
- c = setscenedeps[dep][0]
- if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
- bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
- continue
- installed.append(c)
-
- # We want to remove anything which this task previously installed but is no longer a dependency
- taskindex = depdir + "/" + "index." + mytaskname
- if os.path.exists(taskindex):
- potential = []
- with open(taskindex, "r") as f:
- for l in f:
- l = l.strip()
- if l not in installed:
- fl = depdir + "/" + l
- if not os.path.exists(fl):
- # Was likely already uninstalled
- continue
- potential.append(l)
- # We need to ensure no other task needs this dependency. We hold the sysroot
-        # lock so we can search the indexes to check.
- if potential:
- for i in glob.glob(depdir + "/index.*"):
- if i.endswith("." + mytaskname):
- continue
- with open(i, "r") as f:
- for l in f:
- if l.startswith("TaskDeps:"):
- prevtasks = l.split()[1:]
- if mytaskname in prevtasks:
-                                # We're a dependency of this task, so we can clear items out of the sysroot
- break
- l = l.strip()
- if l in potential:
- potential.remove(l)
- for l in potential:
- fl = depdir + "/" + l
- bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
- lnk = os.readlink(fl)
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
- os.unlink(fl)
- os.unlink(fl + ".complete")
-
- msg_exists = []
- msg_adding = []
-
- # Handle all removals first since files may move between recipes
- for dep in configuredeps:
- c = setscenedeps[dep][0]
- if c not in installed:
- continue
- taskhash = setscenedeps[dep][5]
- taskmanifest = depdir + "/" + c + "." + taskhash
-
- if os.path.exists(depdir + "/" + c):
- lnk = os.readlink(depdir + "/" + c)
- if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- continue
- else:
- bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
- os.unlink(depdir + "/" + c)
- if os.path.lexists(depdir + "/" + c + ".complete"):
- os.unlink(depdir + "/" + c + ".complete")
- elif os.path.lexists(depdir + "/" + c):
- os.unlink(depdir + "/" + c)
-
- binfiles = {}
- # Now handle installs
- for dep in configuredeps:
- c = setscenedeps[dep][0]
- if c not in installed:
- continue
- taskhash = setscenedeps[dep][5]
- taskmanifest = depdir + "/" + c + "." + taskhash
-
- if os.path.exists(depdir + "/" + c):
- lnk = os.readlink(depdir + "/" + c)
- if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- msg_exists.append(c)
- continue
-
- msg_adding.append(c)
-
- os.symlink(c + "." + taskhash, depdir + "/" + c)
-
- manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
- if d2 is not d:
- # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
- # We need a consistent WORKDIR for the image
- d2.setVar("WORKDIR", d.getVar("WORKDIR"))
- destsysroot = d2.getVar("RECIPE_SYSROOT")
- # We put allarch recipes into the default sysroot
- if manifest and "allarch" in manifest:
- destsysroot = d.getVar("RECIPE_SYSROOT")
-
- native = False
- if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
- native = True
-
- if manifest:
- newmanifest = collections.OrderedDict()
- targetdir = destsysroot
- if native:
- targetdir = recipesysrootnative
- if targetdir not in fixme:
- fixme[targetdir] = []
- fm = fixme[targetdir]
-
- with open(manifest, "r") as f:
- manifests[dep] = manifest
- for l in f:
- l = l.strip()
- if l.endswith("/fixmepath"):
- fm.append(l)
- continue
- if l.endswith("/fixmepath.cmd"):
- continue
- dest = l.replace(stagingdir, "")
- dest = "/" + "/".join(dest.split("/")[3:])
- newmanifest[l] = targetdir + dest
-
- # Check if files have already been installed by another
- # recipe and abort if they have, explaining what recipes are
- # conflicting.
- hashname = targetdir + dest
- if not hashname.endswith("/"):
- if hashname in fileset:
- bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
- else:
- fileset[hashname] = c
-
-                # Having multiple identical manifests in each sysroot eats disk space, so
-                # create a shared pool of them and hardlink if we can.
-                # We create the manifest in advance so that if something fails during installation,
-                # or the build is interrupted, a subsequent execution can clean up.
- sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
- if not os.path.exists(sharedm):
- smlock = bb.utils.lockfile(sharedm + ".lock")
-                    # We can race here. You'd think that just means we may not end up with all copies
-                    # hardlinked to each other, but Python can lose file handles, so we need to do
-                    # this under a lock.
- if not os.path.exists(sharedm):
- with open(sharedm, 'w') as m:
- for l in newmanifest:
- dest = newmanifest[l]
- m.write(dest.replace(workdir + "/", "") + "\n")
- bb.utils.unlockfile(smlock)
- try:
- os.link(sharedm, taskmanifest)
- except OSError as err:
- if err.errno == errno.EXDEV:
- bb.utils.copyfile(sharedm, taskmanifest)
- else:
- raise
- # Finally actually install the files
- for l in newmanifest:
- dest = newmanifest[l]
- if l.endswith("/"):
- staging_copydir(l, targetdir, dest, seendirs)
- continue
- if "/bin/" in l or "/sbin/" in l:
- # defer /*bin/* files until last in case they need libs
- binfiles[l] = (targetdir, dest)
- else:
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
-
- # Handle deferred binfiles
- for l in binfiles:
- (targetdir, dest) = binfiles[l]
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
-
- bb.note("Installed into sysroot: %s" % str(msg_adding))
- bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
-
- for f in fixme:
- staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)
-
- for p in postinsts:
- subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
-
- for dep in manifests:
- c = setscenedeps[dep][0]
- os.symlink(manifests[dep], depdir + "/" + c + ".complete")
-
- with open(taskindex, "w") as f:
- f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
- for l in sorted(installed):
- f.write(l + "\n")
-
- bb.utils.unlockfile(lock)
-}
-extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
-
-do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
-python do_prepare_recipe_sysroot () {
- bb.build.exec_func("extend_recipe_sysroot", d)
-}
-addtask do_prepare_recipe_sysroot before do_configure after do_fetch
-
-python staging_taskhandler() {
- bbtasks = e.tasklist
- for task in bbtasks:
- deps = d.getVarFlag(task, "depends")
- if deps and "populate_sysroot" in deps:
- d.appendVarFlag(task, "prefuncs", " extend_recipe_sysroot")
-}
-staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
-addhandler staging_taskhandler
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
deleted file mode 100644
index 894f6b3718..0000000000
--- a/meta/classes/syslinux.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
-# syslinux.bbclass
-# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
-# Released under the MIT license (see packages/COPYING)
-
-# Provide syslinux specific functions for building bootable images.
-
-# External variables
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
-# ${LABELS} - a list of targets for the automatic config
-# ${APPEND} - an override list of append strings for each label
-# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
-# ${SYSLINUX_SPLASH} - A background for the vga boot menu if using the boot menu
-# ${SYSLINUX_DEFAULT_CONSOLE} - set to "console=ttyX" to change kernel boot default console
-# ${SYSLINUX_SERIAL} - Set an alternate serial port or turn off serial with empty string
-# ${SYSLINUX_SERIAL_TTY} - Set alternate console=tty... kernel boot argument
-# ${SYSLINUX_KERNEL_ARGS} - Add additional kernel arguments
-
-do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
- syslinux-native:do_populate_sysroot"
-
-ISOLINUXDIR ?= "/isolinux"
-SYSLINUXDIR = "/"
-# The kernel has an internal default console, which you can override with
-# a console=...some_tty...
-SYSLINUX_DEFAULT_CONSOLE ?= ""
-SYSLINUX_SERIAL ?= "0 115200"
-SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
-SYSLINUX_PROMPT ?= "0"
-SYSLINUX_TIMEOUT ?= "50"
-AUTO_SYSLINUXMENU ?= "1"
-SYSLINUX_ALLOWOPTIONS ?= "1"
-SYSLINUX_ROOT ?= "${ROOT}"
-SYSLINUX_CFG_VM ?= "${S}/syslinux_vm.cfg"
-SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
-APPEND ?= ""
-
-# Need UUID utility code.
-inherit fs-uuid
-
-syslinux_populate() {
- DEST=$1
- BOOTDIR=$2
- CFGNAME=$3
-
- install -d ${DEST}${BOOTDIR}
-
- # Install the config files
- install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
- if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
- install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
- install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
- install -m 0444 ${STAGING_DATADIR}/syslinux/libutil.c32 ${DEST}${BOOTDIR}/libutil.c32
- if [ "${SYSLINUX_SPLASH}" != "" ] ; then
- install -m 0644 ${SYSLINUX_SPLASH} ${DEST}${BOOTDIR}/splash.lss
- fi
- fi
-}
-
-syslinux_iso_populate() {
- iso_dir=$1
- syslinux_populate $iso_dir ${ISOLINUXDIR} isolinux.cfg
- install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin $iso_dir${ISOLINUXDIR}
- install -m 0644 ${STAGING_DATADIR}/syslinux/ldlinux.c32 $iso_dir${ISOLINUXDIR}
-}
-
-syslinux_hddimg_populate() {
- hdd_dir=$1
- syslinux_populate $hdd_dir ${SYSLINUXDIR} syslinux.cfg
- install -m 0444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $hdd_dir${SYSLINUXDIR}/ldlinux.sys
-}
-
-syslinux_hddimg_install() {
- syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
-}
-
-python build_syslinux_cfg () {
- import copy
- import sys
-
- workdir = d.getVar('WORKDIR')
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
-
- labels = d.getVar('LABELS')
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('SYSLINUX_CFG')
- if not cfile:
- bb.fatal('Unable to read SYSLINUX_CFG')
-
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
-
- opts = d.getVar('SYSLINUX_OPTS')
-
- if opts:
- for opt in opts.split(';'):
- cfgfile.write('%s\n' % opt)
-
- allowoptions = d.getVar('SYSLINUX_ALLOWOPTIONS')
- if allowoptions:
- cfgfile.write('ALLOWOPTIONS %s\n' % allowoptions)
- else:
- cfgfile.write('ALLOWOPTIONS 1\n')
-
- syslinux_default_console = d.getVar('SYSLINUX_DEFAULT_CONSOLE')
- syslinux_serial_tty = d.getVar('SYSLINUX_SERIAL_TTY')
- syslinux_serial = d.getVar('SYSLINUX_SERIAL')
- if syslinux_serial:
- cfgfile.write('SERIAL %s\n' % syslinux_serial)
-
- menu = (d.getVar('AUTO_SYSLINUXMENU') == "1")
-
- if menu and syslinux_serial:
- cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
- else:
- cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
-
- timeout = d.getVar('SYSLINUX_TIMEOUT')
-
- if timeout:
- cfgfile.write('TIMEOUT %s\n' % timeout)
- else:
- cfgfile.write('TIMEOUT 50\n')
-
- prompt = d.getVar('SYSLINUX_PROMPT')
- if prompt:
- cfgfile.write('PROMPT %s\n' % prompt)
- else:
- cfgfile.write('PROMPT 1\n')
-
- if menu:
- cfgfile.write('ui vesamenu.c32\n')
- cfgfile.write('menu title Select kernel options and boot kernel\n')
- cfgfile.write('menu tabmsg Press [Tab] to edit, [Return] to select\n')
- splash = d.getVar('SYSLINUX_SPLASH')
- if splash:
- cfgfile.write('menu background splash.lss\n')
-
- for label in labels.split():
- localdata = bb.data.createCopy(d)
-
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
-
- localdata.setVar('OVERRIDES', label + ':' + overrides)
-
- btypes = [ [ "", syslinux_default_console ] ]
- if menu and syslinux_serial:
- btypes = [ [ "Graphics console ", syslinux_default_console ],
- [ "Serial console ", syslinux_serial_tty ] ]
-
- root= d.getVar('SYSLINUX_ROOT')
- if not root:
- bb.fatal('SYSLINUX_ROOT not defined')
-
- kernel = localdata.getVar('KERNEL_IMAGETYPE')
- for btype in btypes:
- cfgfile.write('LABEL %s%s\nKERNEL /%s\n' % (btype[0], label, kernel))
-
- exargs = d.getVar('SYSLINUX_KERNEL_ARGS')
- if exargs:
- btype[1] += " " + exargs
-
- append = localdata.getVar('APPEND')
- initrd = localdata.getVar('INITRD')
-
- append = root + " " + append
- cfgfile.write('APPEND ')
-
- if initrd:
- cfgfile.write('initrd=/initrd ')
-
- cfgfile.write('LABEL=%s '% (label))
- append = replace_rootfs_uuid(d, append)
- cfgfile.write('%s %s\n' % (append, btype[1]))
-
- cfgfile.close()
-}
-build_syslinux_cfg[dirs] = "${S}"
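
A hypothetical excerpt of the syslinux.cfg that build_syslinux_cfg() might emit for
a single label "boot" with the class defaults (serial console and automatic menu
enabled); every value here is invented for illustration, and a matching
"Serial console boot" entry would follow:

    # Automatically created by OE
    ALLOWOPTIONS 1
    SERIAL 0 115200
    DEFAULT Graphics console boot
    TIMEOUT 50
    PROMPT 0
    ui vesamenu.c32
    menu title Select kernel options and boot kernel
    menu tabmsg Press [Tab] to edit, [Return] to select
    LABEL Graphics console boot
    KERNEL /bzImage
    APPEND initrd=/initrd LABEL=boot root=/dev/ram0 rw
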
diff --git a/meta/classes/systemd-boot-cfg.bbclass b/meta/classes/systemd-boot-cfg.bbclass
deleted file mode 100644
index b3e0e6ad41..0000000000
--- a/meta/classes/systemd-boot-cfg.bbclass
+++ /dev/null
@@ -1,71 +0,0 @@
-SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
-SYSTEMD_BOOT_ENTRIES ?= ""
-SYSTEMD_BOOT_TIMEOUT ?= "10"
-
-# Uses MACHINE specific KERNEL_IMAGETYPE
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-# Need UUID utility code.
-inherit fs-uuid
-
-python build_efi_cfg() {
- s = d.getVar("S")
- labels = d.getVar('LABELS')
- if not labels:
- bb.debug(1, "LABELS not defined, nothing to do")
- return
-
- if labels == []:
- bb.debug(1, "No labels, nothing to do")
- return
-
- cfile = d.getVar('SYSTEMD_BOOT_CFG')
- cdir = os.path.dirname(cfile)
- if not os.path.exists(cdir):
- os.makedirs(cdir)
- try:
- cfgfile = open(cfile, 'w')
- except OSError:
- bb.fatal('Unable to open %s' % cfile)
-
- cfgfile.write('# Automatically created by OE\n')
- cfgfile.write('default %s\n' % (labels.split()[0]))
- timeout = d.getVar('SYSTEMD_BOOT_TIMEOUT')
- if timeout:
- cfgfile.write('timeout %s\n' % timeout)
- else:
- cfgfile.write('timeout 10\n')
- cfgfile.close()
-
- for label in labels.split():
- localdata = d.createCopy()
-
- entryfile = "%s/%s.conf" % (s, label)
- if not os.path.exists(s):
- os.makedirs(s)
- d.appendVar("SYSTEMD_BOOT_ENTRIES", " " + entryfile)
- try:
- entrycfg = open(entryfile, "w")
- except OSError:
- bb.fatal('Unable to open %s' % entryfile)
-
- entrycfg.write('title %s\n' % label)
-
- kernel = localdata.getVar("KERNEL_IMAGETYPE")
- entrycfg.write('linux /%s\n' % kernel)
-
- append = localdata.getVar('APPEND')
- initrd = localdata.getVar('INITRD')
-
- if initrd:
- entrycfg.write('initrd /initrd\n')
- lb = label
- if label == "install":
- lb = "install-efi"
- entrycfg.write('options LABEL=%s ' % lb)
- if append:
- append = replace_rootfs_uuid(d, append)
- entrycfg.write('%s' % append)
- entrycfg.write('\n')
- entrycfg.close()
-}
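
Similarly, a hypothetical loader.conf and entry file as build_efi_cfg() would write
them for LABELS = "boot" with the default timeout; the kernel image name, initrd
and options are invented:

    loader.conf:
        # Automatically created by OE
        default boot
        timeout 10

    boot.conf:
        title boot
        linux /bzImage
        initrd /initrd
        options LABEL=boot root=/dev/ram0 rw
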
diff --git a/meta/classes/systemd-boot.bbclass b/meta/classes/systemd-boot.bbclass
deleted file mode 100644
index 336c4c2ff5..0000000000
--- a/meta/classes/systemd-boot.bbclass
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (C) 2016 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-# systemd-boot.bbclass - systemd-boot is essentially gummiboot merged into systemd.
-#                        The original standalone gummiboot project is dead and no
-#                        longer maintained.
-#
-# Set EFI_PROVIDER = "systemd-boot" to use systemd-boot on your live images instead of grub-efi
-# (images built by image-live.bbclass)
-
-do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-
-require conf/image-uefi.conf
-# Need UUID utility code.
-inherit fs-uuid
-
-efi_populate() {
- efi_populate_common "$1" systemd
-
- # systemd-boot requires these paths for configuration files
- # they are not customizable so no point in new vars
- install -d ${DEST}/loader
- install -d ${DEST}/loader/entries
- install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
- for i in ${SYSTEMD_BOOT_ENTRIES}; do
- install -m 0644 ${i} ${DEST}/loader/entries
- done
-}
-
-efi_iso_populate_append() {
- cp -r $iso_dir/loader ${EFIIMGDIR}
-}
-
-inherit systemd-boot-cfg
diff --git a/meta/classes/systemd.bbclass b/meta/classes/systemd.bbclass
deleted file mode 100644
index 9e8a82c9f1..0000000000
--- a/meta/classes/systemd.bbclass
+++ /dev/null
@@ -1,232 +0,0 @@
-# The list of packages that should have systemd packaging scripts added. For
-# each entry, optionally have a SYSTEMD_SERVICE_[package] that lists the service
-# files in this package. If this variable isn't set, [package].service is used.
-SYSTEMD_PACKAGES ?= "${PN}"
-SYSTEMD_PACKAGES_class-native ?= ""
-SYSTEMD_PACKAGES_class-nativesdk ?= ""
-
-# Whether to enable or disable the services on installation.
-SYSTEMD_AUTO_ENABLE ??= "enable"
-
-# This class will be included in any recipe that supports systemd init scripts,
-# even if systemd is not in DISTRO_FEATURES. As such, don't make any changes
-# directly; check DISTRO_FEATURES first.
-python __anonymous() {
- # If the distro features have systemd but not sysvinit, inhibit update-rcd
- # from doing any work so that pure-systemd images don't have redundant init
- # files.
- if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
- d.appendVar("DEPENDS", " systemd-systemctl-native")
- d.appendVar("PACKAGE_WRITE_DEPS", " systemd-systemctl-native")
- if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
- d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
-}
-
-systemd_postinst() {
-if type systemctl >/dev/null 2>/dev/null; then
- OPTS=""
-
- if [ -n "$D" ]; then
- OPTS="--root=$D"
- fi
-
- if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
- for service in ${SYSTEMD_SERVICE_ESCAPED}; do
- systemctl ${OPTS} enable "$service"
- done
- fi
-
- if [ -z "$D" ]; then
- systemctl daemon-reload
- systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
-
- if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
- systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
- fi
- fi
-fi
-}
-
-systemd_prerm() {
-if type systemctl >/dev/null 2>/dev/null; then
- if [ -z "$D" ]; then
- systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
-
- systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
- fi
-fi
-}
-
-
-systemd_populate_packages[vardeps] += "systemd_prerm systemd_postinst"
-systemd_populate_packages[vardepsexclude] += "OVERRIDES"
-
-
-python systemd_populate_packages() {
- import re
- import shlex
-
- if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
- return
-
- def get_package_var(d, var, pkg):
- val = (d.getVar('%s_%s' % (var, pkg)) or "").strip()
- if val == "":
- val = (d.getVar(var) or "").strip()
- return val
-
-    # Check if the systemd packages are already included in PACKAGES
- def systemd_check_package(pkg_systemd):
- packages = d.getVar('PACKAGES')
- if not pkg_systemd in packages.split():
- bb.error('%s does not appear in package list, please add it' % pkg_systemd)
-
-
- def systemd_generate_package_scripts(pkg):
- bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
-
- paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg).split())
- d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
-
- # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
- # variable.
- localdata = d.createCopy()
- localdata.prependVar("OVERRIDES", pkg + ":")
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += localdata.getVar('systemd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- prerm = d.getVar('pkg_prerm_%s' % pkg)
- if not prerm:
- prerm = '#!/bin/sh\n'
- prerm += localdata.getVar('systemd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
-
-
-    # Add files to FILES_*-systemd if they exist and have not already been added
- def systemd_append_file(pkg_systemd, file_append):
- appended = False
- if os.path.exists(oe.path.join(d.getVar("D"), file_append)):
- var_name = "FILES_" + pkg_systemd
- files = d.getVar(var_name, False) or ""
- if file_append not in files.split():
- d.appendVar(var_name, " " + file_append)
- appended = True
- return appended
-
-    # Add systemd files to FILES_*-systemd, parse them for Also= and follow references recursively
- def systemd_add_files_and_parse(pkg_systemd, path, service, keys):
- # avoid infinite recursion
- if systemd_append_file(pkg_systemd, oe.path.join(path, service)):
- fullpath = oe.path.join(d.getVar("D"), path, service)
- if service.find('.service') != -1:
- # for *.service add *@.service
- service_base = service.replace('.service', '')
- systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
- if service.find('.socket') != -1:
- # for *.socket add *.service and *@.service
- service_base = service.replace('.socket', '')
- systemd_add_files_and_parse(pkg_systemd, path, service_base + '.service', keys)
- systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
- for key in keys.split():
-            # recurse over all dependencies found in keys ('Also', 'Conflicts', ...) and add them to the files
- cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
- pipe = os.popen(cmd, 'r')
- line = pipe.readline()
- while line:
- line = line.replace('\n', '')
- systemd_add_files_and_parse(pkg_systemd, path, line, keys)
- line = pipe.readline()
- pipe.close()
-
- # Check service-files and call systemd_add_files_and_parse for each entry
- def systemd_check_services():
- searchpaths = [oe.path.join(d.getVar("sysconfdir"), "systemd", "system"),]
- searchpaths.append(d.getVar("systemd_system_unitdir"))
- systemd_packages = d.getVar('SYSTEMD_PACKAGES')
-
- keys = 'Also'
- # scan for all in SYSTEMD_SERVICE[]
- for pkg_systemd in systemd_packages.split():
- for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
- path_found = ''
-
- # Deal with adding, for example, 'ifplugd@eth0.service' from
- # 'ifplugd@.service'
- base = None
- at = service.find('@')
- if at != -1:
- ext = service.rfind('.')
- base = service[:at] + '@' + service[ext:]
-
- for path in searchpaths:
- if os.path.exists(oe.path.join(d.getVar("D"), path, service)):
- path_found = path
- break
- elif base is not None:
- if os.path.exists(oe.path.join(d.getVar("D"), path, base)):
- path_found = path
- break
-
- if path_found != '':
- systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
- else:
- bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
-
- def systemd_create_presets(pkg, action):
- presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
- bb.utils.mkdirhier(os.path.dirname(presetf))
- with open(presetf, 'a') as fd:
- for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
- fd.write("%s %s\n" % (action,service))
- d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
-
- # Run all modifications once when creating package
- if os.path.exists(d.getVar("D")):
- for pkg in d.getVar('SYSTEMD_PACKAGES').split():
- systemd_check_package(pkg)
- if d.getVar('SYSTEMD_SERVICE_' + pkg):
- systemd_generate_package_scripts(pkg)
- action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
- if action in ("enable", "disable"):
- systemd_create_presets(pkg, action)
- elif action not in ("mask", "preset"):
- bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
- systemd_check_services()
-}
-
-PACKAGESPLITFUNCS_prepend = "systemd_populate_packages "
-
-python rm_systemd_unitdir (){
- import shutil
- if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
- systemd_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_unitdir'))
- if os.path.exists(systemd_unitdir):
- shutil.rmtree(systemd_unitdir)
- systemd_libdir = os.path.dirname(systemd_unitdir)
- if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
- os.rmdir(systemd_libdir)
-}
-
-python rm_sysvinit_initddir (){
- import shutil
- sysv_initddir = oe.path.join(d.getVar("D"), (d.getVar('INIT_D_DIR') or "/etc/init.d"))
-
- if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and \
- not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d) and \
- os.path.exists(sysv_initddir):
- systemd_system_unitdir = oe.path.join(d.getVar("D"), d.getVar('systemd_system_unitdir'))
-
- # If systemd_system_unitdir contains anything, delete sysv_initddir
- if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
- shutil.rmtree(sysv_initddir)
-}
-
-do_install[postfuncs] += "${RMINITDIR} "
-RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
-RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
-RMINITDIR = ""
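To make the handling in systemd_populate_packages() above concrete, here is a minimal recipe sketch (the recipe and unit names are hypothetical) showing the variables the class consumes; it assumes do_install puts the unit into ${systemd_system_unitdir}:

    inherit systemd

    SYSTEMD_PACKAGES = "${PN}"
    # foo.service is a hypothetical unit installed by do_install
    SYSTEMD_SERVICE_${PN} = "foo.service"
    # "enable"/"disable" are written into a 98-${PN}.preset file;
    # "mask" and "preset" are also accepted, anything else is fatal
    SYSTEMD_AUTO_ENABLE = "enable"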
-
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
index 6059ae95e0..2dfc7db255 100644
--- a/meta/classes/terminal.bbclass
+++ b/meta/classes/terminal.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
OE_TERMINAL ?= 'auto'
OE_TERMINAL[type] = 'choice'
OE_TERMINAL[choices] = 'auto none \
@@ -26,6 +32,9 @@ def emit_terminal_func(command, envdata, d):
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
+ # Override the shell shell_trap_code specifies.
+ # If our shell is bash, we might well face silent death.
+ script.write("#!/bin/bash\n")
script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
@@ -37,7 +46,7 @@ def emit_terminal_func(command, envdata, d):
def oe_terminal(command, title, d):
import oe.data
import oe.terminal
-
+
envdata = bb.data.init()
for v in os.environ:
diff --git a/meta/classes/testexport.bbclass b/meta/classes/testexport.bbclass
deleted file mode 100644
index 59cbaefbf9..0000000000
--- a/meta/classes/testexport.bbclass
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (C) 2016 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-#
-#
-# testexport.bbclass allows running runtime tests outside the OE environment.
-# Most of the tests are commands run on the target image over ssh.
-# To use it, add testexport to the global inherit list and build your target image with -c testexport
-# You can try it out like this:
-# - First build an image. i.e. core-image-sato
-# - Add INHERIT += "testexport" in local.conf
-# - Then bitbake core-image-sato -c testexport. That will generate the directory structure
-# to execute the runtime tests using runexported.py.
-#
-# For more information on TEST_SUITES check testimage class.
-
-TEST_LOG_DIR ?= "${WORKDIR}/testexport"
-TEST_EXPORT_DIR ?= "${TMPDIR}/testexport/${PN}"
-TEST_EXPORT_PACKAGED_DIR ?= "packages/packaged"
-TEST_EXPORT_EXTRACTED_DIR ?= "packages/extracted"
-
-TEST_TARGET ?= "simpleremote"
-TEST_TARGET_IP ?= ""
-TEST_SERVER_IP ?= ""
-
-TEST_EXPORT_SDK_PACKAGES ?= ""
-TEST_EXPORT_SDK_ENABLED ?= "0"
-TEST_EXPORT_SDK_NAME ?= "testexport-tools-nativesdk"
-TEST_EXPORT_SDK_DIR ?= "sdk"
-
-TEST_EXPORT_DEPENDS = ""
-TEST_EXPORT_DEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TEST_EXPORT_DEPENDS += "${@bb.utils.contains('TEST_EXPORT_SDK_ENABLED', '1', 'testexport-tarball:do_populate_sdk', '', d)}"
-TEST_EXPORT_LOCK = "${TMPDIR}/testimage.lock"
-
-addtask testexport
-do_testexport[nostamp] = "1"
-do_testexport[depends] += "${TEST_EXPORT_DEPENDS} ${TESTIMAGEDEPENDS}"
-do_testexport[lockfiles] += "${TEST_EXPORT_LOCK}"
-
-python do_testexport() {
- testexport_main(d)
-}
-
-def testexport_main(d):
- import json
- import logging
-
- from oeqa.runtime.context import OERuntimeTestContext
- from oeqa.runtime.context import OERuntimeTestContextExecutor
-
- image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
- d.getVar('IMAGE_LINK_NAME')))
-
- tdname = "%s.testdata.json" % image_name
- td = json.load(open(tdname, "r"))
-
- logger = logging.getLogger("BitBake")
-
- target = OERuntimeTestContextExecutor.getTarget(
- d.getVar("TEST_TARGET"), None, d.getVar("TEST_TARGET_IP"),
- d.getVar("TEST_SERVER_IP"))
-
- host_dumper = OERuntimeTestContextExecutor.getHostDumper(
- d.getVar("testimage_dump_host"), d.getVar("TESTIMAGE_DUMP_DIR"))
-
- image_manifest = "%s.manifest" % image_name
- image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
-
- extract_dir = d.getVar("TEST_EXTRACTED_DIR")
-
- tc = OERuntimeTestContext(td, logger, target, host_dumper,
- image_packages, extract_dir)
-
- copy_needed_files(d, tc)
-
-def copy_needed_files(d, tc):
- import shutil
- import oe.path
-
- from oeqa.utils.package_manager import _get_json_file
- from oeqa.core.utils.test import getSuiteCasesFiles
-
- export_path = d.getVar('TEST_EXPORT_DIR')
- corebase_path = d.getVar('COREBASE')
-
- # Clean everything before starting
- oe.path.remove(export_path)
- bb.utils.mkdirhier(os.path.join(export_path, 'lib', 'oeqa'))
-
- # The source of files to copy are relative to 'COREBASE' directory
- # The destination is relative to 'TEST_EXPORT_DIR'
- # Because we are squashing the libraries, we need to remove
- # the layer/script directory
- files_to_copy = [ os.path.join('meta', 'lib', 'oeqa', 'core'),
- os.path.join('meta', 'lib', 'oeqa', 'runtime'),
- os.path.join('meta', 'lib', 'oeqa', 'files'),
- os.path.join('meta', 'lib', 'oeqa', 'utils'),
- os.path.join('scripts', 'oe-test'),
- os.path.join('scripts', 'lib', 'argparse_oe.py'),
- os.path.join('scripts', 'lib', 'scriptutils.py'), ]
-
- for f in files_to_copy:
- src = os.path.join(corebase_path, f)
- dst = os.path.join(export_path, f.split('/', 1)[-1])
- if os.path.isdir(src):
- oe.path.copytree(src, dst)
- else:
- shutil.copy2(src, dst)
-
- # Remove cases and just copy the ones specified
- cases_path = os.path.join(export_path, 'lib', 'oeqa', 'runtime', 'cases')
- oe.path.remove(cases_path)
- bb.utils.mkdirhier(cases_path)
- test_paths = get_runtime_paths(d)
- test_modules = d.getVar('TEST_SUITES').split()
- tc.loadTests(test_paths, modules=test_modules)
- for f in getSuiteCasesFiles(tc.suites):
- shutil.copy2(f, cases_path)
- json_file = _get_json_file(f)
- if json_file:
- shutil.copy2(json_file, cases_path)
-
- # Copy test data
- image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
- d.getVar('IMAGE_LINK_NAME')))
- image_manifest = "%s.manifest" % image_name
- tdname = "%s.testdata.json" % image_name
- test_data_path = os.path.join(export_path, 'data')
- bb.utils.mkdirhier(test_data_path)
- shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
- shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
-
- for subdir, dirs, files in os.walk(export_path):
- for dir in dirs:
- if dir == '__pycache__':
- shutil.rmtree(os.path.join(subdir, dir))
-
- # Create tar file for common parts of testexport
- create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
-
- # Copy packages needed for runtime testing
- package_extraction(d, tc.suites)
- test_pkg_dir = d.getVar("TEST_NEEDED_PACKAGES_DIR")
- if os.path.isdir(test_pkg_dir) and os.listdir(test_pkg_dir):
- export_pkg_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "packages")
- oe.path.copytree(test_pkg_dir, export_pkg_dir)
- # Create tar file for packages needed by the DUT
- create_tarball(d, "testexport_packages_%s.tar.gz" % d.getVar("MACHINE"), export_pkg_dir)
-
- # Copy SDK
- if d.getVar("TEST_EXPORT_SDK_ENABLED") == "1":
- sdk_deploy = d.getVar("SDK_DEPLOY")
- tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
- tarball_path = os.path.join(sdk_deploy, tarball_name)
- export_sdk_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"),
- d.getVar("TEST_EXPORT_SDK_DIR"))
- bb.utils.mkdirhier(export_sdk_dir)
- shutil.copy2(tarball_path, export_sdk_dir)
-
- # Create tar file for the sdk
- create_tarball(d, "testexport_sdk_%s.tar.gz" % d.getVar("SDK_ARCH"), export_sdk_dir)
-
- bb.plain("Exported tests to: %s" % export_path)
-
-def create_tarball(d, tar_name, src_dir):
-
- import tarfile
-
- tar_path = os.path.join(d.getVar("TEST_EXPORT_DIR"), tar_name)
- current_dir = os.getcwd()
- src_dir = src_dir.rstrip('/')
- dir_name = os.path.dirname(src_dir)
- base_name = os.path.basename(src_dir)
-
- os.chdir(dir_name)
- tar = tarfile.open(tar_path, "w:gz")
- tar.add(base_name)
- tar.close()
- os.chdir(current_dir)
-
-inherit testimage
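For reference, the setup described in the removed header boils down to a couple of local.conf lines; a minimal sketch (core-image-sato is just the example image the header uses) is:

    # conf/local.conf
    INHERIT += "testexport"

    # then, from the build directory:
    #   bitbake core-image-sato
    #   bitbake core-image-sato -c testexport
    # The exported tree ends up under ${TMPDIR}/testexport/${PN} (TEST_EXPORT_DIR).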
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
deleted file mode 100644
index 00f0c29836..0000000000
--- a/meta/classes/testimage.bbclass
+++ /dev/null
@@ -1,485 +0,0 @@
-# Copyright (C) 2013 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-inherit metadata_scm
-# testimage.bbclass enables testing of qemu images using python unittests.
-# Most of the tests are commands run on the target image over ssh.
-# To use it, add testimage to the global inherit list and build your target image with -c testimage
-# You can try it out like this:
-# - first add IMAGE_CLASSES += "testimage" in local.conf
-# - build a qemu core-image-sato
-# - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
-#
-# The tests can be run automatically each time an image is built if you set
-# TESTIMAGE_AUTO = "1"
-
-TESTIMAGE_AUTO ??= "0"
-
-# You can set (or append to) TEST_SUITES in local.conf to select the tests
-# which you want to run for your target.
-# The test names are the module names in meta/lib/oeqa/runtime/cases.
-# Each name in TEST_SUITES represents a required test for the image (no skipping allowed).
-# Appending "auto" means that it will try to run all tests that are suitable for the image (each test decides that on its own).
-# Note that order in TEST_SUITES is relevant: tests are run in an order such that
-# tests mentioned in @skipUnlessPassed run before the tests that depend on them,
-# but without such dependencies, tests run in the order in which they are listed
-# in TEST_SUITES.
-#
-# A layer can add its own tests in lib/oeqa/runtime, provided it extends BBPATH as normal in its layer.conf.
-
-# TEST_LOG_DIR contains an ssh command log and may contain information about what command is running, its output and return codes, and for qemu a boot log up to login.
-# Booting is handled by this class, and it's not a test in itself.
-# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
-# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
-# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
-# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
-
-# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
-# if a pattern is not specifically present on this variable a default will be used when booting the target.
-# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags
-# e.g. normally the system boots and waits for a login prompt (login:); after that it sends the command: "root\n" to log in as the root user
-# if we wanted to log in as the hypothetical "webserver" user for example we could set the following:
-# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
-# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
-# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
-# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
-# They are prefixed with either search or send, to indicate whether the pattern is sent to, or searched for in, the target terminal
-
-TEST_LOG_DIR ?= "${WORKDIR}/testimage"
-
-TEST_EXPORT_DIR ?= "${TMPDIR}/testimage/${PN}"
-TEST_INSTALL_TMP_DIR ?= "${WORKDIR}/testimage/install_tmp"
-TEST_NEEDED_PACKAGES_DIR ?= "${WORKDIR}/testimage/packages"
-TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
-TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
-
-BASICTESTSUITE = "\
- ping date df ssh scp python perl gi ptest parselogs \
- logrotate connman systemd oe_syslog pam stap ldd xorg \
- kernelmodule gcc buildcpio buildlzip buildgalculator \
- dnf rpm opkg apt weston"
-
-DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
-
-# aarch64 has no graphics
-DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
-# musl doesn't support systemtap
-DEFAULT_TEST_SUITES_remove_libc-musl = "stap"
-
-# qemumips is quite slow and has reached the timeout limit several times on the YP build cluster;
-# mitigate this by removing build tests for qemumips machines.
-MIPSREMOVE ??= "buildcpio buildlzip buildgalculator"
-DEFAULT_TEST_SUITES_remove_qemumips = "${MIPSREMOVE}"
-DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
-
-TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
-
-TEST_QEMUBOOT_TIMEOUT ?= "1000"
-TEST_OVERALL_TIMEOUT ?= ""
-TEST_TARGET ?= "qemu"
-TEST_QEMUPARAMS ?= ""
-TEST_RUNQEMUPARAMS ?= ""
-
-TESTIMAGE_BOOT_PATTERNS ?= ""
-
-TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot package-index:do_package_index', '', d)}"
-
-TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
-TESTIMAGELOCK_qemuall = ""
-
-TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
-
-TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
-
-testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
-}
-
-testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
-}
-
-python do_testimage() {
- testimage_main(d)
-}
-
-addtask testimage
-do_testimage[nostamp] = "1"
-do_testimage[depends] += "${TESTIMAGEDEPENDS}"
-do_testimage[lockfiles] += "${TESTIMAGELOCK}"
-
-def testimage_sanity(d):
- if (d.getVar('TEST_TARGET') == 'simpleremote'
- and (not d.getVar('TEST_TARGET_IP')
- or not d.getVar('TEST_SERVER_IP'))):
- bb.fatal('When TEST_TARGET is set to "simpleremote" '
- 'TEST_TARGET_IP and TEST_SERVER_IP are needed too.')
-
-def get_testimage_configuration(d, test_type, machine):
- import platform
- from oeqa.utils.metadata import get_layers
- configuration = {'TEST_TYPE': test_type,
- 'MACHINE': machine,
- 'DISTRO': d.getVar("DISTRO"),
- 'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
- 'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
- 'STARTTIME': d.getVar("DATETIME"),
- 'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
- 'LAYERS': get_layers(d.getVar("BBLAYERS"))}
- return configuration
-get_testimage_configuration[vardepsexclude] = "DATETIME"
-
-def get_testimage_json_result_dir(d):
- json_result_dir = os.path.join(d.getVar("LOG_DIR"), 'oeqa')
- custom_json_result_dir = d.getVar("OEQA_JSON_RESULT_DIR")
- if custom_json_result_dir:
- json_result_dir = custom_json_result_dir
- return json_result_dir
-
-def get_testimage_result_id(configuration):
- return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
-
-def get_testimage_boot_patterns(d):
- from collections import defaultdict
- boot_patterns = defaultdict(str)
- # Only accept certain values
- accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
-    # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
- boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
- if boot_patterns_flags:
- patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
- for flag, flagval in patterns_set:
- if flag not in accepted_patterns:
- bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \
- search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
- contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
- return
- # We know boot prompt is searched through in binary format, others might be expressions
- if flag == 'search_reached_prompt':
- boot_patterns[flag] = flagval.encode()
- else:
- boot_patterns[flag] = flagval.encode().decode('unicode-escape')
- return boot_patterns
-
-
-def testimage_main(d):
- import os
- import json
- import signal
- import logging
-
- from bb.utils import export_proxies
- from oeqa.core.utils.misc import updateTestData
- from oeqa.runtime.context import OERuntimeTestContext
- from oeqa.runtime.context import OERuntimeTestContextExecutor
- from oeqa.core.target.qemu import supported_fstypes
- from oeqa.core.utils.test import getSuiteCases
- from oeqa.utils import make_logger_bitbake_compatible
-
- def sigterm_exception(signum, stackframe):
- """
- Catch SIGTERM from worker in order to stop qemu.
- """
- os.kill(os.getpid(), signal.SIGINT)
-
- def handle_test_timeout(timeout):
- bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout))
- os.kill(os.getpid(), signal.SIGINT)
-
- testimage_sanity(d)
-
- if (d.getVar('IMAGE_PKGTYPE') == 'rpm'
- and ('dnf' in d.getVar('TEST_SUITES') or 'auto' in d.getVar('TEST_SUITES'))):
- create_rpm_index(d)
-
- logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
- pn = d.getVar("PN")
-
- bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR"))
-
- image_name = ("%s/%s" % (d.getVar('DEPLOY_DIR_IMAGE'),
- d.getVar('IMAGE_LINK_NAME')))
-
- tdname = "%s.testdata.json" % image_name
- try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
-
-    # Some variables need to be updated (mostly paths) with the
- # ones of the current environment because some tests require them.
- updateTestData(d, td, d.getVar('TESTIMAGE_UPDATE_VARS').split())
-
- image_manifest = "%s.manifest" % image_name
- image_packages = OERuntimeTestContextExecutor.readPackagesManifest(image_manifest)
-
- extract_dir = d.getVar("TEST_EXTRACTED_DIR")
-
- # Get machine
- machine = d.getVar("MACHINE")
-
- # Get rootfs
- fstypes = d.getVar('IMAGE_FSTYPES').split()
- if d.getVar("TEST_TARGET") == "qemu":
- fstypes = [fs for fs in fstypes if fs in supported_fstypes]
- if not fstypes:
- bb.fatal('Unsupported image type built. Add a compatible image to '
- 'IMAGE_FSTYPES. Supported types: %s' %
- ', '.join(supported_fstypes))
- qfstype = fstypes[0]
- qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
- if qdeffstype:
- qfstype = qdeffstype
- rootfs = '%s.%s' % (image_name, qfstype)
-
- # Get tmpdir (not really used, just for compatibility)
- tmpdir = d.getVar("TMPDIR")
-
- # Get deploy_dir_image (not really used, just for compatibility)
- dir_image = d.getVar("DEPLOY_DIR_IMAGE")
-
- # Get bootlog
- bootlog = os.path.join(d.getVar("TEST_LOG_DIR"),
- 'qemu_boot_log.%s' % d.getVar('DATETIME'))
-
- # Get display
- display = d.getVar("BB_ORIGENV").getVar("DISPLAY")
-
- # Get kernel
- kernel_name = ('%s-%s.bin' % (d.getVar("KERNEL_IMAGETYPE"), machine))
- kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), kernel_name)
-
- # Get boottime
- boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT"))
-
- # Get use_kvm
- kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
-
- # Get OVMF
- ovmf = d.getVar("QEMU_USE_OVMF")
-
- slirp = False
- if d.getVar("QEMU_USE_SLIRP"):
- slirp = True
-
- # TODO: We use the current implementation of qemu runner because of
-    # time constraints; the qemu runner really needs a refactor too.
- target_kwargs = { 'machine' : machine,
- 'rootfs' : rootfs,
- 'tmpdir' : tmpdir,
- 'dir_image' : dir_image,
- 'display' : display,
- 'kernel' : kernel,
- 'boottime' : boottime,
- 'bootlog' : bootlog,
- 'kvm' : kvm,
- 'slirp' : slirp,
- 'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
- 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
- 'ovmf' : ovmf,
- }
-
- if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
- target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
-
- # TODO: Currently BBPATH is needed for custom loading of targets.
-    # It would be better to find these modules using introspection.
- target_kwargs['target_modules_path'] = d.getVar('BBPATH')
-
- # hardware controlled targets might need further access
- target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
- target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
- target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
- target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
-
- def export_ssh_agent(d):
- import os
-
- variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
- for v in variables:
- if v not in os.environ.keys():
- val = d.getVar(v)
- if val is not None:
- os.environ[v] = val
-
- export_ssh_agent(d)
-
-    # runtime tests use the network to download projects needed for the build
- export_proxies(d)
-
- # we need the host dumper in test context
- host_dumper = OERuntimeTestContextExecutor.getHostDumper(
- d.getVar("testimage_dump_host"),
- d.getVar("TESTIMAGE_DUMP_DIR"))
-
- # the robot dance
- target = OERuntimeTestContextExecutor.getTarget(
- d.getVar("TEST_TARGET"), logger, d.getVar("TEST_TARGET_IP"),
- d.getVar("TEST_SERVER_IP"), **target_kwargs)
-
- # test context
- tc = OERuntimeTestContext(td, logger, target, host_dumper,
- image_packages, extract_dir)
-
- # Load tests before starting the target
- test_paths = get_runtime_paths(d)
- test_modules = d.getVar('TEST_SUITES').split()
- if not test_modules:
- bb.fatal('Empty test suite, please verify TEST_SUITES variable')
-
- tc.loadTests(test_paths, modules=test_modules)
-
- suitecases = getSuiteCases(tc.suites)
- if not suitecases:
- bb.fatal('Empty test suite, please verify TEST_SUITES variable')
- else:
- bb.debug(2, 'test suites:\n\t%s' % '\n\t'.join([str(c) for c in suitecases]))
-
- package_extraction(d, tc.suites)
-
- results = None
- orig_sigterm_handler = signal.signal(signal.SIGTERM, sigterm_exception)
- try:
- # We need to check if runqemu ends unexpectedly
-        # or if the worker sends us a SIGTERM
- tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
- import threading
- try:
- threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
- except ValueError:
- pass
- results = tc.runTests()
- except (KeyboardInterrupt, BlockingIOError) as err:
- if isinstance(err, KeyboardInterrupt):
- bb.error('testimage interrupted, shutting down...')
- else:
- bb.error('runqemu failed, shutting down...')
- if results:
- results.stop()
- results = None
- finally:
- signal.signal(signal.SIGTERM, orig_sigterm_handler)
- tc.target.stop()
-
- # Show results (if we have them)
- if not results:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
- configuration = get_testimage_configuration(d, 'runtime', machine)
- results.logDetails(get_testimage_json_result_dir(d),
- configuration,
- get_testimage_result_id(configuration),
- dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
- results.logSummary(pn)
- if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
-
-def get_runtime_paths(d):
- """
-    Returns a list of paths where runtime tests must reside.
-
- Runtime tests are expected in <LAYER_DIR>/lib/oeqa/runtime/cases/
- """
- paths = []
-
- for layer in d.getVar('BBLAYERS').split():
- path = os.path.join(layer, 'lib/oeqa/runtime/cases')
- if os.path.isdir(path):
- paths.append(path)
- return paths
-
-def create_index(arg):
- import subprocess
-
- index_cmd = arg
- try:
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd,
- stderr=subprocess.STDOUT,
- shell=True)
- result = result.decode('utf-8')
- except subprocess.CalledProcessError as e:
- return("Index creation command '%s' failed with return code "
- '%d:\n%s' % (e.cmd, e.returncode, e.output.decode("utf-8")))
- if result:
- bb.note(result)
- return None
-
-def create_rpm_index(d):
- import glob
- # Index RPMs
- rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
- index_cmds = []
- archs = (d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or '').replace('-', '_')
-
- for arch in archs.split():
- rpm_dir = os.path.join(d.getVar('DEPLOY_DIR_RPM'), arch)
- idx_path = os.path.join(d.getVar('WORKDIR'), 'oe-testimage-repo', arch)
-
- if not os.path.isdir(rpm_dir):
- continue
-
- lockfilename = os.path.join(d.getVar('DEPLOY_DIR_RPM'), 'rpm.lock')
- lf = bb.utils.lockfile(lockfilename, False)
- oe.path.copyhardlinktree(rpm_dir, idx_path)
- # Full indexes overload a 256MB image so reduce the number of rpms
- # in the feed by filtering to specific packages needed by the tests.
- package_list = glob.glob(idx_path + "*/*.rpm")
-
- for pkg in package_list:
- if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
- bb.utils.remove(pkg)
-
- bb.utils.unlockfile(lf)
- cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
-
- # Create repodata
- result = create_index(cmd)
- if result:
- bb.fatal('%s' % ('\n'.join(result)))
-
-def package_extraction(d, test_suites):
- from oeqa.utils.package_manager import find_packages_to_extract
- from oeqa.utils.package_manager import extract_packages
-
- bb.utils.remove(d.getVar("TEST_NEEDED_PACKAGES_DIR"), recurse=True)
- packages = find_packages_to_extract(test_suites)
- if packages:
- bb.utils.mkdirhier(d.getVar("TEST_INSTALL_TMP_DIR"))
- bb.utils.mkdirhier(d.getVar("TEST_PACKAGED_DIR"))
- bb.utils.mkdirhier(d.getVar("TEST_EXTRACTED_DIR"))
- extract_packages(d, packages)
-
-testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
-
-python () {
- if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
- bb.build.addtask("testimage", "do_build", "do_image_complete", d)
-}
-
-inherit testsdk
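A compact local.conf sketch of the variables testimage_main() and testimage_sanity() read (the IP addresses are placeholders) might be:

    # conf/local.conf
    IMAGE_CLASSES += "testimage"
    TESTIMAGE_AUTO = "1"              # run the tests automatically after each image build
    TEST_SUITES = "ping ssh ptest"    # subset of DEFAULT_TEST_SUITES; order matters

    # For a physical board instead of qemu:
    # TEST_TARGET = "simpleremote"
    # TEST_TARGET_IP = "192.168.7.2"  # placeholder
    # TEST_SERVER_IP = "192.168.7.1"  # placeholder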
diff --git a/meta/classes/testsdk.bbclass b/meta/classes/testsdk.bbclass
deleted file mode 100644
index 758a23ac55..0000000000
--- a/meta/classes/testsdk.bbclass
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (C) 2013 - 2016 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-# testsdk.bbclass enables testing for SDK and Extensible SDK
-#
-# To run SDK tests, run the commands:
-# $ bitbake <image-name> -c populate_sdk
-# $ bitbake <image-name> -c testsdk
-#
-# To run eSDK tests, run the commands:
-# $ bitbake <image-name> -c populate_sdk_ext
-# $ bitbake <image-name> -c testsdkext
-#
-# where "<image-name>" is an image like core-image-sato.
-
-TESTSDK_CLASS_NAME ?= "oeqa.sdk.testsdk.TestSDK"
-TESTSDKEXT_CLASS_NAME ?= "oeqa.sdkext.testsdk.TestSDKExt"
-
-def import_and_run(name, d):
- import importlib
-
- class_name = d.getVar(name)
- if class_name:
- module, cls = class_name.rsplit('.', 1)
- m = importlib.import_module(module)
- c = getattr(m, cls)()
- c.run(d)
- else:
- bb.warn('No tests were run because %s did not define a class' % name)
-
-import_and_run[vardepsexclude] = "DATETIME BB_ORIGENV"
-
-python do_testsdk() {
- import_and_run('TESTSDK_CLASS_NAME', d)
-}
-addtask testsdk
-do_testsdk[nostamp] = "1"
-
-python do_testsdkext() {
- import_and_run('TESTSDKEXT_CLASS_NAME', d)
-}
-addtask testsdkext
-do_testsdkext[nostamp] = "1"
-
-python () {
- if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
- bb.build.addtask("testsdk", None, "do_populate_sdk", d)
- bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
-}
diff --git a/meta/classes/texinfo.bbclass b/meta/classes/texinfo.bbclass
deleted file mode 100644
index f46bacabd4..0000000000
--- a/meta/classes/texinfo.bbclass
+++ /dev/null
@@ -1,18 +0,0 @@
-# This class is inherited by recipes whose upstream packages invoke the
-# texinfo utilities at build-time. Native and cross recipes are made to use the
-# dummy scripts provided by texinfo-dummy-native, for improved performance.
-# Target architecture recipes use the genuine Texinfo utilities. By default,
-# they use the Texinfo utilities on the host system. If you want to use the
-# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
-# makeinfo from SANITY_REQUIRED_UTILITIES.
-
-TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
-TEXDEP_class-native = "texinfo-dummy-native"
-TEXDEP_class-cross = "texinfo-dummy-native"
-TEXDEP_class-crosssdk = "texinfo-dummy-native"
-TEXDEP_class-cross-canadian = "texinfo-dummy-native"
-DEPENDS_append = " ${TEXDEP}"
-
-# libtool-cross doesn't inherit cross
-TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
-
diff --git a/meta/classes/toaster.bbclass b/meta/classes/toaster.bbclass
index 9518ddf7a4..03c4f3a930 100644
--- a/meta/classes/toaster.bbclass
+++ b/meta/classes/toaster.bbclass
@@ -3,7 +3,7 @@
#
# Copyright (C) 2013 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
#
# This bbclass is designed to extract data used by OE-Core during the build process,
# for recording in the Toaster system.
@@ -101,12 +101,12 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
for line in fin:
try:
kn, kv = line.strip().split(": ", 1)
- m = re.match(r"^PKG_([^A-Z:]*)", kn)
+ m = re.match(r"^PKG:([^A-Z:]*)", kn)
if m:
pkgdata['OPKGN'] = m.group(1)
- kn = "_".join([x for x in kn.split("_") if x.isupper()])
- pkgdata[kn] = kv.strip()
- if kn == 'FILES_INFO':
+ kn = kn.split(":")[0]
+ pkgdata[kn] = kv
+ if kn.startswith('FILES_INFO'):
pkgdata[kn] = json.loads(kv)
except ValueError:
diff --git a/meta/classes/toolchain-scripts-base.bbclass b/meta/classes/toolchain-scripts-base.bbclass
deleted file mode 100644
index 2489b9dbeb..0000000000
--- a/meta/classes/toolchain-scripts-base.bbclass
+++ /dev/null
@@ -1,11 +0,0 @@
-# This function creates a version information file
-toolchain_create_sdk_version () {
- local versionfile=$1
- rm -f $versionfile
- touch $versionfile
- echo 'Distro: ${DISTRO}' >> $versionfile
- echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
- echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
- echo 'Timestamp: ${DATETIME}' >> $versionfile
-}
-toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
deleted file mode 100644
index db1d3215ef..0000000000
--- a/meta/classes/toolchain-scripts.bbclass
+++ /dev/null
@@ -1,203 +0,0 @@
-inherit toolchain-scripts-base siteinfo kernel-arch
-
-# We want to be able to change the value of MULTIMACH_TARGET_SYS, because it
-# doesn't always match our expectations... but we default to the stock value
-REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
-TARGET_CC_ARCH_append_libc-musl = " -mmusl"
-
-# default debug prefix map isn't valid in the SDK
-DEBUG_PREFIX_MAP = ""
-
-# This function creates an environment-setup-script for use in a deployable SDK
-toolchain_create_sdk_env_script () {
- # Create environment setup script. Remember that $SDKTARGETSYSROOT should
- # only be expanded on the target at runtime.
- base_sbindir=${10:-${base_sbindir_nativesdk}}
- base_bindir=${9:-${base_bindir_nativesdk}}
- sbindir=${8:-${sbindir_nativesdk}}
- sdkpathnative=${7:-${SDKPATHNATIVE}}
- prefix=${6:-${prefix_nativesdk}}
- bindir=${5:-${bindir_nativesdk}}
- libdir=${4:-${libdir}}
- sysroot=${3:-${SDKTARGETSYSROOT}}
- multimach_target_sys=${2:-${REAL_MULTIMACH_TARGET_SYS}}
- script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-$multimach_target_sys}
- rm -f $script
- touch $script
-
- echo '# Check for LD_LIBRARY_PATH being set, which can break SDK and generally is a bad practice' >> $script
- echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
- echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
-	echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
- echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
- echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
- echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
- echo ' echo "For more references see:"' >> $script
- echo ' echo " http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80"' >> $script
- echo ' echo " http://xahlee.info/UnixResource_dir/_/ldpath.html"' >> $script
- echo ' return 1' >> $script
- echo 'fi' >> $script
-
- echo 'export SDKTARGETSYSROOT='"$sysroot" >> $script
- EXTRAPATH=""
- for i in ${CANADIANEXTRAOS}; do
- EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
- done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
- echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
- echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
- echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
- echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
- echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
- echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
- echo 'export OECORE_BASELIB="${baselib}"' >> $script
- echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
- echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
-
- echo 'unset command_not_found_handle' >> $script
-
- toolchain_shared_env_script
-}
-
-# This function creates an environment-setup-script in the TMPDIR which enables
-# an OE-core IDE to integrate with the build tree
-toolchain_create_tree_env_script () {
- script=${TMPDIR}/environment-setup-${REAL_MULTIMACH_TARGET_SYS}
- rm -f $script
- touch $script
- echo 'orig=`pwd`; cd ${COREBASE}; . ./oe-init-build-env ${TOPDIR}; cd $orig' >> $script
- echo 'export PATH=${STAGING_DIR_NATIVE}/usr/bin:${STAGING_BINDIR_TOOLCHAIN}:$PATH' >> $script
- echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
- echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
- echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
- echo 'export SDKTARGETSYSROOT=${STAGING_DIR_TARGET}' >> $script
- echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
- echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
- echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
-
- toolchain_shared_env_script
-}
-
-toolchain_shared_env_script () {
- echo 'export CC="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export CXX="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export CPP="${TARGET_PREFIX}gcc -E ${TARGET_CC_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export AS="${TARGET_PREFIX}as ${TARGET_AS_ARCH}"' >> $script
- echo 'export LD="${TARGET_PREFIX}ld ${TARGET_LD_ARCH} --sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
- echo 'export STRIP=${TARGET_PREFIX}strip' >> $script
- echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
- echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
- echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
- echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
- echo 'export AR=${TARGET_PREFIX}ar' >> $script
- echo 'export NM=${TARGET_PREFIX}nm' >> $script
- echo 'export M4=m4' >> $script
- echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
- echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export CFLAGS="${TARGET_CFLAGS}"' >> $script
- echo 'export CXXFLAGS="${TARGET_CXXFLAGS}"' >> $script
- echo 'export LDFLAGS="${TARGET_LDFLAGS}"' >> $script
- echo 'export CPPFLAGS="${TARGET_CPPFLAGS}"' >> $script
- echo 'export KCFLAGS="--sysroot=$SDKTARGETSYSROOT"' >> $script
- echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
- echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
- echo 'export ARCH=${ARCH}' >> $script
- echo 'export CROSS_COMPILE=${TARGET_PREFIX}' >> $script
-
- cat >> $script <<EOF
-
-# Append environment subscripts
-if [ -d "\$OECORE_TARGET_SYSROOT/environment-setup.d" ]; then
- for envfile in \$OECORE_TARGET_SYSROOT/environment-setup.d/*.sh; do
- . \$envfile
- done
-fi
-if [ -d "\$OECORE_NATIVE_SYSROOT/environment-setup.d" ]; then
- for envfile in \$OECORE_NATIVE_SYSROOT/environment-setup.d/*.sh; do
- . \$envfile
- done
-fi
-EOF
-}
-
-toolchain_create_post_relocate_script() {
- relocate_script=$1
- env_dir=$2
- rm -f $relocate_script
- touch $relocate_script
-
- cat >> $relocate_script <<EOF
-if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
- # Source top-level SDK env scripts in case they are needed for the relocate
- # scripts.
- for env_setup_script in ${env_dir}/environment-setup-*; do
- . \$env_setup_script
- status=\$?
- if [ \$status != 0 ]; then
- echo "\$0: Failed to source \$env_setup_script with status \$status"
- exit \$status
- fi
-
- for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
- if [ ! -x \$s ]; then
- continue
- fi
- \$s "\$1"
- status=\$?
- if [ \$status != 0 ]; then
- echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
- exit \$status
- fi
- done
- done
- rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
-fi
-EOF
-}
-
-# We get the cached site config at runtime
-TOOLCHAIN_CONFIGSITE_NOCACHE = "${@siteinfo_get_files(d)}"
-TOOLCHAIN_CONFIGSITE_SYSROOTCACHE = "${STAGING_DIR}/${MLPREFIX}${MACHINE}/${target_datadir}/${TARGET_SYS}_config_site.d"
-TOOLCHAIN_NEED_CONFIGSITE_CACHE ??= "virtual/${MLPREFIX}libc ncurses"
-DEPENDS += "${TOOLCHAIN_NEED_CONFIGSITE_CACHE}"
-
-# This function creates a site config file
-toolchain_create_sdk_siteconfig () {
- local siteconfig=$1
-
- rm -f $siteconfig
- touch $siteconfig
-
- for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
- cat $sitefile >> $siteconfig
- done
-
- #get cached site config
- for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
- # Resolve virtual/* names to the real recipe name using sysroot-providers info
- case $sitefile in virtual/*)
- sitefile=`echo $sitefile | tr / _`
- sitefile=`cat ${STAGING_DIR_TARGET}/sysroot-providers/$sitefile`
- esac
-
- if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
- cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
- fi
- done
-}
-# The immediate expansion above can result in unwanted path dependencies here
-toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
-
-python __anonymous () {
- import oe.classextend
- deps = ""
- for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE') or "").split():
- deps += " %s:do_populate_sysroot" % dep
- for variant in (d.getVar('MULTILIB_VARIANTS') or "").split():
- clsextend = oe.classextend.ClassExtender(variant, d)
- newdep = clsextend.extend_name(dep)
- deps += " %s:do_populate_sysroot" % newdep
- d.appendVarFlag('do_configure', 'depends', deps)
-}
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
index 72da932232..160f7a024b 100644
--- a/meta/classes/typecheck.bbclass
+++ b/meta/classes/typecheck.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Check types of bitbake configuration variables
#
# See oe.types for details.
diff --git a/meta/classes/uboot-config.bbclass b/meta/classes/uboot-config.bbclass
deleted file mode 100644
index 89ff970fcc..0000000000
--- a/meta/classes/uboot-config.bbclass
+++ /dev/null
@@ -1,57 +0,0 @@
-# Handle U-Boot config for a machine
-#
-# The format to specify it, in the machine, is:
-#
-# UBOOT_CONFIG ??= <default>
-# UBOOT_CONFIG[foo] = "config,images,binary"
-#
-# or
-#
-# UBOOT_MACHINE = "config"
-#
-# Copyright 2013, 2014 (C) O.S. Systems Software LTDA.
-
-UBOOT_BINARY ?= "u-boot.${UBOOT_SUFFIX}"
-
-python () {
- ubootmachine = d.getVar("UBOOT_MACHINE")
- ubootconfigflags = d.getVarFlags('UBOOT_CONFIG')
- ubootbinary = d.getVar('UBOOT_BINARY')
- ubootbinaries = d.getVar('UBOOT_BINARIES')
- # The "doc" varflag is special, we don't want to see it here
- ubootconfigflags.pop('doc', None)
- ubootconfig = (d.getVar('UBOOT_CONFIG') or "").split()
-
- if not ubootmachine and not ubootconfig:
- PN = d.getVar("PN")
- FILE = os.path.basename(d.getVar("FILE"))
- bb.debug(1, "To build %s, see %s for instructions on \
- setting up your machine config" % (PN, FILE))
- raise bb.parse.SkipRecipe("Either UBOOT_MACHINE or UBOOT_CONFIG must be set in the %s machine configuration." % d.getVar("MACHINE"))
-
- if ubootmachine and ubootconfig:
- raise bb.parse.SkipRecipe("You cannot use UBOOT_MACHINE and UBOOT_CONFIG at the same time.")
-
- if ubootconfigflags and ubootbinaries:
- raise bb.parse.SkipRecipe("You cannot use UBOOT_BINARIES as it is internal to uboot_config.bbclass.")
-
- if len(ubootconfig) > 0:
- for config in ubootconfig:
- for f, v in ubootconfigflags.items():
- if config == f:
- items = v.split(',')
- if items[0] and len(items) > 3:
- raise bb.parse.SkipRecipe('Only config,images,binary can be specified!')
- d.appendVar('UBOOT_MACHINE', ' ' + items[0])
- # IMAGE_FSTYPES appending
- if len(items) > 1 and items[1]:
- bb.debug(1, "Appending '%s' to IMAGE_FSTYPES." % items[1])
- d.appendVar('IMAGE_FSTYPES', ' ' + items[1])
- if len(items) > 2 and items[2]:
- bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % items[2])
- d.appendVar('UBOOT_BINARIES', ' ' + items[2])
- else:
- bb.debug(1, "Appending '%s' to UBOOT_BINARIES." % ubootbinary)
- d.appendVar('UBOOT_BINARIES', ' ' + ubootbinary)
- break
-}
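To illustrate how the loop above consumes the flags, a machine configuration sketch (the config names, image type and binary name are purely illustrative) could be:

    # conf/machine/example.conf (hypothetical)
    UBOOT_CONFIG ??= "sd"
    # Each flag is "config,images,binary": config is appended to UBOOT_MACHINE,
    # images to IMAGE_FSTYPES, and binary to UBOOT_BINARIES (UBOOT_BINARY is
    # used when the third field is empty).
    UBOOT_CONFIG[sd]  = "example_sd_defconfig,sdcard"
    UBOOT_CONFIG[spi] = "example_spi_defconfig,,u-boot.bin"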
diff --git a/meta/classes/uboot-extlinux-config.bbclass b/meta/classes/uboot-extlinux-config.bbclass
deleted file mode 100644
index f4bf94be04..0000000000
--- a/meta/classes/uboot-extlinux-config.bbclass
+++ /dev/null
@@ -1,157 +0,0 @@
-# uboot-extlinux-config.bbclass
-#
-# This class allows generating the extlinux.conf used by U-Boot. The
-# U-Boot support for it exists to enable use of the Generic Distribution
-# Configuration specification by OpenEmbedded-based products.
-#
-# External variables:
-#
-# UBOOT_EXTLINUX_CONSOLE - Set to "console=ttyX" to change kernel boot
-# default console.
-# UBOOT_EXTLINUX_LABELS - A list of targets for the automatic config.
-# UBOOT_EXTLINUX_KERNEL_ARGS - Add additional kernel arguments.
-# UBOOT_EXTLINUX_KERNEL_IMAGE - Kernel image name.
-# UBOOT_EXTLINUX_FDTDIR - Device tree directory.
-# UBOOT_EXTLINUX_FDT - Device tree file.
-# UBOOT_EXTLINUX_INITRD - Indicates a list of filesystem images to
-# concatenate and use as an initrd (optional).
-# UBOOT_EXTLINUX_MENU_DESCRIPTION - Name to use as description.
-# UBOOT_EXTLINUX_ROOT - Root kernel cmdline.
-# UBOOT_EXTLINUX_TIMEOUT - Timeout before DEFAULT selection is made.
-# Measured in 1/10 of a second.
-# UBOOT_EXTLINUX_DEFAULT_LABEL - Target to be selected by default after
-# the timeout period
-#
-# If there's only one label, the system will boot automatically and no menu will be
-# created. If you want to use more than one label, e.g. linux and alternate,
-# use overrides to set the menu description, console and other variables.
-#
-# Ex:
-#
-# UBOOT_EXTLINUX_LABELS ??= "default fallback"
-#
-# UBOOT_EXTLINUX_DEFAULT_LABEL ??= "Linux Default"
-# UBOOT_EXTLINUX_TIMEOUT ??= "30"
-#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_default ??= "../zImage"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_default ??= "Linux Default"
-#
-# UBOOT_EXTLINUX_KERNEL_IMAGE_fallback ??= "../zImage-fallback"
-# UBOOT_EXTLINUX_MENU_DESCRIPTION_fallback ??= "Linux Fallback"
-#
-# Results:
-#
-# menu title Select the boot mode
-# TIMEOUT 30
-# DEFAULT Linux Default
-# LABEL Linux Default
-# KERNEL ../zImage
-# FDTDIR ../
-# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
-# LABEL Linux Fallback
-# KERNEL ../zImage-fallback
-# FDTDIR ../
-# APPEND root=/dev/mmcblk2p2 rootwait rw console=${console}
-#
-# Copyright (C) 2016, O.S. Systems Software LTDA. All Rights Reserved
-# Released under the MIT license (see packages/COPYING)
-#
-# The kernel has an internal default console, which you can override with
-# a console=...some_tty...
-UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
-UBOOT_EXTLINUX_LABELS ??= "linux"
-UBOOT_EXTLINUX_FDT ??= ""
-UBOOT_EXTLINUX_FDTDIR ??= "../"
-UBOOT_EXTLINUX_KERNEL_IMAGE ??= "../${KERNEL_IMAGETYPE}"
-UBOOT_EXTLINUX_KERNEL_ARGS ??= "rootwait rw"
-UBOOT_EXTLINUX_MENU_DESCRIPTION_linux ??= "${DISTRO_NAME}"
-
-UBOOT_EXTLINUX_CONFIG = "${B}/extlinux.conf"
-
-python do_create_extlinux_config() {
- if d.getVar("UBOOT_EXTLINUX") != "1":
- return
-
- if not d.getVar('WORKDIR'):
- bb.error("WORKDIR not defined, unable to package")
-
- labels = d.getVar('UBOOT_EXTLINUX_LABELS')
- if not labels:
- bb.fatal("UBOOT_EXTLINUX_LABELS not defined, nothing to do")
-
- if not labels.strip():
- bb.fatal("No labels, nothing to do")
-
- cfile = d.getVar('UBOOT_EXTLINUX_CONFIG')
- if not cfile:
- bb.fatal('Unable to read UBOOT_EXTLINUX_CONFIG')
-
- localdata = bb.data.createCopy(d)
-
- try:
- with open(cfile, 'w') as cfgfile:
- cfgfile.write('# Generic Distro Configuration file generated by OpenEmbedded\n')
-
- if len(labels.split()) > 1:
- cfgfile.write('menu title Select the boot mode\n')
-
- timeout = localdata.getVar('UBOOT_EXTLINUX_TIMEOUT')
- if timeout:
- cfgfile.write('TIMEOUT %s\n' % (timeout))
-
- if len(labels.split()) > 1:
- default = localdata.getVar('UBOOT_EXTLINUX_DEFAULT_LABEL')
- if default:
- cfgfile.write('DEFAULT %s\n' % (default))
-
- # Need to deconflict the labels with existing overrides
- label_overrides = labels.split()
- default_overrides = localdata.getVar('OVERRIDES').split(':')
-            # We're keeping all the existing overrides that aren't used as a label;
- # an override for that label will be added back in while we're processing that label
- keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
-
- for label in labels.split():
-
- localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
-
- extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
-
- menu_description = localdata.getVar('UBOOT_EXTLINUX_MENU_DESCRIPTION')
- if not menu_description:
- menu_description = label
-
- root = localdata.getVar('UBOOT_EXTLINUX_ROOT')
- if not root:
- bb.fatal('UBOOT_EXTLINUX_ROOT not defined')
-
- kernel_image = localdata.getVar('UBOOT_EXTLINUX_KERNEL_IMAGE')
- fdtdir = localdata.getVar('UBOOT_EXTLINUX_FDTDIR')
-
- fdt = localdata.getVar('UBOOT_EXTLINUX_FDT')
-
- if fdt:
- cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDT %s\n' %
- (menu_description, kernel_image, fdt))
- elif fdtdir:
- cfgfile.write('LABEL %s\n\tKERNEL %s\n\tFDTDIR %s\n' %
- (menu_description, kernel_image, fdtdir))
- else:
- cfgfile.write('LABEL %s\n\tKERNEL %s\n' % (menu_description, kernel_image))
-
- kernel_args = localdata.getVar('UBOOT_EXTLINUX_KERNEL_ARGS')
-
- initrd = localdata.getVar('UBOOT_EXTLINUX_INITRD')
- if initrd:
- cfgfile.write('\tINITRD %s\n'% initrd)
-
- kernel_args = root + " " + kernel_args
- cfgfile.write('\tAPPEND %s %s\n' % (kernel_args, extlinux_console))
-
- except OSError:
- bb.fatal('Unable to open %s' % (cfile))
-}
-UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
-do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
-
-addtask create_extlinux_config before do_install do_deploy after do_compile
diff --git a/meta/classes/uboot-sign.bbclass b/meta/classes/uboot-sign.bbclass
deleted file mode 100644
index 713196df41..0000000000
--- a/meta/classes/uboot-sign.bbclass
+++ /dev/null
@@ -1,132 +0,0 @@
-# This file is part of U-Boot verified boot support and is intended to be
-# inherited from u-boot recipe and from kernel-fitimage.bbclass.
-#
-# The signature procedure requires the user to generate an RSA key and
-# certificate in a directory and to define the following variable:
-#
-# UBOOT_SIGN_KEYDIR = "/keys/directory"
-# UBOOT_SIGN_KEYNAME = "dev" # keys name in keydir (eg. "dev.crt", "dev.key")
-# UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
-# UBOOT_SIGN_ENABLE = "1"
-#
-# As verified boot depends on fitImage generation, following is also required:
-#
-# KERNEL_CLASSES ?= " kernel-fitimage "
-# KERNEL_IMAGETYPE ?= "fitImage"
-#
-# The signature support is limited to the use of CONFIG_OF_SEPARATE in U-Boot.
-#
-# The tasks sequence is set as below, using DEPLOY_IMAGE_DIR as common place to
-# treat the device tree blob:
-#
-# * u-boot:do_install_append
-# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
-# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
-#
-# * virtual/kernel:do_assemble_fitimage
-# Sign the image
-#
-# * u-boot:do_deploy[postfuncs]
-# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
-#
-# For more details on signature process, please refer to U-Boot documentation.
-
-# Signature activation.
-UBOOT_SIGN_ENABLE ?= "0"
-
-# Default value for deployment filenames.
-UBOOT_DTB_IMAGE ?= "u-boot-${MACHINE}-${PV}-${PR}.dtb"
-UBOOT_DTB_BINARY ?= "u-boot.dtb"
-UBOOT_DTB_SYMLINK ?= "u-boot-${MACHINE}.dtb"
-UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
-UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
-UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
-
-# Functions in this bbclass are for u-boot only
-UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
-
-concat_dtb_helper() {
- if [ -e "${UBOOT_DTB_BINARY}" ]; then
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
- ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
- fi
-
- if [ -f "${UBOOT_NODTB_BINARY}" ]; then
- install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
- ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
- fi
-
- # Concatenate U-Boot w/o DTB & DTB with public key
- # (cf. kernel-fitimage.bbclass for more details)
- deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
- if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
- [ -e "$deployed_uboot_dtb_binary" ]; then
- oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
- install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
- elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
- cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
- else
- bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
- fi
-}
-
-concat_dtb() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
- mkdir -p ${DEPLOYDIR}
- if [ -n "${UBOOT_CONFIG}" ]; then
- for config in ${UBOOT_MACHINE}; do
- CONFIG_B_PATH="${config}"
- cd ${B}/${config}
- concat_dtb_helper
- done
- else
- CONFIG_B_PATH=""
- cd ${B}
- concat_dtb_helper
- fi
- fi
-}
-
-# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
-# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
-install_helper() {
- if [ -f "${UBOOT_DTB_BINARY}" ]; then
- install -d ${D}${datadir}
- # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
- # need both of them.
- install ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
- ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
- else
- bbwarn "${UBOOT_DTB_BINARY} not found"
- fi
-}
-
-do_install_append() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
- if [ -n "${UBOOT_CONFIG}" ]; then
- for config in ${UBOOT_MACHINE}; do
- cd ${B}/${config}
- install_helper
- done
- else
- cd ${B}
- install_helper
- fi
- fi
-}
-
-do_deploy_prepend_pn-${UBOOT_PN}() {
- if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ]; then
- concat_dtb
- fi
-}
-
-python () {
- if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
- kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
-
-    # Make "bitbake u-boot -cdeploy" deploy the signed u-boot.dtb
- d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
-}
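Pulling the variables from the removed header together, a configuration sketch for enabling verified boot (the key directory and name are the same placeholders the header uses) looks like:

    # Verified boot configuration (placeholders from the header comment above)
    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "/keys/directory"
    UBOOT_SIGN_KEYNAME = "dev"          # expects dev.crt / dev.key in UBOOT_SIGN_KEYDIR
    UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"

    # Verified boot depends on fitImage generation:
    KERNEL_CLASSES ?= " kernel-fitimage "
    KERNEL_IMAGETYPE ?= "fitImage"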
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
deleted file mode 100644
index 70799bbf6d..0000000000
--- a/meta/classes/uninative.bbclass
+++ /dev/null
@@ -1,166 +0,0 @@
-UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
-UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
-
-UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
-# Example checksums
-#UNINATIVE_CHECKSUM[aarch64] = "dead"
-#UNINATIVE_CHECKSUM[i686] = "dead"
-#UNINATIVE_CHECKSUM[x86_64] = "dead"
-UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
-
-# Enabling uninative will change the following variables, so they need to go on the parsing white list to prevent multiple recipe parsing
-BB_HASHCONFIG_WHITELIST += "NATIVELSBSTRING SSTATEPOSTUNPACKFUNCS BUILD_LDFLAGS"
-
-addhandler uninative_event_fetchloader
-uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
-
-addhandler uninative_event_enable
-uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
-
-python uninative_event_fetchloader() {
- """
- This event fires on the parent and will try to fetch the tarball if the
- loader isn't already present.
- """
-
- chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH"))
- if not chksum:
- bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH"))
-
- loader = d.getVar("UNINATIVE_LOADER")
- loaderchksum = loader + ".chksum"
- if os.path.exists(loader) and os.path.exists(loaderchksum):
- with open(loaderchksum, "r") as f:
- readchksum = f.read().strip()
- if readchksum == chksum:
- return
-
- import subprocess
- try:
- # Save and restore cwd as Fetch.download() does a chdir()
- olddir = os.getcwd()
-
- tarball = d.getVar("UNINATIVE_TARBALL")
- tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
- tarballpath = os.path.join(tarballdir, tarball)
-
- if not os.path.exists(tarballpath + ".done"):
- bb.utils.mkdirhier(tarballdir)
- if d.getVar("UNINATIVE_URL") == "unset":
- bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
-
- localdata = bb.data.createCopy(d)
- localdata.setVar('FILESPATH', "")
- localdata.setVar('DL_DIR', tarballdir)
- # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
- # and we can't easily put 'chksum' into the url path from a url parameter with
- # the current fetcher url handling
- ownmirror = d.getVar('SOURCE_MIRROR_URL')
- if ownmirror:
- localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
-
- srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
- bb.note("Fetching uninative binary shim from %s" % srcuri)
-
- fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
- fetcher.download()
- localpath = fetcher.localpath(srcuri)
- if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
- # Follow the symlink behavior from the bitbake fetch2.
- # This will cover the case where an existing symlink is broken
- # as well as if there are two processes trying to create it
- # at the same time.
- if os.path.islink(tarballpath):
- # Broken symbolic link
- os.unlink(tarballpath)
-
- # Deal with two processes trying to make symlink at once
- try:
- os.symlink(localpath, tarballpath)
- except FileExistsError:
- pass
-
-        # ldd output is "ldd (Ubuntu GLIBC 2.23-0ubuntu10) 2.23", extract the last field from the first line
- glibcver = subprocess.check_output(["ldd", "--version"]).decode('utf-8').split('\n')[0].split()[-1]
- if bb.utils.vercmp_string(d.getVar("UNINATIVE_MAXGLIBCVERSION"), glibcver) < 0:
-                raise RuntimeError("Your host glibc version (%s) is newer than that in uninative (%s). Disabling uninative so that sstate is not corrupted." % (glibcver, d.getVar("UNINATIVE_MAXGLIBCVERSION")))
-
- cmd = d.expand("\
-mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; \
-cd ${UNINATIVE_STAGING_DIR}-uninative; \
-tar -xJf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; \
-${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux \
- ${UNINATIVE_LOADER} \
- ${UNINATIVE_LOADER} \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
- subprocess.check_output(cmd, shell=True)
-
- with open(loaderchksum, "w") as f:
- f.write(chksum)
-
- enable_uninative(d)
-
- except RuntimeError as e:
- bb.warn(str(e))
- except bb.fetch2.BBFetchException as exc:
- bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
- bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
- except subprocess.CalledProcessError as exc:
- bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
- bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
- finally:
- os.chdir(olddir)
-}
-
-python uninative_event_enable() {
- """
- This event handler is called in the workers and is responsible for setting
- up uninative if a loader is found.
- """
- enable_uninative(d)
-}
-
-def enable_uninative(d):
- loader = d.getVar("UNINATIVE_LOADER")
- if os.path.exists(loader):
- bb.debug(2, "Enabling uninative")
- d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d))
- d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
- d.appendVarFlag("SSTATEPOSTUNPACKFUNCS", "vardepvalueexclude", "| uninative_changeinterp")
- d.appendVar("BUILD_LDFLAGS", " -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
- d.appendVarFlag("BUILD_LDFLAGS", "vardepvalueexclude", "| -Wl,--allow-shlib-undefined -Wl,--dynamic-linker=${UNINATIVE_LOADER}")
- d.appendVarFlag("BUILD_LDFLAGS", "vardepsexclude", "UNINATIVE_LOADER")
- d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
-
-python uninative_changeinterp () {
- import subprocess
- import stat
- import oe.qa
-
- if not (bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d)):
- return
-
- sstateinst = d.getVar('SSTATE_INSTDIR')
- for walkroot, dirs, files in os.walk(sstateinst):
- for file in files:
- if file.endswith(".so") or ".so." in file:
- continue
- f = os.path.join(walkroot, file)
- if os.path.islink(f):
- continue
- s = os.stat(f)
- if not ((s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH)):
- continue
- elf = oe.qa.ELFFile(f)
- try:
- elf.open()
- except oe.qa.NotELFFileError:
- continue
- if not elf.isDynamic():
- continue
-
- subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
-}
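For reference, the class removed above is normally enabled from the distro configuration by pointing it at a published loader tarball and its per-architecture sha256 sum. A minimal sketch with placeholder values:

    # distro/local configuration sketch (hypothetical values)
    INHERIT += "uninative"
    UNINATIVE_URL = "http://example.com/uninative/4.0/"
    UNINATIVE_CHECKSUM[x86_64] = "0123456789abcdef..."   # assumed sha256 of the x86_64 tarball
    # On BuildStarted the tarball is fetched into ${DL_DIR}/uninative/<chksum>/,
    # relocated under ${STAGING_DIR}-uninative, and NATIVELSBSTRING becomes
    # "universal<host gcc major version>".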
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
deleted file mode 100644
index 8c2b66e7f1..0000000000
--- a/meta/classes/update-alternatives.bbclass
+++ /dev/null
@@ -1,327 +0,0 @@
-# This class is used to help the alternatives system, which is useful when
-# multiple sources provide the same command. You can use the update-alternatives
-# command directly in your recipe, but in most cases this class simplifies
-# that job.
-#
-# To use this class a number of variables should be defined:
-#
-# List all of the alternatives needed by a package:
-# ALTERNATIVE_<pkg> = "name1 name2 name3 ..."
-#
-# i.e. ALTERNATIVE_busybox = "sh sed test bracket"
-#
-# The pathname of the link
-# ALTERNATIVE_LINK_NAME[name] = "target"
-#
-# This is the name of the binary once it's been installed onto the runtime.
-# This name is global to all split packages in this recipe, and should match
-# other recipes with the same functionality.
-# i.e. ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/["
-#
-# NOTE: If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/name
-#
-# The default link to create for all targets
-# ALTERNATIVE_TARGET = "target"
-#
-# This is useful in a multicall binary case
-# i.e. ALTERNATIVE_TARGET = "/bin/busybox"
-#
-# A non-default link to create for a target
-# ALTERNATIVE_TARGET[name] = "target"
-#
-# This is the name of the binary as it's been installed by do_install
-# i.e. ALTERNATIVE_TARGET[sh] = "/bin/bash"
-#
-# A package specific link for a target
-# ALTERNATIVE_TARGET_<pkg>[name] = "target"
-#
-# This is useful when a recipe provides multiple alternatives for the
-# same item.
-#
-# NOTE: If ALTERNATIVE_TARGET is not defined, it will inherit the value
-# from ALTERNATIVE_LINK_NAME.
-#
-# NOTE: If the ALTERNATIVE_LINK_NAME and ALTERNATIVE_TARGET are the same,
-#       ALTERNATIVE_TARGET will have '.${BPN}' appended to it.  If the file
-# referenced has not been renamed, it will also be renamed. (This avoids
-# the need to rename alternative files in the do_install step, but still
-# supports it if necessary for some reason.)
-#
-# The default priority for any alternatives
-# ALTERNATIVE_PRIORITY = "priority"
-#
-# i.e. default is ALTERNATIVE_PRIORITY = "10"
-#
-# The non-default priority for a specific target
-# ALTERNATIVE_PRIORITY[name] = "priority"
-#
-# The package priority for a specific target
-# ALTERNATIVE_PRIORITY_<pkg>[name] = "priority"
-
-ALTERNATIVE_PRIORITY = "10"
-
-# We need special processing for vardeps because it cannot work on
-# modified flag values.  So we aggregate the flags into a new variable
-# and include that variable in the set.
-UPDALTVARS = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
-
-PACKAGE_WRITE_DEPS += "virtual/update-alternatives-native"
-
-def gen_updatealternativesvardeps(d):
- pkgs = (d.getVar("PACKAGES") or "").split()
- vars = (d.getVar("UPDALTVARS") or "").split()
-
- # First compute them for non_pkg versions
- for v in vars:
- for flag in sorted((d.getVarFlags(v) or {}).keys()):
- if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
- continue
- d.appendVar('%s_VARDEPS' % (v), ' %s:%s' % (flag, d.getVarFlag(v, flag, False)))
-
- for p in pkgs:
- for v in vars:
- for flag in sorted((d.getVarFlags("%s_%s" % (v,p)) or {}).keys()):
- if flag == "doc" or flag == "vardeps" or flag == "vardepsexp":
- continue
- d.appendVar('%s_VARDEPS_%s' % (v,p), ' %s:%s' % (flag, d.getVarFlag('%s_%s' % (v,p), flag, False)))
-
-def ua_extend_depends(d):
- if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
- d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
-
-def update_alternatives_enabled(d):
- # Update Alternatives only works on target packages...
- if bb.data.inherits_class('native', d) or \
- bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
- bb.data.inherits_class('cross-canadian', d):
- return False
-
- # Disable when targeting mingw32 (no target support)
- if d.getVar("TARGET_OS") == "mingw32":
- return False
-
- return True
-
-python __anonymous() {
- if not update_alternatives_enabled(d):
- return
-
- # compute special vardeps
- gen_updatealternativesvardeps(d)
-
- # extend the depends to include virtual/update-alternatives
- ua_extend_depends(d)
-}
-
-def gen_updatealternativesvars(d):
- ret = []
- pkgs = (d.getVar("PACKAGES") or "").split()
- vars = (d.getVar("UPDALTVARS") or "").split()
-
- for v in vars:
- ret.append(v + "_VARDEPS")
-
- for p in pkgs:
- for v in vars:
- ret.append(v + "_" + p)
- ret.append(v + "_VARDEPS_" + p)
- return " ".join(ret)
-
-# Now the new stuff: we use a custom function to generate the right values
-populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
-
-# We need to do the rename after the image creation step, but before
-# the split and strip steps.. PACKAGE_PREPROCESS_FUNCS is the right
-# place for that.
-PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
-python apply_update_alternative_renames () {
- if not update_alternatives_enabled(d):
- return
-
- import re
-
- def update_files(alt_target, alt_target_rename, pkg, d):
- f = d.getVar('FILES_' + pkg)
- if f:
- f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
- d.setVar('FILES_' + pkg, f)
-
- # Check for deprecated usage...
- pn = d.getVar('BPN')
- if d.getVar('ALTERNATIVE_LINKS') != None:
- bb.fatal('%s: Use of ALTERNATIVE_LINKS/ALTERNATIVE_PATH/ALTERNATIVE_NAME is no longer supported, please convert to the updated syntax, see update-alternatives.bbclass for more info.' % pn)
-
- # Do actual update alternatives processing
- pkgdest = d.getVar('PKGD')
- for pkg in (d.getVar('PACKAGES') or "").split():
- # If the src == dest, we know we need to rename the dest by appending ${BPN}
- link_rename = []
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
- if not alt_link:
- alt_link = "%s/%s" % (d.getVar('bindir'), alt_name)
- d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, alt_link)
- if alt_link.startswith(os.path.join(d.getVar('sysconfdir'), 'init.d')):
- # Managing init scripts does not work (bug #10433), foremost
- # because of a race with update-rc.d
- bb.fatal("Using update-alternatives for managing SysV init scripts is not supported")
-
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
- # Sometimes alt_target is specified as relative to the link name.
- alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
-
- # If the link and target are the same name, we need to rename the target.
- if alt_link == alt_target:
- src = '%s/%s' % (pkgdest, alt_target)
- alt_target_rename = '%s.%s' % (alt_target, pn)
- dest = '%s/%s' % (pkgdest, alt_target_rename)
- if os.path.lexists(dest):
- bb.note('%s: Already renamed: %s' % (pn, alt_target_rename))
- elif os.path.lexists(src):
- if os.path.islink(src):
- # Delay rename of links
- link_rename.append((alt_target, alt_target_rename))
- else:
- bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
- update_files(alt_target, alt_target_rename, pkg, d)
- else:
- bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
- continue
- d.setVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, alt_target_rename)
-
- # Process delayed link names
- # Do these after other renames so we can correct broken links
- for (alt_target, alt_target_rename) in link_rename:
- src = '%s/%s' % (pkgdest, alt_target)
- dest = '%s/%s' % (pkgdest, alt_target_rename)
- link_target = oe.path.realpath(src, pkgdest, True)
-
- if os.path.lexists(link_target):
- # Ok, the link_target exists, we can rename
- bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
- os.rename(src, dest)
- else:
- # Try to resolve the broken link to link.${BPN}
- link_maybe = '%s.%s' % (os.readlink(src), pn)
- if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
- # Ok, the renamed link target exists.. create a new link, and remove the original
- bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
- os.symlink(link_maybe, dest)
- os.unlink(src)
- else:
- bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
- continue
- update_files(alt_target, alt_target_rename, pkg, d)
-}
-
-def update_alternatives_alt_targets(d, pkg):
- """
- Returns the update-alternatives metadata for a package.
-
- The returned format is a list of tuples where the tuple contains:
- alt_name: The binary name
- alt_link: The path for the binary (Shared by different packages)
- alt_target: The path for the renamed binary (Unique per package)
- alt_priority: The priority of the alt_target
-
- All the alt_targets will be installed into the sysroot. The alt_link is
- a symlink pointing to the alt_target with the highest priority.
- """
-
- pn = d.getVar('BPN')
- pkgdest = d.getVar('PKGD')
- updates = list()
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
- d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
- d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
- d.getVar('ALTERNATIVE_TARGET') or \
- alt_link
- alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
- d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
- d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
- d.getVar('ALTERNATIVE_PRIORITY')
-
- # This shouldn't trigger, as it should have been resolved earlier!
- if alt_link == alt_target:
- bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
- alt_target = '%s.%s' % (alt_target, pn)
-
- if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
- bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
- continue
-
- alt_target = os.path.normpath(alt_target)
- updates.append( (alt_name, alt_link, alt_target, alt_priority) )
-
- return updates
-
-PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
-
-python populate_packages_updatealternatives () {
- if not update_alternatives_enabled(d):
- return
-
- # Do actual update alternatives processing
- for pkg in (d.getVar('PACKAGES') or "").split():
- # Create post install/removal scripts
- alt_setup_links = ""
- alt_remove_links = ""
- updates = update_alternatives_alt_targets(d, pkg)
- for alt_name, alt_link, alt_target, alt_priority in updates:
- alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
- alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
-
- if alt_setup_links:
- # RDEPENDS setup
- provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives')
- if provider:
- #bb.note('adding runtime requirement for update-alternatives for %s' % pkg)
- d.appendVar('RDEPENDS_%s' % pkg, ' ' + d.getVar('MLPREFIX', False) + provider)
-
- bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
- bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if postinst:
- postinst = alt_setup_links + postinst
- else:
- postinst = '#!/bin/sh\n' + alt_setup_links
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- bb.note('%s' % alt_remove_links)
- prerm = d.getVar('pkg_prerm_%s' % pkg) or '#!/bin/sh\n'
- prerm += alt_remove_links
- d.setVar('pkg_prerm_%s' % pkg, prerm)
-}
-
-python package_do_filedeps_append () {
- if update_alternatives_enabled(d):
- apply_update_alternative_provides(d)
-}
-
-def apply_update_alternative_provides(d):
- pn = d.getVar('BPN')
- pkgdest = d.getVar('PKGDEST')
-
- for pkg in d.getVar('PACKAGES').split():
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
-
- if alt_link == alt_target:
- bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
- alt_target = '%s.%s' % (alt_target, pn)
-
- if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
- continue
-
- # Add file provide
- trans_target = oe.package.file_translate(alt_target)
- d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
- if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
- d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
-
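As a worked example of the variables documented in the header of the class removed above, a multicall-binary recipe fragment might look like this (names and priorities are illustrative):

    # recipe fragment sketch (hypothetical recipe, pre-':'-override syntax)
    inherit update-alternatives

    ALTERNATIVE_${PN} = "sh sed"
    ALTERNATIVE_TARGET = "/bin/busybox"
    ALTERNATIVE_LINK_NAME[sh]  = "/bin/sh"
    ALTERNATIVE_LINK_NAME[sed] = "${bindir}/sed"
    ALTERNATIVE_PRIORITY = "50"
    ALTERNATIVE_PRIORITY[sed] = "40"

Because /bin/busybox differs from each link name, no renaming is needed; the postinst and prerm scripts gain the matching update-alternatives --install/--remove calls.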
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
deleted file mode 100644
index 1366fee653..0000000000
--- a/meta/classes/update-rc.d.bbclass
+++ /dev/null
@@ -1,123 +0,0 @@
-UPDATERCPN ?= "${PN}"
-
-DEPENDS_append_class-target = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', ' update-rc.d initscripts', '', d)}"
-
-UPDATERCD = "update-rc.d"
-UPDATERCD_class-cross = ""
-UPDATERCD_class-native = ""
-UPDATERCD_class-nativesdk = ""
-
-INITSCRIPT_PARAMS ?= "defaults"
-
-INIT_D_DIR = "${sysconfdir}/init.d"
-
-def use_updatercd(d):
- # If the distro supports both sysvinit and systemd, and the current recipe
- # supports systemd, only call update-rc.d on rootfs creation or if systemd
- # is not running. That's because systemctl enable/disable will already call
- # update-rc.d if it detects initscripts.
- if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and bb.data.inherits_class('systemd', d):
- return '[ -n "$D" -o ! -d /run/systemd/system ]'
- return 'true'
-
-PACKAGE_WRITE_DEPS += "update-rc.d-native"
-
-updatercd_postinst() {
-if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
- if [ -n "$D" ]; then
- OPT="-r $D"
- else
- OPT="-s"
- fi
- update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
-fi
-}
-
-updatercd_prerm() {
-if ${@use_updatercd(d)} && [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
-fi
-}
-
-updatercd_postrm() {
-if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
- if [ -n "$D" ]; then
- OPT="-f -r $D"
- else
- OPT="-f"
- fi
- update-rc.d $OPT ${INITSCRIPT_NAME} remove
-fi
-}
-
-
-def update_rc_after_parse(d):
- if d.getVar('INITSCRIPT_PACKAGES', False) == None:
- if d.getVar('INITSCRIPT_NAME', False) == None:
- bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE', False))
- if d.getVar('INITSCRIPT_PARAMS', False) == None:
- bb.fatal("%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE', False))
-
-python __anonymous() {
- update_rc_after_parse(d)
-}
-
-PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
-PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
-
-populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
-populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
-
-python populate_packages_updatercd () {
- def update_rcd_auto_depend(pkg):
- import subprocess
- import os
- path = d.expand("${D}${INIT_D_DIR}/${INITSCRIPT_NAME}")
- if not os.path.exists(path):
- return
- statement = "grep -q -w '/etc/init.d/functions' %s" % path
- if subprocess.call(statement, shell=True) == 0:
- mlprefix = d.getVar('MLPREFIX') or ""
- d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
-
- def update_rcd_package(pkg):
- bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
-
- localdata = bb.data.createCopy(d)
- overrides = localdata.getVar("OVERRIDES")
- localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
-
- update_rcd_auto_depend(pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg)
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += localdata.getVar('updatercd_postinst')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- prerm = d.getVar('pkg_prerm_%s' % pkg)
- if not prerm:
- prerm = '#!/bin/sh\n'
- prerm += localdata.getVar('updatercd_prerm')
- d.setVar('pkg_prerm_%s' % pkg, prerm)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg)
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += localdata.getVar('updatercd_postrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
-
- d.appendVar('RRECOMMENDS_' + pkg, " ${MLPREFIX}${UPDATERCD}")
-
- # Check that this class isn't being inhibited (generally, by
- # systemd.bbclass) before doing any work.
- if not d.getVar("INHIBIT_UPDATERCD_BBCLASS"):
- pkgs = d.getVar('INITSCRIPT_PACKAGES')
- if pkgs == None:
- pkgs = d.getVar('UPDATERCPN')
- packages = (d.getVar('PACKAGES') or "").split()
- if not pkgs in packages and packages != []:
- pkgs = packages[0]
- for pkg in pkgs.split():
- update_rcd_package(pkg)
-}
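A minimal sketch of a recipe using the class removed above (script name and runlevel parameters are illustrative):

    # recipe fragment sketch (hypothetical recipe)
    inherit update-rc.d

    INITSCRIPT_NAME = "mydaemon"     # assumed script installed to ${sysconfdir}/init.d
    INITSCRIPT_PARAMS = "start 90 2 3 4 5 . stop 10 0 1 6 ."

With sysvinit in DISTRO_FEATURES, the class injects the updatercd_postinst/prerm/postrm fragments above into the package that ships the script.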
diff --git a/meta/classes/upstream-version-is-even.bbclass b/meta/classes/upstream-version-is-even.bbclass
deleted file mode 100644
index 256c752423..0000000000
--- a/meta/classes/upstream-version-is-even.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-# This class ensures that the upstream version check only
-# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x)
-# This scheme is used by Gnome and a number of other projects
-# to signify stable releases vs development releases.
-UPSTREAM_CHECK_REGEX = "[^\d\.](?P<pver>\d+\.(\d*[02468])+(\.\d+)+)\.tar"
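For example, with the regex above an upstream version check would match foo-3.2.4.tar.xz (even minor version) but not foo-3.3.1.tar.xz (odd minor version); a GNOME-style recipe only needs to inherit the class:

    # recipe fragment sketch
    inherit upstream-version-is-even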
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..1dbcba2bf1 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# In order to support a deterministic set of 'dynamic' users/groups,
# we need a function to reformat the params based on a static file
def update_useradd_static_config(d):
@@ -41,7 +47,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
@@ -77,7 +83,7 @@ def update_useradd_static_config(d):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -140,13 +146,13 @@ def update_useradd_static_config(d):
uaargs.gid = uaargs.groupid
uaargs.user_group = None
if newgroup and is_pkg:
- groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg)
+ groupadd = d.getVar("GROUPADD_PARAM:%s" % pkg)
if groupadd:
# Only add the group if not already specified
if not uaargs.groupname in groupadd:
- d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+ d.setVar("GROUPADD_PARAM:%s" % pkg, "%s; %s" % (groupadd, newgroup))
else:
- d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+ d.setVar("GROUPADD_PARAM:%s" % pkg, newgroup)
uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
uaargs.home_dir = field[5] or uaargs.home_dir
@@ -174,8 +180,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][uaargs.non_unique]
if uaargs.password != None:
newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
- elif uaargs.clear_password:
- newparam += ['', ' --clear-password %s' % uaargs.clear_password][uaargs.clear_password != None]
newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
newparam += ['', ' --system'][uaargs.system]
newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
@@ -198,7 +202,7 @@ def update_useradd_static_config(d):
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
except Exception as e:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM:%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
@@ -236,8 +240,6 @@ def update_useradd_static_config(d):
newparam += ['', ' --non-unique'][gaargs.non_unique]
if gaargs.password != None:
newparam += ['', ' --password %s' % gaargs.password][gaargs.password != None]
- elif gaargs.clear_password:
- newparam += ['', ' --clear-password %s' % gaargs.clear_password][gaargs.clear_password != None]
newparam += ['', ' --root %s' % gaargs.root][gaargs.root != None]
newparam += ['', ' --system'][gaargs.system]
newparam += ' %s' % gaargs.GROUP
@@ -265,17 +267,17 @@ def update_useradd_static_config(d):
for pkg in useradd_packages.split():
# Groupmems doesn't have anything we might want to change, so simply validating
# is a bit of a waste -- only process useradd/groupadd
- useradd_param = d.getVar('USERADD_PARAM_%s' % pkg)
+ useradd_param = d.getVar('USERADD_PARAM:%s' % pkg)
if useradd_param:
- #bb.warn("Before: 'USERADD_PARAM_%s' - '%s'" % (pkg, useradd_param))
- d.setVar('USERADD_PARAM_%s' % pkg, rewrite_useradd(useradd_param, True))
- #bb.warn("After: 'USERADD_PARAM_%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'USERADD_PARAM:%s' - '%s'" % (pkg, useradd_param))
+ d.setVar('USERADD_PARAM:%s' % pkg, rewrite_useradd(useradd_param, True))
+ #bb.warn("After: 'USERADD_PARAM:%s' - '%s'" % (pkg, d.getVar('USERADD_PARAM:%s' % pkg)))
- groupadd_param = d.getVar('GROUPADD_PARAM_%s' % pkg)
+ groupadd_param = d.getVar('GROUPADD_PARAM:%s' % pkg)
if groupadd_param:
- #bb.warn("Before: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, groupadd_param))
- d.setVar('GROUPADD_PARAM_%s' % pkg, rewrite_groupadd(groupadd_param, True))
- #bb.warn("After: 'GROUPADD_PARAM_%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM_%s' % pkg)))
+ #bb.warn("Before: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, groupadd_param))
+ d.setVar('GROUPADD_PARAM:%s' % pkg, rewrite_groupadd(groupadd_param, True))
+ #bb.warn("After: 'GROUPADD_PARAM:%s' - '%s'" % (pkg, d.getVar('GROUPADD_PARAM:%s' % pkg)))
# Load and process extra users and groups, rewriting only adduser/addgroup params
pkg = d.getVar('PN')
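For reference, a sketch of the distro/local configuration that drives this class; the table locations are illustrative, and USERADDEXTENSION (see the useradd.bbclass change below) is what pulls the class in:

    # local.conf / distro configuration sketch (hypothetical paths)
    USERADDEXTENSION = "useradd-staticids"
    USERADD_UID_TABLES = "files/passwd"     # assumed static passwd table shipped in a layer
    USERADD_GID_TABLES = "files/group"
    USERADD_ERROR_DYNAMIC = "error"         # fail the build if a user/group has no static ID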
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..16a65ac323 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -1,9 +1,15 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
inherit useradd_base
# base-passwd-cross provides the default passwd and group files in the
# target sysroot, and shadow -native and -sysroot provide the utilities
# and support files needed to add and modify user and group accounts
-DEPENDS_append_class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
+DEPENDS:append:class-target = " base-files shadow-native shadow-sysroot shadow base-passwd"
PACKAGE_WRITE_DEPS += "shadow-native"
# This preinstall function can be run in four different contexts:
@@ -97,6 +103,18 @@ fi
}
useradd_sysroot () {
+ user_group_groupmems_add_sysroot user
+}
+
+groupadd_sysroot () {
+ user_group_groupmems_add_sysroot group
+}
+
+groupmemsadd_sysroot () {
+ user_group_groupmems_add_sysroot groupmems
+}
+
+user_group_groupmems_add_sysroot () {
# Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
@@ -125,9 +143,15 @@ useradd_sysroot () {
fi
# Add groups and users defined for all recipe packages
- GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
- USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
- GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
+ if test "$1" = "group"; then
+ GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
+ elif test "$1" = "user"; then
+ USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
+ elif test "$1" = "groupmems"; then
+ GROUPMEMS_PARAM="${@get_all_cmd_params(d, 'groupmems')}"
+ elif test "x$1" = "x"; then
+ bbwarn "missing type of passwd db action"
+ fi
# Tell the system to use the environment vars
UA_SYSROOT=1
@@ -142,38 +166,45 @@ useradd_sysroot () {
EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
python useradd_sysroot_sstate () {
- scriptfile = None
- task = d.getVar("BB_CURRENTTASK")
- if task == "package_setscene":
- bb.build.exec_func("useradd_sysroot", d)
- elif task == "prepare_recipe_sysroot":
- # Used to update this recipe's own sysroot so the user/groups are available to do_install
- scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-${PN}")
- bb.build.exec_func("useradd_sysroot", d)
- elif task == "populate_sysroot":
- # Used when installed in dependent task sysroots
- scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-${PN}")
-
- if scriptfile:
- bb.utils.mkdirhier(os.path.dirname(scriptfile))
- with open(scriptfile, 'w') as script:
- script.write("#!/bin/sh\n")
- bb.data.emit_func("useradd_sysroot", script, d)
- script.write("useradd_sysroot\n")
- os.chmod(scriptfile, 0o755)
+ for type, sort_prefix in [("group", "01"), ("user", "02"), ("groupmems", "03")]:
+ scriptfile = None
+ task = d.getVar("BB_CURRENTTASK")
+ if task == "package_setscene":
+ bb.build.exec_func(type + "add_sysroot", d)
+ elif task == "prepare_recipe_sysroot":
+ # Used to update this recipe's own sysroot so the user/groups are available to do_install
+
+            # If do_populate_sysroot is triggered and we write the file here, there would be
+            # overlapping files. See usergrouptests.UserGroupTests.test_add_task_between_p_sysroot_and_package
+ scriptfile = d.expand("${RECIPE_SYSROOT}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}-recipedebug")
+
+ bb.build.exec_func(type + "add_sysroot", d)
+ elif task == "populate_sysroot":
+ # Used when installed in dependent task sysroots
+ scriptfile = d.expand("${SYSROOT_DESTDIR}${bindir}/postinst-useradd-" + sort_prefix + type + "-${PN}")
+
+ if scriptfile:
+ bb.utils.mkdirhier(os.path.dirname(scriptfile))
+ with open(scriptfile, 'w') as script:
+ script.write("#!/bin/sh -e\n")
+ bb.data.emit_func(type + "add_sysroot", script, d)
+ script.write(type + "add_sysroot\n")
+ os.chmod(scriptfile, 0o755)
}
do_prepare_recipe_sysroot[postfuncs] += "${SYSROOTFUNC}"
-SYSROOTFUNC_class-target = "useradd_sysroot_sstate"
+SYSROOTFUNC:class-target = "useradd_sysroot_sstate"
SYSROOTFUNC = ""
SYSROOT_PREPROCESS_FUNCS += "${SYSROOTFUNC}"
-SSTATEPREINSTFUNCS_append_class-target = " useradd_sysroot_sstate"
+SSTATEPREINSTFUNCS:append:class-target = " useradd_sysroot_sstate"
+USERADD_DEPENDS ??= ""
+DEPENDS += "${USERADD_DEPENDS}"
do_package_setscene[depends] += "${USERADDSETSCENEDEPS}"
do_populate_sysroot_setscene[depends] += "${USERADDSETSCENEDEPS}"
-USERADDSETSCENEDEPS_class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS:class-target = "${MLPREFIX}base-passwd:do_populate_sysroot_setscene pseudo-native:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene ${@' '.join(['%s:do_populate_sysroot_setscene' % pkg for pkg in d.getVar("USERADD_DEPENDS").split()])}"
USERADDSETSCENEDEPS = ""
# Recipe parse-time sanity checks
@@ -184,8 +215,8 @@ def update_useradd_after_parse(d):
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
- d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg))
- if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
+ d.appendVarFlag("do_populate_sysroot", "vardeps", " USERADD_PARAM:%s GROUPADD_PARAM:%s GROUPMEMS_PARAM:%s" % (pkg, pkg, pkg))
+ if not d.getVar('USERADD_PARAM:%s' % pkg) and not d.getVar('GROUPADD_PARAM:%s' % pkg) and not d.getVar('GROUPMEMS_PARAM:%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
python __anonymous() {
@@ -199,7 +230,7 @@ python __anonymous() {
def get_all_cmd_params(d, cmd_type):
import string
- param_type = cmd_type.upper() + "_PARAM_%s"
+ param_type = cmd_type.upper() + "_PARAM:%s"
params = []
useradd_packages = d.getVar('USERADD_PACKAGES') or ""
@@ -211,7 +242,7 @@ def get_all_cmd_params(d, cmd_type):
return "; ".join(params)
# Adds the preinst script into generated packages
-fakeroot python populate_packages_prepend () {
+fakeroot python populate_packages:prepend () {
def update_useradd_package(pkg):
bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
@@ -220,7 +251,7 @@ fakeroot python populate_packages_prepend () {
required to execute on the target. Not doing so may cause
useradd preinst to be invoked twice, causing unwanted warnings.
"""
- preinst = d.getVar('pkg_preinst_%s' % pkg) or d.getVar('pkg_preinst')
+ preinst = d.getVar('pkg_preinst:%s' % pkg) or d.getVar('pkg_preinst')
if not preinst:
preinst = '#!/bin/sh\n'
preinst += 'bbnote () {\n\techo "NOTE: $*"\n}\n'
@@ -230,15 +261,19 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + ":" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
+ d.setVar('pkg_preinst:%s' % pkg, preinst)
# RDEPENDS setup
- rdepends = d.getVar("RDEPENDS_%s" % pkg) or ""
+ rdepends = d.getVar("RDEPENDS:%s" % pkg) or ""
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-passwd'
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'shadow'
# base-files is where the default /etc/skel is packaged
rdepends += ' ' + d.getVar('MLPREFIX', False) + 'base-files'
- d.setVar("RDEPENDS_%s" % pkg, rdepends)
+ d.setVar("RDEPENDS:%s" % pkg, rdepends)
# Add the user/group preinstall scripts and RDEPENDS requirements
# to packages specified by USERADD_PACKAGES
@@ -252,4 +287,4 @@ fakeroot python populate_packages_prepend () {
# Use the following to extend the useradd with custom functions
USERADDEXTENSION ?= ""
-inherit ${USERADDEXTENSION}
+inherit_defer ${USERADDEXTENSION}
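A minimal recipe-side sketch against the class as modified above, using the ':' override syntax and the new USERADD_DEPENDS hook (user, group and recipe names are illustrative):

    # recipe fragment sketch (hypothetical recipe)
    inherit useradd

    USERADD_PACKAGES = "${PN}"
    GROUPADD_PARAM:${PN} = "--system myservice"
    USERADD_PARAM:${PN} = "--system --no-create-home --shell /sbin/nologin -g myservice myservice"
    # Hypothetical recipe whose users/groups must exist first; its
    # do_populate_sysroot(_setscene) is added as a dependency.
    USERADD_DEPENDS = "base-users"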
diff --git a/meta/classes/useradd_base.bbclass b/meta/classes/useradd_base.bbclass
index 0d0bdb80f5..5e1c699118 100644
--- a/meta/classes/useradd_base.bbclass
+++ b/meta/classes/useradd_base.bbclass
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# This bbclass provides basic functionality for user/group settings.
# This bbclass is intended to be inherited by useradd.bbclass and
# extrausers.bbclass.
@@ -145,3 +151,21 @@ perform_usermod () {
fi
set -e
}
+
+perform_passwd_expire () {
+ local rootdir="$1"
+ local opts="$2"
+ bbnote "${PN}: Performing equivalent of passwd --expire with [$opts]"
+ # Directly set sp_lstchg to 0 without using the passwd command: Only root can do that
+ local username=`echo "$opts" | awk '{ print $NF }'`
+ local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+ if test "x$user_exists" != "x"; then
+ eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO sed --follow-symlinks -i \''s/^\('$username':[^:]*\):[^:]*:/\1:0:/'\' $rootdir/etc/shadow \" || true
+ local passwd_lastchanged="`grep "^$username:" $rootdir/etc/shadow | cut -d: -f3`"
+ if test "x$passwd_lastchanged" != "x0"; then
+ bbfatal "${PN}: passwd --expire operation did not succeed."
+ fi
+ else
+ bbnote "${PN}: user $username doesn't exist, not expiring its password"
+ fi
+}
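perform_passwd_expire directly zeroes the sp_lstchg field in /etc/shadow so the account's password is expired at first login. A sketch of how an image might request this via extrausers, assuming the extrausers class maps a 'passwd-expire' keyword onto this function (keyword and user name are illustrative):

    # image recipe fragment sketch (hypothetical)
    inherit extrausers
    EXTRA_USERS_PARAMS = "\
        useradd -p '' developer; \
        passwd-expire developer; \
    "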
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
deleted file mode 100644
index b1f27d3658..0000000000
--- a/meta/classes/utility-tasks.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
-addtask listtasks
-do_listtasks[nostamp] = "1"
-python do_listtasks() {
- taskdescs = {}
- maxlen = 0
- for e in d.keys():
- if d.getVarFlag(e, 'task'):
- maxlen = max(maxlen, len(e))
- if e.endswith('_setscene'):
- desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
- else:
- desc = d.getVarFlag(e, 'doc') or ''
- taskdescs[e] = desc
-
- tasks = sorted(taskdescs.keys())
- for taskname in tasks:
- bb.plain("%s %s" % (taskname.ljust(maxlen), taskdescs[taskname]))
-}
-
-CLEANFUNCS ?= ""
-
-T_task-clean = "${LOG_DIR}/cleanlogs/${PN}"
-addtask clean
-do_clean[nostamp] = "1"
-python do_clean() {
- """clear the build and temp directories"""
- dir = d.expand("${WORKDIR}")
- bb.note("Removing " + dir)
- oe.path.remove(dir)
-
- dir = "%s.*" % d.getVar('STAMP')
- bb.note("Removing " + dir)
- oe.path.remove(dir)
-
- for f in (d.getVar('CLEANFUNCS') or '').split():
- bb.build.exec_func(f, d)
-}
-
-addtask checkuri
-do_checkuri[nostamp] = "1"
-python do_checkuri() {
- src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
- return
-
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- fetcher.checkstatus()
- except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
-}
-
-
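The tasks removed above are invoked directly, e.g. 'bitbake <recipe> -c listtasks', '-c clean' or '-c checkuri'. CLEANFUNCS additionally lets a recipe hook extra cleanup into do_clean; a brief sketch (function name and path are illustrative):

    # recipe fragment sketch (hypothetical)
    CLEANFUNCS += "clean_extra_outputs"
    clean_extra_outputs() {
        rm -rf ${TOPDIR}/extra-output-${PN}     # assumed additional output location
    }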
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
deleted file mode 100644
index 120bcc64a6..0000000000
--- a/meta/classes/utils.bbclass
+++ /dev/null
@@ -1,362 +0,0 @@
-
-oe_soinstall() {
- # Purpose: Install shared library file and
- # create the necessary links
- # Example: oe_soinstall libfoo.so.1.2.3 ${D}${libdir}
- libname=`basename $1`
- case "$libname" in
- *.so)
-		bbfatal "oe_soinstall: Shared library must have a versioned filename (e.g. libfoo.so.1.2.3)"
- ;;
- esac
- install -m 755 $1 $2/$libname
- sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
- if [ -z $sonamelink ]; then
- bbfatal "oe_soinstall: $libname is missing ELF tag 'SONAME'."
- fi
- solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
- ln -sf $libname $2/$sonamelink
- ln -sf $libname $2/$solink
-}
-
-oe_libinstall() {
- # Purpose: Install a library, in all its forms
- # Example
- #
- # oe_libinstall libltdl ${STAGING_LIBDIR}/
- # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
- dir=""
- libtool=""
- silent=""
- require_static=""
- require_shared=""
- staging_install=""
- while [ "$#" -gt 0 ]; do
- case "$1" in
- -C)
- shift
- dir="$1"
- ;;
- -s)
- silent=1
- ;;
- -a)
- require_static=1
- ;;
- -so)
- require_shared=1
- ;;
- -*)
- bbfatal "oe_libinstall: unknown option: $1"
- ;;
- *)
- break;
- ;;
- esac
- shift
- done
-
- libname="$1"
- shift
- destpath="$1"
- if [ -z "$destpath" ]; then
- bbfatal "oe_libinstall: no destination path specified"
- fi
- if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
- then
- staging_install=1
- fi
-
- __runcmd () {
- if [ -z "$silent" ]; then
- echo >&2 "oe_libinstall: $*"
- fi
- $*
- }
-
- if [ -z "$dir" ]; then
- dir=`pwd`
- fi
-
- dotlai=$libname.lai
-
- # Sanity check that the libname.lai is unique
- number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
- if [ $number_of_files -gt 1 ]; then
- bbfatal "oe_libinstall: $dotlai is not unique in $dir"
- fi
-
-
- dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
- olddir=`pwd`
- __runcmd cd $dir
-
- lafile=$libname.la
-
-	# If such a file doesn't exist, try to cut the version suffix
- if [ ! -f "$lafile" ]; then
- libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
-	lafile1=$libname1.la
- if [ -f "$lafile1" ]; then
- libname=$libname1
- lafile=$lafile1
- fi
- fi
-
- if [ -f "$lafile" ]; then
- # libtool archive
- eval `cat $lafile|grep "^library_names="`
- libtool=1
- else
- library_names="$libname.so* $libname.dll.a $libname.*.dylib"
- fi
-
- __runcmd install -d $destpath/
- dota=$libname.a
- if [ -f "$dota" -o -n "$require_static" ]; then
- rm -f $destpath/$dota
- __runcmd install -m 0644 $dota $destpath/
- fi
- if [ -f "$dotlai" -a -n "$libtool" ]; then
- rm -f $destpath/$libname.la
- __runcmd install -m 0644 $dotlai $destpath/$libname.la
- fi
-
- for name in $library_names; do
- files=`eval echo $name`
- for f in $files; do
- if [ ! -e "$f" ]; then
- if [ -n "$libtool" ]; then
- bbfatal "oe_libinstall: $dir/$f not found."
- fi
- elif [ -L "$f" ]; then
- __runcmd cp -P "$f" $destpath/
- elif [ ! -L "$f" ]; then
- libfile="$f"
- rm -f $destpath/$libfile
- __runcmd install -m 0755 $libfile $destpath/
- fi
- done
- done
-
- if [ -z "$libfile" ]; then
- if [ -n "$require_shared" ]; then
- bbfatal "oe_libinstall: unable to locate shared library"
- fi
- elif [ -z "$libtool" ]; then
- # special case hack for non-libtool .so.#.#.# links
- baselibfile=`basename "$libfile"`
- if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
- sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
- solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
- if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
- __runcmd ln -sf $baselibfile $destpath/$sonamelink
- fi
- __runcmd ln -sf $baselibfile $destpath/$solink
- fi
- fi
-
- __runcmd cd "$olddir"
-}
-
-oe_machinstall() {
- # Purpose: Install machine dependent files, if available
- # If not available, check if there is a default
- # If no default, just touch the destination
- # Example:
- # $1 $2 $3 $4
- # oe_machinstall -m 0644 fstab ${D}/etc/fstab
- #
- # TODO: Check argument number?
- #
- filename=`basename $3`
- dirname=`dirname $3`
-
- for o in `echo ${OVERRIDES} | tr ':' ' '`; do
- if [ -e $dirname/$o/$filename ]; then
- bbnote $dirname/$o/$filename present, installing to $4
- install $1 $2 $dirname/$o/$filename $4
- return
- fi
- done
-# bbnote overrides specific file NOT present, trying default=$3...
- if [ -e $3 ]; then
- bbnote $3 present, installing to $4
- install $1 $2 $3 $4
- else
- bbnote $3 NOT present, touching empty $4
- touch $4
- fi
-}
-
-create_cmdline_wrapper () {
- # Create a wrapper script where commandline options are needed
- #
- # These are useful to work around relocation issues, by passing extra options
- # to a program
- #
- # Usage: create_cmdline_wrapper FILENAME <extra-options>
-
- cmd=$1
- shift
-
- echo "Generating wrapper script for $cmd"
-
- mv $cmd $cmd.real
- cmdname=`basename $cmd`
- dirname=`dirname $cmd`
- cmdoptions=$@
- if [ "${base_prefix}" != "" ]; then
- relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
- cmdoptions=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
- fi
- cat <<END >$cmd
-#!/bin/bash
-realpath=\`readlink -fn \$0\`
-realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
-END
- chmod +x $cmd
-}
-
-create_wrapper () {
- # Create a wrapper script where extra environment variables are needed
- #
- # These are useful to work around relocation issues, by setting environment
- # variables which point to paths in the filesystem.
- #
- # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
-
- cmd=$1
- shift
-
- echo "Generating wrapper script for $cmd"
-
- mv $cmd $cmd.real
- cmdname=`basename $cmd`
- dirname=`dirname $cmd`
- exportstring=$@
- if [ "${base_prefix}" != "" ]; then
- relpath=`python3 -c "import os; print(os.path.relpath('${D}${base_prefix}', '$dirname'))"`
- exportstring=`echo $@ | sed -e "s:${base_prefix}:\\$realdir/$relpath:g"`
- fi
- cat <<END >$cmd
-#!/bin/bash
-realpath=\`readlink -fn \$0\`
-realdir=\`dirname \$realpath\`
-export $exportstring
-exec -a "\$0" \$realdir/$cmdname.real "\$@"
-END
- chmod +x $cmd
-}
-
-# Copy files/directories from $1 to $2 but using hardlinks
-# (preserve symlinks)
-hardlinkdir () {
- from=$1
- to=$2
- (cd $from; find . -print0 | cpio --null -pdlu $to)
-}
-
-
-def check_app_exists(app, d):
- app = d.expand(app).split()[0].strip()
- path = d.getVar('PATH')
- return bool(bb.utils.which(path, app))
-
-def explode_deps(s):
- return bb.utils.explode_deps(s)
-
-def base_set_filespath(path, d):
- filespath = []
- extrapaths = (d.getVar("FILESEXTRAPATHS") or "")
- # Remove default flag which was used for checking
- extrapaths = extrapaths.replace("__default:", "")
- # Don't prepend empty strings to the path list
- if extrapaths != "":
- path = extrapaths.split(":") + path
- # The ":" ensures we have an 'empty' override
- overrides = (":" + (d.getVar("FILESOVERRIDES") or "")).split(":")
- overrides.reverse()
- for o in overrides:
- for p in path:
- if p != "":
- filespath.append(os.path.join(p, o))
- return ":".join(filespath)
-
-def extend_variants(d, var, extend, delim=':'):
- """Return a string of all bb class extend variants for the given extend"""
- variants = []
- whole = d.getVar(var) or ""
- for ext in whole.split():
- eext = ext.split(delim)
- if len(eext) > 1 and eext[0] == extend:
- variants.append(eext[1])
- return " ".join(variants)
-
-def multilib_pkg_extend(d, pkg):
- variants = (d.getVar("MULTILIB_VARIANTS") or "").split()
- if not variants:
- return pkg
- pkgs = pkg
- for v in variants:
- pkgs = pkgs + " " + v + "-" + pkg
- return pkgs
-
-def get_multilib_datastore(variant, d):
- return oe.utils.get_multilib_datastore(variant, d)
-
-def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
- """Return a string of all ${var} in all multilib tune configuration"""
- values = []
- variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
- for item in variants:
- localdata = get_multilib_datastore(item, d)
- # We need WORKDIR to be consistent with the original datastore
- localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
- value = localdata.getVar(var) or ""
- if value != "":
- if need_split:
- for item in value.split(delim):
- values.append(item)
- else:
- values.append(value)
- if unique:
- #we do this to keep order as much as possible
- ret = []
- for value in values:
- if not value in ret:
- ret.append(value)
- else:
- ret = values
- return " ".join(ret)
-
-def all_multilib_tune_list(vars, d):
- """
- Return a list of ${VAR} for each variable VAR in vars from each
- multilib tune configuration.
-    It is safe to call this from a multilib recipe/context as it can
- figure out the original tune and remove the multilib overrides.
- """
- values = {}
- for v in vars:
- values[v] = []
- values['ml'] = ['']
-
- variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
-    for item in variants:
-        localdata = get_multilib_datastore(item, d)
-        for v in vars:
-            values[v].append(localdata.getVar(v))
-        values['ml'].append(item)
- return values
-all_multilib_tune_list[vardepsexclude] = "OVERRIDES"
-
-# If the user hasn't set up their name/email, set some defaults
-check_git_config() {
- if ! git config user.email > /dev/null ; then
- git config --local user.email "${PATCH_GIT_USER_EMAIL}"
- fi
- if ! git config user.name > /dev/null ; then
- git config --local user.name "${PATCH_GIT_USER_NAME}"
- fi
-}
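The helpers removed above are meant to be called from recipe shell tasks. For instance, create_wrapper is typically used in do_install of a native tool so the relocated binary still finds its data; a brief sketch (tool name, variable and path are illustrative):

    # recipe fragment sketch (hypothetical, old '_append' era syntax)
    do_install_append_class-native () {
        create_wrapper ${D}${bindir}/mytool \
            MYTOOL_DATADIR=${STAGING_DATADIR_NATIVE}/mytool
    }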
diff --git a/meta/classes/vala.bbclass b/meta/classes/vala.bbclass
deleted file mode 100644
index bcaf68c5a7..0000000000
--- a/meta/classes/vala.bbclass
+++ /dev/null
@@ -1,24 +0,0 @@
-# Everyone needs vala-native and targets need vala, too,
-# because that is where target builds look for .vapi files.
-#
-VALADEPENDS = ""
-VALADEPENDS_class-target = "vala"
-DEPENDS_append = " vala-native ${VALADEPENDS}"
-
-# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
-export STAGING_DATADIR
-# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
-export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
-
-# Package additional files
-FILES_${PN}-dev += "\
- ${datadir}/vala/vapi/*.vapi \
- ${datadir}/vala/vapi/*.deps \
- ${datadir}/gir-1.0 \
-"
-
-# Remove vapigen.m4 that is bundled with tarballs
-# because it does not yet have our cross-compile fixes
-do_configure_prepend() {
- rm -f ${S}/m4/vapigen.m4
-}
diff --git a/meta/classes/waf.bbclass b/meta/classes/waf.bbclass
deleted file mode 100644
index 900244004e..0000000000
--- a/meta/classes/waf.bbclass
+++ /dev/null
@@ -1,65 +0,0 @@
-# avoids build breaks when using no-static-libs.inc
-DISABLE_STATIC = ""
-
-B = "${WORKDIR}/build"
-
-EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
-
-def waflock_hash(d):
- # Calculates the hash used for the waf lock file. This should include
- # all of the user controllable inputs passed to waf configure. Note
- # that the full paths for ${B} and ${S} are used; this is OK and desired
- # because a change to either of these should create a unique lock file
- # to prevent collisions.
- import hashlib
- h = hashlib.sha512()
- def update(name):
- val = d.getVar(name)
- if val is not None:
- h.update(val.encode('utf-8'))
- update('S')
- update('B')
- update('prefix')
- update('EXTRA_OECONF')
- return h.hexdigest()
-
-# Use WAFLOCK to specify a separate lock file. The build is already
-# sufficiently isolated by setting the output directory; this ensures that
-# bitbake won't step on the toes of any other configured context in the source
-# directory (e.g. if the source is coming from externalsrc and was previously
-# configured elsewhere).
-export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
-BB_HASHBASE_WHITELIST += "WAFLOCK"
-
-python waf_preconfigure() {
- import subprocess
- from distutils.version import StrictVersion
- subsrcdir = d.getVar('S')
- wafbin = os.path.join(subsrcdir, 'waf')
- try:
- result = subprocess.check_output([wafbin, '--version'], cwd=subsrcdir, stderr=subprocess.STDOUT)
- version = result.decode('utf-8').split()[1]
- if StrictVersion(version) >= StrictVersion("1.8.7"):
- d.setVar("WAF_EXTRA_CONF", "--bindir=${bindir} --libdir=${libdir}")
- except subprocess.CalledProcessError as e:
- bb.warn("Unable to execute waf --version, exit code %d. Assuming waf version without bindir/libdir support." % e.returncode)
- except FileNotFoundError:
- bb.fatal("waf does not exist in %s" % subsrcdir)
-}
-
-do_configure[prefuncs] += "waf_preconfigure"
-
-waf_do_configure() {
- (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
-}
-
-do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
-waf_do_compile() {
- (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
-}
-
-waf_do_install() {
- (cd ${S} && ./waf install --destdir=${D})
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
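A recipe built with the class removed above typically only inherits it and passes extra configure options through EXTRA_OECONF; a brief sketch (the option is illustrative):

    # recipe fragment sketch (hypothetical)
    inherit waf pkgconfig

    EXTRA_OECONF += "--disable-docs"
    # waf_do_configure then runs: ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF}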
diff --git a/meta/classes/xmlcatalog.bbclass b/meta/classes/xmlcatalog.bbclass
deleted file mode 100644
index ae4811fdeb..0000000000
--- a/meta/classes/xmlcatalog.bbclass
+++ /dev/null
@@ -1,26 +0,0 @@
-DEPENDS = "libxml2-native"
-
-# A whitespace-separated list of XML catalogs to be registered, for example
-# "${sysconfdir}/xml/docbook-xml.xml".
-XMLCATALOGS ?= ""
-
-SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
-
-xmlcatalog_complete() {
- ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
- if [ ! -f $ROOTCATALOG ]; then
- mkdir --parents $(dirname $ROOTCATALOG)
- xmlcatalog --noout --create $ROOTCATALOG
- fi
- for CATALOG in ${XMLCATALOGS}; do
- xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
- done
-}
-
-xmlcatalog_sstate_postinst() {
- mkdir -p ${SYSROOT_DESTDIR}${bindir}
- dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
- echo '#!/bin/sh' > $dest
- echo '${xmlcatalog_complete}' >> $dest
- chmod 0755 $dest
-}
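A brief sketch of a recipe registering a catalog with the class removed above, reusing the example path from its own comment:

    # recipe fragment sketch (hypothetical recipe)
    inherit xmlcatalog
    XMLCATALOGS = "${sysconfdir}/xml/docbook-xml.xml"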
diff --git a/meta/classes/yocto-check-layer.bbclass b/meta/classes/yocto-check-layer.bbclass
new file mode 100644
index 0000000000..404f5fd9f2
--- /dev/null
+++ b/meta/classes/yocto-check-layer.bbclass
@@ -0,0 +1,22 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+#
+# This class is used by the yocto-check-layer script for additional per-recipe tests
+# The first test ensures that the layer has no recipes skipping 'installed-vs-shipped' QA checks
+#
+
+WARN_QA:remove = "installed-vs-shipped"
+ERROR_QA:append = " installed-vs-shipped"
+
+python () {
+ packages = set((d.getVar('PACKAGES') or '').split())
+ for package in packages:
+ skip = set((d.getVar('INSANE_SKIP') or "").split() +
+ (d.getVar('INSANE_SKIP:' + package) or "").split())
+ if 'installed-vs-shipped' in skip:
+ oe.qa.handle_error("installed-vs-shipped", 'Package %s is skipping "installed-vs-shipped" QA test.' % package, d)
+}
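For illustration, a layer recipe carrying a line like the following would now fail the check (error rather than warning), since the anonymous Python above reports any package whose INSANE_SKIP contains installed-vs-shipped:

    # recipe fragment that the check rejects (illustrative)
    INSANE_SKIP:${PN} += "installed-vs-shipped"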