Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/archiver.bbclass | 19
-rw-r--r--  meta/classes/base.bbclass | 18
-rw-r--r--  meta/classes/bin_package.bbclass | 3
-rw-r--r--  meta/classes/buildhistory.bbclass | 31
-rw-r--r--  meta/classes/cml1.bbclass | 8
-rw-r--r--  meta/classes/create-spdx-2.2.bbclass | 1067
-rw-r--r--  meta/classes/create-spdx.bbclass | 8
-rw-r--r--  meta/classes/cve-check.bbclass | 466
-rw-r--r--  meta/classes/devtool-source.bbclass | 4
-rw-r--r--  meta/classes/devupstream.bbclass | 2
-rw-r--r--  meta/classes/externalsrc.bbclass | 34
-rw-r--r--  meta/classes/fs-uuid.bbclass | 2
-rw-r--r--  meta/classes/go.bbclass | 2
-rw-r--r--  meta/classes/image.bbclass | 9
-rw-r--r--  meta/classes/image_types.bbclass | 2
-rw-r--r--  meta/classes/insane.bbclass | 8
-rw-r--r--  meta/classes/kernel-arch.bbclass | 4
-rw-r--r--  meta/classes/kernel-devicetree.bbclass | 18
-rw-r--r--  meta/classes/kernel-fitimage.bbclass | 155
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 38
-rw-r--r--  meta/classes/kernel.bbclass | 66
-rw-r--r--  meta/classes/libc-package.bbclass | 3
-rw-r--r--  meta/classes/license.bbclass | 16
-rw-r--r--  meta/classes/license_image.bbclass | 6
-rw-r--r--  meta/classes/metadata_scm.bbclass | 10
-rw-r--r--  meta/classes/mirrors.bbclass | 5
-rw-r--r--  meta/classes/multilib.bbclass | 2
-rw-r--r--  meta/classes/nativesdk.bbclass | 2
-rw-r--r--  meta/classes/package.bbclass | 45
-rw-r--r--  meta/classes/package_deb.bbclass | 4
-rw-r--r--  meta/classes/package_ipk.bbclass | 3
-rw-r--r--  meta/classes/package_pkgdata.bbclass | 2
-rw-r--r--  meta/classes/package_rpm.bbclass | 3
-rw-r--r--  meta/classes/patch.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_base.bbclass | 7
-rw-r--r--  meta/classes/populate_sdk_ext.bbclass | 7
-rw-r--r--  meta/classes/pypi.bbclass | 2
-rw-r--r--  meta/classes/python3targetconfig.bbclass | 12
-rw-r--r--  meta/classes/qemuboot.bbclass | 3
-rw-r--r--  meta/classes/report-error.bbclass | 2
-rw-r--r--  meta/classes/reproducible_build.bbclass | 83
-rw-r--r--  meta/classes/rm_work.bbclass | 15
-rw-r--r--  meta/classes/rootfs-postcommands.bbclass | 22
-rw-r--r--  meta/classes/rootfsdebugfiles.bbclass | 2
-rw-r--r--  meta/classes/sanity.bbclass | 10
-rw-r--r--  meta/classes/sstate.bbclass | 80
-rw-r--r--  meta/classes/staging.bbclass | 10
-rw-r--r--  meta/classes/testimage.bbclass | 40
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 4
-rw-r--r--  meta/classes/uninative.bbclass | 8
-rw-r--r--  meta/classes/useradd-staticids.bbclass | 2
-rw-r--r--  meta/classes/useradd.bbclass | 4
-rw-r--r--  meta/classes/utils.bbclass | 2
53 files changed, 2001 insertions(+), 386 deletions(-)
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
index 7ca35a573b..6ead010fe1 100644
--- a/meta/classes/archiver.bbclass
+++ b/meta/classes/archiver.bbclass
@@ -54,9 +54,10 @@ ARCHIVER_MODE[mirror] ?= "split"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/archiver-sources"
-ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_ARCH = "${TARGET_SYS}"
+ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
-ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${ARCHIVER_ARCH}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
# When producing a combined mirror directory, allow duplicates for the case
@@ -100,6 +101,10 @@ python () {
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
+ # TARGET_SYS in ARCHIVER_ARCH will break the stamp for gcc-source in multiconfig
+ if pn.startswith('gcc-source'):
+ d.setVar('ARCHIVER_ARCH', "allarch")
+
def hasTask(task):
return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
@@ -281,7 +286,10 @@ python do_ar_configured() {
# ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run
# do_configure; we archive the already configured ${S} instead.
- elif pn != 'libtool-native':
+ # The kernel class functions require the source to be in work-shared, so
+ # we don't unpack, patch, or configure again; we just archive the already
+ # configured ${S}
+ elif not (pn == 'libtool-native' or is_work_shared(d)):
def runTask(task):
prefuncs = d.getVarFlag(task, 'prefuncs') or ''
for func in prefuncs.split():
@@ -484,6 +492,9 @@ python do_unpack_and_patch() {
src_orig = '%s.orig' % src
oe.path.copytree(src, src_orig)
+ if bb.data.inherits_class('dos2unix', d):
+ bb.build.exec_func('do_convert_crlf_to_lf', d)
+
# Make sure gcc and kernel sources are patched only once
if not (d.getVar('SRC_URI') == "" or is_work_shared(d)):
bb.build.exec_func('do_patch', d)
@@ -572,7 +583,7 @@ python do_dumpdata () {
SSTATETASKS += "do_deploy_archives"
do_deploy_archives () {
- echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
+ bbnote "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
}
python do_deploy_archives_setscene () {
sstate_setscene(d)
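The motivation for the new ARCHIVER_ARCH indirection: gcc-source recipes are shared between multiconfigs, but an output path containing TARGET_SYS gives each multiconfig a different path (and hence a different stamp) for the same recipe. A minimal Python sketch, with assumed multiconfig and PF values, of the divergence the patch removes:

    def archiver_outdir(archiver_arch, pf, topdir="/build/archiver-sources"):
        # Mirrors ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${ARCHIVER_ARCH}/${PF}/"
        return "%s/%s/%s/" % (topdir, archiver_arch, pf)

    # Before: two multiconfigs building the same gcc-source recipe disagree,
    # breaking the shared stamp.
    print(archiver_outdir("x86_64-poky-linux", "gcc-source-11.3.0-r0"))
    print(archiver_outdir("aarch64-poky-linux", "gcc-source-11.3.0-r0"))

    # After: ARCHIVER_ARCH is forced to "allarch" for gcc-source, so all
    # multiconfigs compute the same path.
    print(archiver_outdir("allarch", "gcc-source-11.3.0-r0"))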
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 8a1b5f79c1..3cae577a0e 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -122,6 +122,10 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
tools = d.getVar(toolsvar).split()
origbbenv = d.getVar("BB_ORIGENV", False)
path = origbbenv.getVar("PATH")
+ # Need to ignore our own scripts directories to avoid circular links
+ for p in path.split(":"):
+ if p.endswith("/scripts"):
+ path = path.replace(p, "/ignoreme")
bb.utils.mkdirhier(dest)
notfound = []
for tool in tools:
@@ -135,7 +139,7 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
# /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
# would return /usr/local/bin/ccache/gcc, but what we need is
# /usr/bin/gcc, this code can check and fix that.
- if "ccache" in srctool:
+ if os.path.islink(srctool) and os.path.basename(os.readlink(srctool)) == 'ccache':
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
@@ -153,14 +157,14 @@ do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
addtask unpack after do_fetch
@@ -170,14 +174,14 @@ do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != o
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.unpack(d.getVar('WORKDIR'))
except bb.fetch2.BBFetchException as e:
- bb.fatal(str(e))
+ bb.fatal("Bitbake Fetcher Error: " + repr(e))
}
def get_layers_branch_rev(d):
@@ -688,7 +692,7 @@ python () {
if os.path.basename(p) == machine and os.path.isdir(p):
paths.append(p)
- if len(paths) != 0:
+ if paths:
for s in srcuri.split():
if not s.startswith("file://"):
continue
@@ -721,7 +725,7 @@ do_cleansstate[nostamp] = "1"
python do_cleanall() {
src_uri = (d.getVar('SRC_URI') or "").split()
- if len(src_uri) == 0:
+ if not src_uri:
return
try:
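The ccache hunk above replaces a substring test with an explicit symlink check. A short sketch, assuming an ordinary host layout, of what the new condition accepts and rejects:

    import os

    def is_ccache_shim(srctool):
        # True only when the tool is a symlink whose target's basename is
        # exactly "ccache", e.g. /usr/lib64/ccache/gcc -> /usr/bin/ccache.
        return os.path.islink(srctool) and \
            os.path.basename(os.readlink(srctool)) == "ccache"

    # The old test ('"ccache" in srctool') also matched any path that merely
    # contained the string, such as a real compiler installed under a
    # directory named ccache; the new test leaves those alone.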
diff --git a/meta/classes/bin_package.bbclass b/meta/classes/bin_package.bbclass
index cbc9b1fa13..c1954243ee 100644
--- a/meta/classes/bin_package.bbclass
+++ b/meta/classes/bin_package.bbclass
@@ -30,8 +30,9 @@ bin_package_do_install () {
bbfatal bin_package has nothing to install. Be sure the SRC_URI unpacks into S.
fi
cd ${S}
+ install -d ${D}${base_prefix}
tar --no-same-owner --exclude='./patches' --exclude='./.pc' -cpf - . \
- | tar --no-same-owner -xpf - -C ${D}
+ | tar --no-same-owner -xpf - -C ${D}${base_prefix}
}
FILES_${PN} = "/"
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
index 44a66df962..6a1a20653a 100644
--- a/meta/classes/buildhistory.bbclass
+++ b/meta/classes/buildhistory.bbclass
@@ -865,6 +865,7 @@ python buildhistory_eventhandler() {
if os.path.isdir(olddir):
shutil.rmtree(olddir)
rootdir = e.data.getVar("BUILDHISTORY_DIR")
+ bb.utils.mkdirhier(rootdir)
entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
bb.utils.mkdirhier(olddir)
for entry in entries:
@@ -953,23 +954,19 @@ def write_latest_srcrev(d, pkghistdir):
value = value.replace('"', '').strip()
old_tag_srcrevs[key] = value
with open(srcrevfile, 'w') as f:
- orig_srcrev = d.getVar('SRCREV', False) or 'INVALID'
- if orig_srcrev != 'INVALID':
- f.write('# SRCREV = "%s"\n' % orig_srcrev)
- if len(srcrevs) > 1:
- for name, srcrev in sorted(srcrevs.items()):
- orig_srcrev = d.getVar('SRCREV_%s' % name, False)
- if orig_srcrev:
- f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
- f.write('SRCREV_%s = "%s"\n' % (name, srcrev))
- else:
- f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
- if len(tag_srcrevs) > 0:
- for name, srcrev in sorted(tag_srcrevs.items()):
- f.write('# tag_%s = "%s"\n' % (name, srcrev))
- if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
- pkg = d.getVar('PN')
- bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
+ for name, srcrev in sorted(srcrevs.items()):
+ suffix = "_" + name
+ if name == "default":
+ suffix = ""
+ orig_srcrev = d.getVar('SRCREV%s' % suffix, False)
+ if orig_srcrev:
+ f.write('# SRCREV%s = "%s"\n' % (suffix, orig_srcrev))
+ f.write('SRCREV%s = "%s"\n' % (suffix, srcrev))
+ for name, srcrev in sorted(tag_srcrevs.items()):
+ f.write('# tag_%s = "%s"\n' % (name, srcrev))
+ if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
+ pkg = d.getVar('PN')
+ bb.warn("Revision for tag %s in package %s was changed since last build (from %s to %s)" % (name, pkg, old_tag_srcrevs[name], srcrev))
else:
if os.path.exists(srcrevfile):
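The rewritten write_latest_srcrev treats the default revision as just another named entry: the name "default" maps to plain SRCREV, any other name to SRCREV_<name>. A self-contained sketch of that serialisation rule, with assumed hashes:

    def srcrev_lines(srcrevs):
        lines = []
        for name, srcrev in sorted(srcrevs.items()):
            suffix = "" if name == "default" else "_" + name
            lines.append('SRCREV%s = "%s"' % (suffix, srcrev))
        return lines

    print(srcrev_lines({"default": "0123abcd"}))
    # -> ['SRCREV = "0123abcd"']
    print(srcrev_lines({"machine": "4567ef01", "meta": "89ab2345"}))
    # -> ['SRCREV_machine = "4567ef01"', 'SRCREV_meta = "89ab2345"']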
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 8ab240589a..46a19fce32 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -36,6 +36,14 @@ python do_menuconfig() {
except OSError:
mtime = 0
+ # setup native pkg-config variables (kconfig scripts call pkg-config directly; it cannot generically be overridden to pkg-config-native)
+ d.setVar("PKG_CONFIG_DIR", "${STAGING_DIR_NATIVE}${libdir_native}/pkgconfig")
+ d.setVar("PKG_CONFIG_PATH", "${PKG_CONFIG_DIR}:${STAGING_DATADIR_NATIVE}/pkgconfig")
+ d.setVar("PKG_CONFIG_LIBDIR", "${PKG_CONFIG_DIR}")
+ d.setVarFlag("PKG_CONFIG_SYSROOT_DIR", "unexport", "1")
+ # ensure that environment variables are overwritten with this task's 'd' values
+ d.appendVar("OE_TERMINAL_EXPORTS", " PKG_CONFIG_DIR PKG_CONFIG_PATH PKG_CONFIG_LIBDIR PKG_CONFIG_SYSROOT_DIR")
+
oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
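The point of the cml1 hunk: kconfig's helper scripts invoke plain pkg-config, which cannot generically be redirected to pkg-config-native, so do_menuconfig exports a native pkg-config environment into the spawned terminal instead. A sketch, with an assumed native sysroot path, of the variables the terminal ends up seeing:

    # Assumed native sysroot location (illustrative only).
    staging_dir_native = "/build/tmp/work/recipe/recipe-sysroot-native"

    pkg_config_dir = staging_dir_native + "/usr/lib/pkgconfig"
    env = {
        "PKG_CONFIG_DIR": pkg_config_dir,
        "PKG_CONFIG_PATH": pkg_config_dir + ":" + staging_dir_native + "/usr/share/pkgconfig",
        "PKG_CONFIG_LIBDIR": pkg_config_dir,
    }
    # PKG_CONFIG_SYSROOT_DIR is unexported so pkg-config output is not
    # prefixed with the target sysroot.
    for key, value in env.items():
        print("%s=%s" % (key, value))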
diff --git a/meta/classes/create-spdx-2.2.bbclass b/meta/classes/create-spdx-2.2.bbclass
new file mode 100644
index 0000000000..42b693d586
--- /dev/null
+++ b/meta/classes/create-spdx-2.2.bbclass
@@ -0,0 +1,1067 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+DEPLOY_DIR_SPDX ??= "${DEPLOY_DIR}/spdx/${MACHINE}"
+
+# The product name that the CVE database uses. Defaults to BPN, but may need to
+# be overridden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
+CVE_PRODUCT ??= "${BPN}"
+CVE_VERSION ??= "${PV}"
+
+SPDXDIR ??= "${WORKDIR}/spdx"
+SPDXDEPLOY = "${SPDXDIR}/deploy"
+SPDXWORK = "${SPDXDIR}/work"
+SPDXIMAGEWORK = "${SPDXDIR}/image-work"
+SPDXSDKWORK = "${SPDXDIR}/sdk-work"
+
+SPDX_TOOL_NAME ??= "oe-spdx-creator"
+SPDX_TOOL_VERSION ??= "1.0"
+
+SPDXRUNTIMEDEPLOY = "${SPDXDIR}/runtime-deploy"
+
+SPDX_INCLUDE_SOURCES ??= "0"
+SPDX_ARCHIVE_SOURCES ??= "0"
+SPDX_ARCHIVE_PACKAGED ??= "0"
+
+SPDX_UUID_NAMESPACE ??= "sbom.openembedded.org"
+SPDX_NAMESPACE_PREFIX ??= "http://spdx.org/spdxdoc"
+SPDX_PRETTY ??= "0"
+
+SPDX_LICENSES ??= "${COREBASE}/meta/files/spdx-licenses.json"
+
+SPDX_CUSTOM_ANNOTATION_VARS ??= ""
+
+SPDX_ORG ??= "OpenEmbedded ()"
+SPDX_SUPPLIER ??= "Organization: ${SPDX_ORG}"
+SPDX_SUPPLIER[doc] = "The SPDX PackageSupplier field for SPDX packages created from \
+ this recipe. For SPDX documents created using this class during the build, this \
+ is the contact information for the person or organization who is doing the \
+ build."
+
+def extract_licenses(filename):
+ import re
+
+ lic_regex = re.compile(rb'^\W*SPDX-License-Identifier:\s*([ \w\d.()+-]+?)(?:\s+\W*)?$', re.MULTILINE)
+
+ try:
+ with open(filename, 'rb') as f:
+ size = min(15000, os.stat(filename).st_size)
+ txt = f.read(size)
+ licenses = re.findall(lic_regex, txt)
+ if licenses:
+ ascii_licenses = [lic.decode('ascii') for lic in licenses]
+ return ascii_licenses
+ except Exception as e:
+ bb.warn(f"Exception reading {filename}: {e}")
+ return None
+
+def get_doc_namespace(d, doc):
+ import uuid
+ namespace_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, d.getVar("SPDX_UUID_NAMESPACE"))
+ return "%s/%s-%s" % (d.getVar("SPDX_NAMESPACE_PREFIX"), doc.name, str(uuid.uuid5(namespace_uuid, doc.name)))
+
+def create_annotation(d, comment):
+ from datetime import datetime, timezone
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ annotation = oe.spdx.SPDXAnnotation()
+ annotation.annotationDate = creation_time
+ annotation.annotationType = "OTHER"
+ annotation.annotator = "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION"))
+ annotation.comment = comment
+ return annotation
+
+def recipe_spdx_is_native(d, recipe):
+ return any(a.annotationType == "OTHER" and
+ a.annotator == "Tool: %s - %s" % (d.getVar("SPDX_TOOL_NAME"), d.getVar("SPDX_TOOL_VERSION")) and
+ a.comment == "isNative" for a in recipe.annotations)
+
+def is_work_shared_spdx(d):
+ return bb.data.inherits_class('kernel', d) or ('work-shared' in d.getVar('WORKDIR'))
+
+def get_json_indent(d):
+ if d.getVar("SPDX_PRETTY") == "1":
+ return 2
+ return None
+
+python() {
+ import json
+ if d.getVar("SPDX_LICENSE_DATA"):
+ return
+
+ with open(d.getVar("SPDX_LICENSES"), "r") as f:
+ data = json.load(f)
+ # Transform the license array to a dictionary
+ data["licenses"] = {l["licenseId"]: l for l in data["licenses"]}
+ d.setVar("SPDX_LICENSE_DATA", data)
+}
+
+def convert_license_to_spdx(lic, document, d, existing={}):
+ from pathlib import Path
+ import oe.spdx
+
+ license_data = d.getVar("SPDX_LICENSE_DATA")
+ extracted = {}
+
+ def add_extracted_license(ident, name):
+ nonlocal document
+
+ if name in extracted:
+ return
+
+ extracted_info = oe.spdx.SPDXExtractedLicensingInfo()
+ extracted_info.name = name
+ extracted_info.licenseId = ident
+ extracted_info.extractedText = None
+
+ if name == "PD":
+ # Special-case this.
+ extracted_info.extractedText = "Software released to the public domain"
+ else:
+ # Search for the license in COMMON_LICENSE_DIR and LICENSE_PATH
+ for directory in [d.getVar('COMMON_LICENSE_DIR')] + (d.getVar('LICENSE_PATH') or '').split():
+ try:
+ with (Path(directory) / name).open(errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ break
+ except FileNotFoundError:
+ pass
+ if extracted_info.extractedText is None:
+ # If it's not SPDX or PD, then NO_GENERIC_LICENSE must be set
+ filename = d.getVarFlag('NO_GENERIC_LICENSE', name)
+ if filename:
+ filename = d.expand("${S}/" + filename)
+ with open(filename, errors="replace") as f:
+ extracted_info.extractedText = f.read()
+ else:
+ bb.error("Cannot find any text for license %s" % name)
+
+ extracted[name] = extracted_info
+ document.hasExtractedLicensingInfos.append(extracted_info)
+
+ def convert(l):
+ if l == "(" or l == ")":
+ return l
+
+ if l == "&":
+ return "AND"
+
+ if l == "|":
+ return "OR"
+
+ if l == "CLOSED":
+ return "NONE"
+
+ spdx_license = d.getVarFlag("SPDXLICENSEMAP", l) or l
+ if spdx_license in license_data["licenses"]:
+ return spdx_license
+
+ try:
+ spdx_license = existing[l]
+ except KeyError:
+ spdx_license = "LicenseRef-" + l
+ add_extracted_license(spdx_license, l)
+
+ return spdx_license
+
+ lic_split = lic.replace("(", " ( ").replace(")", " ) ").split()
+
+ return ' '.join(convert(l) for l in lic_split)
+
+def process_sources(d):
+ pn = d.getVar('PN')
+ assume_provided = (d.getVar("ASSUME_PROVIDED") or "").split()
+ if pn in assume_provided:
+ for p in d.getVar("PROVIDES").split():
+ if p != pn:
+ pn = p
+ break
+
+ # glibc-locale: do_fetch, do_unpack and do_patch tasks have been deleted,
+ # so avoid archiving source here.
+ if pn.startswith('glibc-locale'):
+ return False
+ if d.getVar('PN') == "libtool-cross":
+ return False
+ if d.getVar('PN') == "libgcc-initial":
+ return False
+ if d.getVar('PN') == "shadow-sysroot":
+ return False
+
+ # We just archive gcc-source for all the gcc related recipes
+ if d.getVar('BPN') in ['gcc', 'libgcc']:
+ bb.debug(1, 'spdx: There is a bug in the scan of %s, do nothing' % pn)
+ return False
+
+ return True
+
+
+def add_package_files(d, doc, spdx_pkg, topdir, get_spdxid, get_types, *, archive=None, ignore_dirs=[], ignore_top_level_dirs=[]):
+ from pathlib import Path
+ import oe.spdx
+ import hashlib
+
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+ if source_date_epoch:
+ source_date_epoch = int(source_date_epoch)
+
+ sha1s = []
+ spdx_files = []
+
+ file_counter = 1
+ for subdir, dirs, files in os.walk(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_dirs]
+ if subdir == str(topdir):
+ dirs[:] = [d for d in dirs if d not in ignore_top_level_dirs]
+
+ for file in files:
+ filepath = Path(subdir) / file
+ filename = str(filepath.relative_to(topdir))
+
+ if not filepath.is_symlink() and filepath.is_file():
+ spdx_file = oe.spdx.SPDXFile()
+ spdx_file.SPDXID = get_spdxid(file_counter)
+ for t in get_types(filepath):
+ spdx_file.fileTypes.append(t)
+ spdx_file.fileName = filename
+
+ if archive is not None:
+ with filepath.open("rb") as f:
+ info = archive.gettarinfo(fileobj=f)
+ info.name = filename
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > source_date_epoch:
+ info.mtime = source_date_epoch
+
+ archive.addfile(info, f)
+
+ sha1 = bb.utils.sha1_file(filepath)
+ sha1s.append(sha1)
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA1",
+ checksumValue=sha1,
+ ))
+ spdx_file.checksums.append(oe.spdx.SPDXChecksum(
+ algorithm="SHA256",
+ checksumValue=bb.utils.sha256_file(filepath),
+ ))
+
+ if "SOURCE" in spdx_file.fileTypes:
+ extracted_lics = extract_licenses(filepath)
+ if extracted_lics:
+ spdx_file.licenseInfoInFiles = extracted_lics
+
+ doc.files.append(spdx_file)
+ doc.add_relationship(spdx_pkg, "CONTAINS", spdx_file)
+ spdx_pkg.hasFiles.append(spdx_file.SPDXID)
+
+ spdx_files.append(spdx_file)
+
+ file_counter += 1
+
+ sha1s.sort()
+ verifier = hashlib.sha1()
+ for v in sha1s:
+ verifier.update(v.encode("utf-8"))
+ spdx_pkg.packageVerificationCode.packageVerificationCodeValue = verifier.hexdigest()
+
+ return spdx_files
+
+
+def add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources):
+ from pathlib import Path
+ import hashlib
+ import oe.packagedata
+ import oe.spdx
+
+ debug_search_paths = [
+ Path(d.getVar('PKGD')),
+ Path(d.getVar('STAGING_DIR_TARGET')),
+ Path(d.getVar('STAGING_DIR_NATIVE')),
+ Path(d.getVar('STAGING_KERNEL_DIR')),
+ ]
+
+ pkg_data = oe.packagedata.read_subpkgdata_extended(package, d)
+
+ if pkg_data is None:
+ return
+
+ for file_path, file_data in pkg_data["files_info"].items():
+ if not "debugsrc" in file_data:
+ continue
+
+ for pkg_file in package_files:
+ if file_path.lstrip("/") == pkg_file.fileName.lstrip("/"):
+ break
+ else:
+ bb.fatal("No package file found for %s" % str(file_path))
+ continue
+
+ for debugsrc in file_data["debugsrc"]:
+ ref_id = "NOASSERTION"
+ for search in debug_search_paths:
+ if debugsrc.startswith("/usr/src/kernel"):
+ debugsrc_path = search / debugsrc.replace('/usr/src/kernel/', '')
+ else:
+ debugsrc_path = search / debugsrc.lstrip("/")
+ if not debugsrc_path.exists():
+ continue
+
+ file_sha256 = bb.utils.sha256_file(debugsrc_path)
+
+ if file_sha256 in sources:
+ source_file = sources[file_sha256]
+
+ doc_ref = package_doc.find_external_document_ref(source_file.doc.documentNamespace)
+ if doc_ref is None:
+ doc_ref = oe.spdx.SPDXExternalDocumentRef()
+ doc_ref.externalDocumentId = "DocumentRef-dependency-" + source_file.doc.name
+ doc_ref.spdxDocument = source_file.doc.documentNamespace
+ doc_ref.checksum.algorithm = "SHA1"
+ doc_ref.checksum.checksumValue = source_file.doc_sha1
+ package_doc.externalDocumentRefs.append(doc_ref)
+
+ ref_id = "%s:%s" % (doc_ref.externalDocumentId, source_file.file.SPDXID)
+ else:
+ bb.debug(1, "Debug source %s with SHA256 %s not found in any dependency" % (str(debugsrc_path), file_sha256))
+ break
+ else:
+ bb.debug(1, "Debug source %s not found" % debugsrc)
+
+ package_doc.add_relationship(pkg_file, "GENERATED_FROM", ref_id, comment=debugsrc)
+
+def collect_dep_recipes(d, doc, spdx_recipe):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ dep_recipes = []
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if
+ dep[1] == "do_create_spdx" and dep[0] != d.getVar("PN")
+ ))
+ for dep_pn in deps:
+ dep_recipe_path = deploy_dir_spdx / "recipes" / ("recipe-%s.spdx.json" % dep_pn)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_recipe_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pn:
+ spdx_dep_recipe = pkg
+ break
+ else:
+ continue
+
+ dep_recipes.append(oe.sbom.DepRecipe(spdx_dep_doc, spdx_dep_sha1, spdx_dep_recipe))
+
+ dep_recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_recipe_ref.externalDocumentId = "DocumentRef-dependency-" + spdx_dep_doc.name
+ dep_recipe_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_recipe_ref.checksum.algorithm = "SHA1"
+ dep_recipe_ref.checksum.checksumValue = spdx_dep_sha1
+
+ doc.externalDocumentRefs.append(dep_recipe_ref)
+
+ doc.add_relationship(
+ "%s:%s" % (dep_recipe_ref.externalDocumentId, spdx_dep_recipe.SPDXID),
+ "BUILD_DEPENDENCY_OF",
+ spdx_recipe
+ )
+
+ return dep_recipes
+
+collect_dep_recipes[vardepsexclude] += "BB_TASKDEPDATA"
+collect_dep_recipes[vardeps] += "DEPENDS"
+
+def collect_dep_sources(d, dep_recipes):
+ import oe.sbom
+
+ sources = {}
+ for dep in dep_recipes:
+ # Don't collect sources from native recipes as they
+ # match non-native sources also.
+ if recipe_spdx_is_native(d, dep.recipe):
+ continue
+ recipe_files = set(dep.recipe.hasFiles)
+
+ for spdx_file in dep.doc.files:
+ if spdx_file.SPDXID not in recipe_files:
+ continue
+
+ if "SOURCE" in spdx_file.fileTypes:
+ for checksum in spdx_file.checksums:
+ if checksum.algorithm == "SHA256":
+ sources[checksum.checksumValue] = oe.sbom.DepSource(dep.doc, dep.doc_sha1, dep.recipe, spdx_file)
+ break
+
+ return sources
+
+def add_download_packages(d, doc, recipe):
+ import os.path
+ from bb.fetch2 import decodeurl, CHECKSUM_LIST
+ import bb.process
+ import oe.spdx
+ import oe.sbom
+
+ for download_idx, src_uri in enumerate(d.getVar('SRC_URI').split()):
+ f = bb.fetch2.FetchData(src_uri, d)
+
+ for name in f.names:
+ package = oe.spdx.SPDXPackage()
+ package.name = "%s-source-%d" % (d.getVar("PN"), download_idx + 1)
+ package.SPDXID = oe.sbom.get_download_spdxid(d, download_idx + 1)
+
+ if f.type == "file":
+ continue
+
+ uri = f.type
+ proto = getattr(f, "proto", None)
+ if proto is not None:
+ uri = uri + "+" + proto
+ uri = uri + "://" + f.host + f.path
+
+ if f.method.supports_srcrev():
+ uri = uri + "@" + f.revisions[name]
+
+ if f.method.supports_checksum(f):
+ for checksum_id in CHECKSUM_LIST:
+ if checksum_id.upper() not in oe.spdx.SPDXPackage.ALLOWED_CHECKSUMS:
+ continue
+
+ expected_checksum = getattr(f, "%s_expected" % checksum_id)
+ if expected_checksum is None:
+ continue
+
+ c = oe.spdx.SPDXChecksum()
+ c.algorithm = checksum_id.upper()
+ c.checksumValue = expected_checksum
+ package.checksums.append(c)
+
+ package.downloadLocation = uri
+ doc.packages.append(package)
+ doc.add_relationship(doc, "DESCRIBES", package)
+ # In the future, we might be able to do more fancy dependencies,
+ # but this should be sufficient for now
+ doc.add_relationship(package, "BUILD_DEPENDENCY_OF", recipe)
+
+python do_create_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import uuid
+ from pathlib import Path
+ from contextlib import contextmanager
+ import oe.cve_check
+
+ @contextmanager
+ def optional_tarfile(name, guard, mode="w"):
+ import tarfile
+ import gzip
+
+ if guard:
+ name.parent.mkdir(parents=True, exist_ok=True)
+ with gzip.open(name, mode=mode + "b") as f:
+ with tarfile.open(fileobj=f, mode=mode + "|") as tf:
+ yield tf
+ else:
+ yield None
+
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_workdir = Path(d.getVar("SPDXWORK"))
+ include_sources = d.getVar("SPDX_INCLUDE_SOURCES") == "1"
+ archive_sources = d.getVar("SPDX_ARCHIVE_SOURCES") == "1"
+ archive_packaged = d.getVar("SPDX_ARCHIVE_PACKAGED") == "1"
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ doc = oe.spdx.SPDXDocument()
+
+ doc.name = "recipe-" + d.getVar("PN")
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing recipe files during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ recipe = oe.spdx.SPDXPackage()
+ recipe.name = d.getVar("PN")
+ recipe.versionInfo = d.getVar("PV")
+ recipe.SPDXID = oe.sbom.get_recipe_spdxid(d)
+ recipe.supplier = d.getVar("SPDX_SUPPLIER")
+ if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
+ recipe.annotations.append(create_annotation(d, "isNative"))
+
+ homepage = d.getVar("HOMEPAGE")
+ if homepage:
+ recipe.homepage = homepage
+
+ license = d.getVar("LICENSE")
+ if license:
+ recipe.licenseDeclared = convert_license_to_spdx(license, doc, d)
+
+ summary = d.getVar("SUMMARY")
+ if summary:
+ recipe.summary = summary
+
+ description = d.getVar("DESCRIPTION")
+ if description:
+ recipe.description = description
+
+ if d.getVar("SPDX_CUSTOM_ANNOTATION_VARS"):
+ for var in d.getVar('SPDX_CUSTOM_ANNOTATION_VARS').split():
+ recipe.annotations.append(create_annotation(d, var + "=" + d.getVar(var)))
+
+ # Some CVEs may be patched during the build process without incrementing the version number,
+ # so querying for CVEs based on the CPE id can lead to false positives. To account for this,
+ # save the CVEs fixed by patches to source information field in the SPDX.
+ patched_cves = oe.cve_check.get_patched_cves(d)
+ patched_cves = list(patched_cves)
+ patched_cves = ' '.join(patched_cves)
+ if patched_cves:
+ recipe.sourceInfo = "CVEs fixed: " + patched_cves
+
+ cpe_ids = oe.cve_check.get_cpe_ids(d.getVar("CVE_PRODUCT"), d.getVar("CVE_VERSION"))
+ if cpe_ids:
+ for cpe_id in cpe_ids:
+ cpe = oe.spdx.SPDXExternalReference()
+ cpe.referenceCategory = "SECURITY"
+ cpe.referenceType = "http://spdx.org/rdf/references/cpe23Type"
+ cpe.referenceLocator = cpe_id
+ recipe.externalRefs.append(cpe)
+
+ doc.packages.append(recipe)
+ doc.add_relationship(doc, "DESCRIBES", recipe)
+
+ add_download_packages(d, doc, recipe)
+
+ if process_sources(d) and include_sources:
+ recipe_archive = deploy_dir_spdx / "recipes" / (doc.name + ".tar.gz")
+ with optional_tarfile(recipe_archive, archive_sources) as archive:
+ spdx_get_src(d)
+
+ add_package_files(
+ d,
+ doc,
+ recipe,
+ spdx_workdir,
+ lambda file_counter: "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), file_counter),
+ lambda filepath: ["SOURCE"],
+ ignore_dirs=[".git"],
+ ignore_top_level_dirs=["temp"],
+ archive=archive,
+ )
+
+ if archive is not None:
+ recipe.packageFileName = str(recipe_archive.name)
+
+ dep_recipes = collect_dep_recipes(d, doc, recipe)
+
+ doc_sha1 = oe.sbom.write_doc(d, doc, "recipes", indent=get_json_indent(d))
+ dep_recipes.append(oe.sbom.DepRecipe(doc, doc_sha1, recipe))
+
+ recipe_ref = oe.spdx.SPDXExternalDocumentRef()
+ recipe_ref.externalDocumentId = "DocumentRef-recipe-" + recipe.name
+ recipe_ref.spdxDocument = doc.documentNamespace
+ recipe_ref.checksum.algorithm = "SHA1"
+ recipe_ref.checksum.checksumValue = doc_sha1
+
+ sources = collect_dep_sources(d, dep_recipes)
+ found_licenses = {license.name:recipe_ref.externalDocumentId + ":" + license.licenseId for license in doc.hasExtractedLicensingInfos}
+
+ if not recipe_spdx_is_native(d, recipe):
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ if not oe.packagedata.packaged(package, d):
+ continue
+
+ package_doc = oe.spdx.SPDXDocument()
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ package_doc.name = pkg_name
+ package_doc.documentNamespace = get_doc_namespace(d, package_doc)
+ package_doc.creationInfo.created = creation_time
+ package_doc.creationInfo.comment = "This document was created by analyzing packages created during the build."
+ package_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ package_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ package_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ package_doc.creationInfo.creators.append("Person: N/A ()")
+ package_doc.externalDocumentRefs.append(recipe_ref)
+
+ package_license = d.getVar("LICENSE:%s" % package) or d.getVar("LICENSE")
+
+ spdx_package = oe.spdx.SPDXPackage()
+
+ spdx_package.SPDXID = oe.sbom.get_package_spdxid(pkg_name)
+ spdx_package.name = pkg_name
+ spdx_package.versionInfo = d.getVar("PV")
+ spdx_package.licenseDeclared = convert_license_to_spdx(package_license, package_doc, d, found_licenses)
+ spdx_package.supplier = d.getVar("SPDX_SUPPLIER")
+
+ package_doc.packages.append(spdx_package)
+
+ package_doc.add_relationship(spdx_package, "GENERATED_FROM", "%s:%s" % (recipe_ref.externalDocumentId, recipe.SPDXID))
+ package_doc.add_relationship(package_doc, "DESCRIBES", spdx_package)
+
+ package_archive = deploy_dir_spdx / "packages" / (package_doc.name + ".tar.gz")
+ with optional_tarfile(package_archive, archive_packaged) as archive:
+ package_files = add_package_files(
+ d,
+ package_doc,
+ spdx_package,
+ pkgdest / package,
+ lambda file_counter: oe.sbom.get_packaged_file_spdxid(pkg_name, file_counter),
+ lambda filepath: ["BINARY"],
+ ignore_top_level_dirs=['CONTROL', 'DEBIAN'],
+ archive=archive,
+ )
+
+ if archive is not None:
+ spdx_package.packageFileName = str(package_archive.name)
+
+ add_package_sources_from_debug(d, package_doc, spdx_package, package, package_files, sources)
+
+ oe.sbom.write_doc(d, package_doc, "packages", indent=get_json_indent(d))
+}
+# NOTE: depending on do_unpack is a hack that is necessary to get its dependencies in order to archive the source
+addtask do_create_spdx after do_package do_packagedata do_unpack before do_populate_sdk do_build do_rm_work
+
+SSTATETASKS += "do_create_spdx"
+do_create_spdx[sstate-inputdirs] = "${SPDXDEPLOY}"
+do_create_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_spdx_setscene
+
+do_create_spdx[dirs] = "${SPDXWORK}"
+do_create_spdx[cleandirs] = "${SPDXDEPLOY} ${SPDXWORK}"
+do_create_spdx[depends] += "${PATCHDEPENDENCY}"
+do_create_spdx[deptask] = "do_create_spdx"
+
+def collect_package_providers(d):
+ from pathlib import Path
+ import oe.sbom
+ import oe.spdx
+ import json
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+
+ providers = {}
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ deps = sorted(set(
+ dep[0] for dep in taskdepdata.values() if dep[0] != d.getVar("PN")
+ ))
+ deps.append(d.getVar("PN"))
+
+ for dep_pn in deps:
+ recipe_data = oe.packagedata.read_pkgdata(dep_pn, d)
+
+ for pkg in recipe_data.get("PACKAGES", "").split():
+
+ pkg_data = oe.packagedata.read_subpkgdata_dict(pkg, d)
+ rprovides = set(n for n, _ in bb.utils.explode_dep_versions2(pkg_data.get("RPROVIDES", "")).items())
+ rprovides.add(pkg)
+
+ for r in rprovides:
+ providers[r] = pkg
+
+ return providers
+
+collect_package_providers[vardepsexclude] += "BB_TASKDEPDATA"
+
+python do_create_runtime_spdx() {
+ from datetime import datetime, timezone
+ import oe.sbom
+ import oe.spdx
+ import oe.packagedata
+ from pathlib import Path
+
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ spdx_deploy = Path(d.getVar("SPDXRUNTIMEDEPLOY"))
+ is_native = bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d)
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ providers = collect_package_providers(d)
+
+ if not is_native:
+ bb.build.exec_func("read_subpackage_metadata", d)
+
+ dep_package_cache = {}
+
+ pkgdest = Path(d.getVar("PKGDEST"))
+ for package in d.getVar("PACKAGES").split():
+ localdata = bb.data.createCopy(d)
+ pkg_name = d.getVar("PKG:%s" % package) or package
+ localdata.setVar("PKG", pkg_name)
+ localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + package)
+
+ if not oe.packagedata.packaged(package, localdata):
+ continue
+
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (pkg_name + ".spdx.json")
+
+ package_doc, package_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in package_doc.packages:
+ if p.name == pkg_name:
+ spdx_package = p
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (pkg_name, pkg_spdx_path))
+
+ runtime_doc = oe.spdx.SPDXDocument()
+ runtime_doc.name = "runtime-" + pkg_name
+ runtime_doc.documentNamespace = get_doc_namespace(localdata, runtime_doc)
+ runtime_doc.creationInfo.created = creation_time
+ runtime_doc.creationInfo.comment = "This document was created by analyzing package runtime dependencies."
+ runtime_doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ runtime_doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ runtime_doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ runtime_doc.creationInfo.creators.append("Person: N/A ()")
+
+ package_ref = oe.spdx.SPDXExternalDocumentRef()
+ package_ref.externalDocumentId = "DocumentRef-package-" + package
+ package_ref.spdxDocument = package_doc.documentNamespace
+ package_ref.checksum.algorithm = "SHA1"
+ package_ref.checksum.checksumValue = package_doc_sha1
+
+ runtime_doc.externalDocumentRefs.append(package_ref)
+
+ runtime_doc.add_relationship(
+ runtime_doc.SPDXID,
+ "AMENDS",
+ "%s:%s" % (package_ref.externalDocumentId, package_doc.SPDXID)
+ )
+
+ deps = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS") or "")
+ seen_deps = set()
+ for dep, _ in deps.items():
+ if dep in seen_deps:
+ continue
+
+ if dep not in providers:
+ continue
+
+ dep = providers[dep]
+
+ if not oe.packagedata.packaged(dep, localdata):
+ continue
+
+ dep_pkg_data = oe.packagedata.read_subpkgdata_dict(dep, d)
+ dep_pkg = dep_pkg_data["PKG"]
+
+ if dep in dep_package_cache:
+ (dep_spdx_package, dep_package_ref) = dep_package_cache[dep]
+ else:
+ dep_path = deploy_dir_spdx / "packages" / ("%s.spdx.json" % dep_pkg)
+
+ spdx_dep_doc, spdx_dep_sha1 = oe.sbom.read_doc(dep_path)
+
+ for pkg in spdx_dep_doc.packages:
+ if pkg.name == dep_pkg:
+ dep_spdx_package = pkg
+ break
+ else:
+ bb.fatal("Package '%s' not found in %s" % (dep_pkg, dep_path))
+
+ dep_package_ref = oe.spdx.SPDXExternalDocumentRef()
+ dep_package_ref.externalDocumentId = "DocumentRef-runtime-dependency-" + spdx_dep_doc.name
+ dep_package_ref.spdxDocument = spdx_dep_doc.documentNamespace
+ dep_package_ref.checksum.algorithm = "SHA1"
+ dep_package_ref.checksum.checksumValue = spdx_dep_sha1
+
+ dep_package_cache[dep] = (dep_spdx_package, dep_package_ref)
+
+ runtime_doc.externalDocumentRefs.append(dep_package_ref)
+
+ runtime_doc.add_relationship(
+ "%s:%s" % (dep_package_ref.externalDocumentId, dep_spdx_package.SPDXID),
+ "RUNTIME_DEPENDENCY_OF",
+ "%s:%s" % (package_ref.externalDocumentId, spdx_package.SPDXID)
+ )
+ seen_deps.add(dep)
+
+ oe.sbom.write_doc(d, runtime_doc, "runtime", spdx_deploy, indent=get_json_indent(d))
+}
+
+addtask do_create_runtime_spdx after do_create_spdx before do_build do_rm_work
+SSTATETASKS += "do_create_runtime_spdx"
+do_create_runtime_spdx[sstate-inputdirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[sstate-outputdirs] = "${DEPLOY_DIR_SPDX}"
+
+python do_create_runtime_spdx_setscene () {
+ sstate_setscene(d)
+}
+addtask do_create_runtime_spdx_setscene
+
+do_create_runtime_spdx[dirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[cleandirs] = "${SPDXRUNTIMEDEPLOY}"
+do_create_runtime_spdx[rdeptask] = "do_create_spdx"
+
+def spdx_get_src(d):
+ """
+ Save the patched source of the recipe into SPDXWORK.
+ """
+ import shutil
+ spdx_workdir = d.getVar('SPDXWORK')
+ spdx_sysroot_native = d.getVar('STAGING_DIR_NATIVE')
+ pn = d.getVar('PN')
+
+ workdir = d.getVar("WORKDIR")
+
+ try:
+ # The kernel class functions require it to be on work-shared, so we don't change WORKDIR
+ if not is_work_shared_spdx(d):
+ # Change the WORKDIR to make do_unpack and do_patch run in another dir.
+ d.setVar('WORKDIR', spdx_workdir)
+ # Restore the original path to recipe's native sysroot (it's relative to WORKDIR).
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+
+ # The changed 'WORKDIR' also changes 'B', so create the 'B' dir in case
+ # the following tasks need it (for example, some recipes' do_patch
+ # requires 'B' to exist).
+ bb.utils.mkdirhier(d.getVar('B'))
+
+ bb.build.exec_func('do_unpack', d)
+ # Copy source of kernel to spdx_workdir
+ if is_work_shared_spdx(d):
+ share_src = d.getVar('WORKDIR')
+ d.setVar('WORKDIR', spdx_workdir)
+ d.setVar('STAGING_DIR_NATIVE', spdx_sysroot_native)
+ src_dir = spdx_workdir + "/" + d.getVar('PN')+ "-" + d.getVar('PV') + "-" + d.getVar('PR')
+ bb.utils.mkdirhier(src_dir)
+ if bb.data.inherits_class('kernel',d):
+ share_src = d.getVar('STAGING_KERNEL_DIR')
+ cmd_copy_share = "cp -rf " + share_src + "/* " + src_dir + "/"
+ cmd_copy_shared_res = os.popen(cmd_copy_share).read()
+ bb.note("cmd_copy_shared_result = " + cmd_copy_shared_res)
+
+ git_path = src_dir + "/.git"
+ if os.path.exists(git_path):
+ shutil.rmtree(git_path)
+
+ # Make sure gcc and kernel sources are patched only once
+ if not (d.getVar('SRC_URI') == "" or is_work_shared_spdx(d)):
+ bb.build.exec_func('do_patch', d)
+
+ # Some userland recipes have no source.
+ if not os.path.exists( spdx_workdir ):
+ bb.utils.mkdirhier(spdx_workdir)
+ finally:
+ d.setVar("WORKDIR", workdir)
+
+do_rootfs[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_rootfs[cleandirs] += "${SPDXIMAGEWORK}"
+
+ROOTFS_POSTUNINSTALL_COMMAND =+ "image_combine_spdx ; "
+
+do_populate_sdk[recrdeptask] += "do_create_spdx do_create_runtime_spdx"
+do_populate_sdk[cleandirs] += "${SPDXSDKWORK}"
+POPULATE_SDK_POST_HOST_COMMAND:append:task-populate-sdk = " sdk_host_combine_spdx; "
+POPULATE_SDK_POST_TARGET_COMMAND:append:task-populate-sdk = " sdk_target_combine_spdx; "
+
+python image_combine_spdx() {
+ import os
+ import oe.sbom
+ from pathlib import Path
+ from oe.rootfs import image_list_installed_packages
+
+ image_name = d.getVar("IMAGE_NAME")
+ image_link_name = d.getVar("IMAGE_LINK_NAME")
+ imgdeploydir = Path(d.getVar("IMGDEPLOYDIR"))
+ img_spdxid = oe.sbom.get_image_spdxid(image_name)
+ packages = image_list_installed_packages(d)
+
+ combine_spdx(d, image_name, imgdeploydir, img_spdxid, packages, Path(d.getVar("SPDXIMAGEWORK")))
+
+ def make_image_link(target_path, suffix):
+ if image_link_name:
+ link = imgdeploydir / (image_link_name + suffix)
+ if link != target_path:
+ link.symlink_to(os.path.relpath(target_path, link.parent))
+
+ spdx_tar_path = imgdeploydir / (image_name + ".spdx.tar.gz")
+ make_image_link(spdx_tar_path, ".spdx.tar.gz")
+}
+
+python sdk_host_combine_spdx() {
+ sdk_combine_spdx(d, "host")
+}
+
+python sdk_target_combine_spdx() {
+ sdk_combine_spdx(d, "target")
+}
+
+def sdk_combine_spdx(d, sdk_type):
+ import oe.sbom
+ from pathlib import Path
+ from oe.sdk import sdk_list_installed_packages
+
+ sdk_name = d.getVar("SDK_NAME") + "-" + sdk_type
+ sdk_deploydir = Path(d.getVar("SDKDEPLOYDIR"))
+ sdk_spdxid = oe.sbom.get_sdk_spdxid(sdk_name)
+ sdk_packages = sdk_list_installed_packages(d, sdk_type == "target")
+ combine_spdx(d, sdk_name, sdk_deploydir, sdk_spdxid, sdk_packages, Path(d.getVar('SPDXSDKWORK')))
+
+def combine_spdx(d, rootfs_name, rootfs_deploydir, rootfs_spdxid, packages, spdx_workdir):
+ import os
+ import oe.spdx
+ import oe.sbom
+ import io
+ import json
+ from datetime import timezone, datetime
+ from pathlib import Path
+ import tarfile
+ import gzip
+
+ creation_time = datetime.now(tz=timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+ deploy_dir_spdx = Path(d.getVar("DEPLOY_DIR_SPDX"))
+ source_date_epoch = d.getVar("SOURCE_DATE_EPOCH")
+
+ doc = oe.spdx.SPDXDocument()
+ doc.name = rootfs_name
+ doc.documentNamespace = get_doc_namespace(d, doc)
+ doc.creationInfo.created = creation_time
+ doc.creationInfo.comment = "This document was created by analyzing the source of the Yocto recipe during the build."
+ doc.creationInfo.licenseListVersion = d.getVar("SPDX_LICENSE_DATA")["licenseListVersion"]
+ doc.creationInfo.creators.append("Tool: OpenEmbedded Core create-spdx.bbclass")
+ doc.creationInfo.creators.append("Organization: %s" % d.getVar("SPDX_ORG"))
+ doc.creationInfo.creators.append("Person: N/A ()")
+
+ image = oe.spdx.SPDXPackage()
+ image.name = d.getVar("PN")
+ image.versionInfo = d.getVar("PV")
+ image.SPDXID = rootfs_spdxid
+ image.supplier = d.getVar("SPDX_SUPPLIER")
+
+ doc.packages.append(image)
+
+ for name in sorted(packages.keys()):
+ pkg_spdx_path = deploy_dir_spdx / "packages" / (name + ".spdx.json")
+ pkg_doc, pkg_doc_sha1 = oe.sbom.read_doc(pkg_spdx_path)
+
+ for p in pkg_doc.packages:
+ if p.name == name:
+ pkg_ref = oe.spdx.SPDXExternalDocumentRef()
+ pkg_ref.externalDocumentId = "DocumentRef-%s" % pkg_doc.name
+ pkg_ref.spdxDocument = pkg_doc.documentNamespace
+ pkg_ref.checksum.algorithm = "SHA1"
+ pkg_ref.checksum.checksumValue = pkg_doc_sha1
+
+ doc.externalDocumentRefs.append(pkg_ref)
+ doc.add_relationship(image, "CONTAINS", "%s:%s" % (pkg_ref.externalDocumentId, p.SPDXID))
+ break
+ else:
+ bb.fatal("Unable to find package with name '%s' in SPDX file %s" % (name, pkg_spdx_path))
+
+ runtime_spdx_path = deploy_dir_spdx / "runtime" / ("runtime-" + name + ".spdx.json")
+ runtime_doc, runtime_doc_sha1 = oe.sbom.read_doc(runtime_spdx_path)
+
+ runtime_ref = oe.spdx.SPDXExternalDocumentRef()
+ runtime_ref.externalDocumentId = "DocumentRef-%s" % runtime_doc.name
+ runtime_ref.spdxDocument = runtime_doc.documentNamespace
+ runtime_ref.checksum.algorithm = "SHA1"
+ runtime_ref.checksum.checksumValue = runtime_doc_sha1
+
+ # "OTHER" isn't ideal here, but I can't find a relationship that makes sense
+ doc.externalDocumentRefs.append(runtime_ref)
+ doc.add_relationship(
+ image,
+ "OTHER",
+ "%s:%s" % (runtime_ref.externalDocumentId, runtime_doc.SPDXID),
+ comment="Runtime dependencies for %s" % name
+ )
+
+ image_spdx_path = spdx_workdir / (rootfs_name + ".spdx.json")
+
+ with image_spdx_path.open("wb") as f:
+ doc.to_json(f, sort_keys=True, indent=get_json_indent(d))
+
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+
+ visited_docs = set()
+
+ index = {"documents": []}
+
+ spdx_tar_path = rootfs_deploydir / (rootfs_name + ".spdx.tar.gz")
+ with gzip.open(spdx_tar_path, "w") as f:
+ with tarfile.open(fileobj=f, mode="w|") as tar:
+ def collect_spdx_document(path):
+ nonlocal tar
+ nonlocal deploy_dir_spdx
+ nonlocal source_date_epoch
+ nonlocal index
+
+ if path in visited_docs:
+ return
+
+ visited_docs.add(path)
+
+ with path.open("rb") as f:
+ doc, sha1 = oe.sbom.read_doc(f)
+ f.seek(0)
+
+ if doc.documentNamespace in visited_docs:
+ return
+
+ bb.note("Adding SPDX document %s" % path)
+ visited_docs.add(doc.documentNamespace)
+ info = tar.gettarinfo(fileobj=f)
+
+ info.name = doc.name + ".spdx.json"
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ if source_date_epoch is not None and info.mtime > int(source_date_epoch):
+ info.mtime = int(source_date_epoch)
+
+ tar.addfile(info, f)
+
+ index["documents"].append({
+ "filename": info.name,
+ "documentNamespace": doc.documentNamespace,
+ "sha1": sha1,
+ })
+
+ for ref in doc.externalDocumentRefs:
+ ref_path = deploy_dir_spdx / "by-namespace" / ref.spdxDocument.replace("/", "_")
+ collect_spdx_document(ref_path)
+
+ collect_spdx_document(image_spdx_path)
+
+ index["documents"].sort(key=lambda x: x["filename"])
+
+ index_str = io.BytesIO(json.dumps(
+ index,
+ sort_keys=True,
+ indent=get_json_indent(d),
+ ).encode("utf-8"))
+
+ info = tarfile.TarInfo()
+ info.name = "index.json"
+ info.size = len(index_str.getvalue())
+ info.uid = 0
+ info.gid = 0
+ info.uname = "root"
+ info.gname = "root"
+
+ tar.addfile(info, fileobj=index_str)
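The tarball written by combine_spdx bundles every SPDX document reachable from the image document, plus an index.json recording the filename, documentNamespace and sha1 of each entry. A consumer-side sketch for inspecting that index (the archive name is an assumption; real names follow ${IMAGE_NAME}.spdx.tar.gz):

    import json
    import tarfile

    with tarfile.open("core-image-minimal-qemux86-64.spdx.tar.gz", "r:gz") as tar:
        index = json.load(tar.extractfile("index.json"))
        for doc in index["documents"]:
            print(doc["sha1"], doc["filename"], doc["documentNamespace"])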
diff --git a/meta/classes/create-spdx.bbclass b/meta/classes/create-spdx.bbclass
new file mode 100644
index 0000000000..19c6c0ff0b
--- /dev/null
+++ b/meta/classes/create-spdx.bbclass
@@ -0,0 +1,8 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Include this class when you don't care what version of SPDX you get; it will
+# be updated to the latest stable version that is supported
+inherit create-spdx-2.2
diff --git a/meta/classes/cve-check.bbclass b/meta/classes/cve-check.bbclass
index 8086cf05e9..5e6bae1757 100644
--- a/meta/classes/cve-check.bbclass
+++ b/meta/classes/cve-check.bbclass
@@ -20,13 +20,13 @@
# the only method to check against CVEs. Running this tool
# doesn't guarantee your packages are free of CVEs.
-# The product name that the CVE database uses. Defaults to BPN, but may need to
+# The product name that the CVE database uses defaults to BPN, but may need to
# be overriden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff).
CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_2.db"
CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock"
CVE_CHECK_LOG ?= "${T}/cve.log"
@@ -34,15 +34,33 @@ CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
CVE_CHECK_SUMMARY_DIR ?= "${LOG_DIR}/cve"
CVE_CHECK_SUMMARY_FILE_NAME ?= "cve-summary"
CVE_CHECK_SUMMARY_FILE ?= "${CVE_CHECK_SUMMARY_DIR}/${CVE_CHECK_SUMMARY_FILE_NAME}"
+CVE_CHECK_SUMMARY_FILE_NAME_JSON = "cve-summary.json"
+CVE_CHECK_SUMMARY_INDEX_PATH = "${CVE_CHECK_SUMMARY_DIR}/cve-summary-index.txt"
+
+CVE_CHECK_LOG_JSON ?= "${T}/cve.json"
CVE_CHECK_DIR ??= "${DEPLOY_DIR}/cve"
CVE_CHECK_RECIPE_FILE ?= "${CVE_CHECK_DIR}/${PN}"
-CVE_CHECK_MANIFEST ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_RECIPE_FILE_JSON ?= "${CVE_CHECK_DIR}/${PN}_cve.json"
+CVE_CHECK_MANIFEST ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cve"
+CVE_CHECK_MANIFEST_JSON ?= "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.json"
CVE_CHECK_COPY_FILES ??= "1"
CVE_CHECK_CREATE_MANIFEST ??= "1"
+# Report Patched or Ignored/Whitelisted CVEs
CVE_CHECK_REPORT_PATCHED ??= "1"
+CVE_CHECK_SHOW_WARNINGS ??= "1"
+
+# Provide text output
+CVE_CHECK_FORMAT_TEXT ??= "1"
+
+# Provide JSON output - disabled by default for backward compatibility
+CVE_CHECK_FORMAT_JSON ??= "0"
+
+# Check for packages without CVEs (no issues or missing product name)
+CVE_CHECK_COVERAGE ??= "1"
+
# Whitelist for packages (PN)
CVE_CHECK_PN_WHITELIST ?= ""
@@ -53,12 +71,43 @@ CVE_CHECK_PN_WHITELIST ?= ""
#
CVE_CHECK_WHITELIST ?= ""
-# set to "alphabetical" for version using single alphabetical character as increament release
+# Layers to be excluded
+CVE_CHECK_LAYER_EXCLUDELIST ??= ""
+
+# Layers to be included
+CVE_CHECK_LAYER_INCLUDELIST ??= ""
+
+
+# set to "alphabetical" for version using single alphabetical character as increment release
CVE_VERSION_SUFFIX ??= ""
+def generate_json_report(d, out_path, link_path):
+ if os.path.exists(d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")):
+ import json
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
+
+ bb.note("Generating JSON CVE summary")
+ index_file = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ summary = {"version":"1", "package": []}
+ with open(index_file) as f:
+ filename = f.readline()
+ while filename:
+ with open(filename.rstrip()) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(summary, data)
+ filename = f.readline()
+
+ summary["package"].sort(key=lambda d: d['name'])
+
+ with open(out_path, "w") as f:
+ json.dump(summary, f, indent=2)
+
+ update_symlinks(out_path, link_path)
+
python cve_save_summary_handler () {
import shutil
import datetime
+ from oe.cve_check import update_symlinks
cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
@@ -71,13 +120,15 @@ python cve_save_summary_handler () {
if os.path.exists(cve_tmp_file):
shutil.copyfile(cve_tmp_file, cve_summary_file)
-
- if cve_summary_file and os.path.exists(cve_summary_file):
- cvefile_link = os.path.join(cvelogpath, cve_summary_name)
-
- if os.path.exists(os.path.realpath(cvefile_link)):
- os.remove(cvefile_link)
- os.symlink(os.path.basename(cve_summary_file), cvefile_link)
+ cvefile_link = os.path.join(cvelogpath, cve_summary_name)
+ update_symlinks(cve_summary_file, cvefile_link)
+ bb.plain("Complete CVE report summary created at: %s" % cvefile_link)
+
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ json_summary_link_name = os.path.join(cvelogpath, d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON"))
+ json_summary_name = os.path.join(cvelogpath, "%s-%s.json" % (cve_summary_name, timestamp))
+ generate_json_report(d, json_summary_name, json_summary_link_name)
+ bb.plain("Complete CVE JSON report summary created at: %s" % json_summary_link_name)
}
addhandler cve_save_summary_handler
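generate_json_report above walks CVE_CHECK_SUMMARY_INDEX_PATH line by line and folds each per-recipe JSON file into a single summary via cve_check_merge_jsons. A minimal stand-in sketch (the file names are assumptions, and the merge is approximated by list concatenation, whereas the real helper also reconciles duplicate package entries):

    import json

    summary = {"version": "1", "package": []}
    for path in ["deploy/cve/curl_cve.json", "deploy/cve/zlib_cve.json"]:
        with open(path) as j:
            data = json.load(j)
            summary["package"].extend(data.get("package", []))

    summary["package"].sort(key=lambda p: p["name"])
    print(json.dumps(summary, indent=2))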
@@ -87,23 +138,25 @@ python do_cve_check () {
"""
Check recipe for patched and unpatched CVEs
"""
+ from oe.cve_check import get_patched_cves
- if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- try:
- patched_cves = get_patches_cves(d)
- except FileNotFoundError:
- bb.fatal("Failure in searching patches")
- whitelisted, patched, unpatched = check_cves(d, patched_cves)
- if patched or unpatched:
- cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, whitelisted, cve_data)
- else:
- bb.note("No CVE database found, skipping CVE check")
+ with bb.utils.fileslocked([d.getVar("CVE_CHECK_DB_FILE_LOCK")], shared=True):
+ if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
+ try:
+ patched_cves = get_patched_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ ignored, patched, unpatched, status = check_cves(d, patched_cves)
+ if patched or unpatched or (d.getVar("CVE_CHECK_COVERAGE") == "1" and status):
+ cve_data = get_cve_info(d, patched + unpatched + ignored)
+ cve_write_data(d, patched, unpatched, ignored, cve_data, status)
+ else:
+ bb.note("No CVE database found, skipping CVE check")
}
-addtask cve_check before do_build after do_fetch
-do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
+addtask cve_check before do_build
+do_cve_check[depends] = "cve-update-nvd2-native:do_fetch"
do_cve_check[nostamp] = "1"
python cve_check_cleanup () {
@@ -111,10 +164,11 @@ python cve_check_cleanup () {
Delete the file used to gather all the CVE information.
"""
bb.utils.remove(e.data.getVar("CVE_CHECK_TMP_FILE"))
+ bb.utils.remove(e.data.getVar("CVE_CHECK_SUMMARY_INDEX_PATH"))
}
addhandler cve_check_cleanup
-cve_check_cleanup[eventmask] = "bb.cooker.CookerExit"
+cve_check_cleanup[eventmask] = "bb.event.BuildCompleted"
python cve_check_write_rootfs_manifest () {
"""
@@ -122,115 +176,107 @@ python cve_check_write_rootfs_manifest () {
"""
import shutil
+ import json
+ from oe.rootfs import image_list_installed_packages
+ from oe.cve_check import cve_check_merge_jsons, update_symlinks
if d.getVar("CVE_CHECK_COPY_FILES") == "1":
deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
if os.path.exists(deploy_file):
bb.utils.remove(deploy_file)
-
- if os.path.exists(d.getVar("CVE_CHECK_TMP_FILE")):
- bb.note("Writing rootfs CVE manifest")
- deploy_dir = d.getVar("DEPLOY_DIR_IMAGE")
- link_name = d.getVar("IMAGE_LINK_NAME")
+ deploy_file_json = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(deploy_file_json):
+ bb.utils.remove(deploy_file_json)
+
+ # Create a list of relevant recipes
+ recipies = set()
+ for pkg in list(image_list_installed_packages(d)):
+ pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+ 'runtime-reverse', pkg)
+ pkg_data = oe.packagedata.read_pkgdatafile(pkg_info)
+ recipies.add(pkg_data["PN"])
+
+ bb.note("Writing rootfs CVE manifest")
+ deploy_dir = d.getVar("IMGDEPLOYDIR")
+ link_name = d.getVar("IMAGE_LINK_NAME")
+
+ json_data = {"version":"1", "package": []}
+ text_data = ""
+ enable_json = d.getVar("CVE_CHECK_FORMAT_JSON") == "1"
+ enable_text = d.getVar("CVE_CHECK_FORMAT_TEXT") == "1"
+
+ save_pn = d.getVar("PN")
+
+ for pkg in recipies:
+ # To be able to use the CVE_CHECK_RECIPE_FILE variable we have to evaluate
+ # it with the different PN names set each time.
+ d.setVar("PN", pkg)
+ if enable_text:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as pfile:
+ text_data += pfile.read()
+
+ if enable_json:
+ pkgfilepath = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ if os.path.exists(pkgfilepath):
+ with open(pkgfilepath) as j:
+ data = json.load(j)
+ cve_check_merge_jsons(json_data, data)
+
+ d.setVar("PN", save_pn)
+
+ if enable_text:
+ link_path = os.path.join(deploy_dir, "%s.cve" % link_name)
manifest_name = d.getVar("CVE_CHECK_MANIFEST")
- cve_tmp_file = d.getVar("CVE_CHECK_TMP_FILE")
-
- shutil.copyfile(cve_tmp_file, manifest_name)
- if manifest_name and os.path.exists(manifest_name):
- manifest_link = os.path.join(deploy_dir, "%s.cve" % link_name)
- # If we already have another manifest, update symlinks
- if os.path.exists(os.path.realpath(manifest_link)):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
- bb.plain("Image CVE report stored in: %s" % manifest_name)
-}
-
-ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
-do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+ with open(manifest_name, "w") as f:
+ f.write(text_data)
-def get_patches_cves(d):
- """
- Get patches that solve CVEs using the "CVE: " tag.
- """
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE report stored in: %s" % manifest_name)
- import re
+ if enable_json:
+ link_path = os.path.join(deploy_dir, "%s.json" % link_name)
+ manifest_name = d.getVar("CVE_CHECK_MANIFEST_JSON")
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
-
- # Matches last CVE-1234-211432 in the file name, also if written
- # with small letters. Not supporting multiple CVE id's in a single
- # file name.
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
-
- patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in src_patches(d):
- patch_file = bb.fetch.decodeurl(url)[2]
-
- if not os.path.isfile(patch_file):
- bb.error("File Not found: %s" % patch_file)
- raise FileNotFoundError
-
- # Check patch file name for CVE ID
- fname_match = cve_file_name_match.search(patch_file)
- if fname_match:
- cve = fname_match.group(1).upper()
- patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
-
- with open(patch_file, "r", encoding="utf-8") as f:
- try:
- patch_text = f.read()
- except UnicodeDecodeError:
- bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
- " trying with iso8859-1" % patch_file)
- f.close()
- with open(patch_file, "r", encoding="iso8859-1") as f:
- patch_text = f.read()
-
- # Search for one or more "CVE: " lines
- text_match = False
- for match in cve_match.finditer(patch_text):
- # Get only the CVEs without the "CVE: " tag
- cves = patch_text[match.start()+5:match.end()]
- for cve in cves.split():
- bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
- patched_cves.add(cve)
- text_match = True
+ with open(manifest_name, "w") as f:
+ json.dump(json_data, f, indent=2)
- if not fname_match and not text_match:
- bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+ update_symlinks(manifest_name, link_path)
+ bb.plain("Image CVE JSON report stored in: %s" % manifest_name)
+}
- return patched_cves
+ROOTFS_POSTPROCESS_COMMAND_prepend = "${@'cve_check_write_rootfs_manifest; ' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_rootfs[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
+do_populate_sdk[recrdeptask] += "${@'do_cve_check' if d.getVar('CVE_CHECK_CREATE_MANIFEST') == '1' else ''}"
def check_cves(d, patched_cves):
"""
Connect to the NVD database and find unpatched cves.
"""
- from oe.cve_check import Version
+ from oe.cve_check import Version, convert_cve_version
pn = d.getVar("PN")
real_pv = d.getVar("PV")
suffix = d.getVar("CVE_VERSION_SUFFIX")
cves_unpatched = []
+ cves_ignored = []
+ cves_status = []
+ cves_in_recipe = False
# CVE_PRODUCT can contain more than one product (eg. curl/libcurl)
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [], [])
+ return ([], [], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
- # If the recipe has been whitlisted we return empty lists
+ # If the recipe has been whitelisted we return empty lists
if pn in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
- return ([], [], [])
+ return ([], [], [], [])
- old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
- if old_cve_whitelist:
- bb.warn("CVE_CHECK_CVE_WHITELIST is deprecated, please use CVE_CHECK_WHITELIST.")
cve_whitelist = d.getVar("CVE_CHECK_WHITELIST").split()
import sqlite3
@@ -239,28 +285,42 @@ def check_cves(d, patched_cves):
# For each of the known product names (e.g. curl has CPEs using curl and libcurl)...
for product in products:
+ cves_in_product = False
if ":" in product:
vendor, product = product.split(":", 1)
else:
vendor = "%"
# Find all relevant CVE IDs.
- for cverow in conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor)):
+ cve_cursor = conn.execute("SELECT DISTINCT ID FROM PRODUCTS WHERE PRODUCT IS ? AND VENDOR LIKE ?", (product, vendor))
+ for cverow in cve_cursor:
cve = cverow[0]
if cve in cve_whitelist:
bb.note("%s-%s has been whitelisted for %s" % (product, pv, cve))
- # TODO: this should be in the report as 'whitelisted'
- patched_cves.add(cve)
+ cves_ignored.append(cve)
continue
elif cve in patched_cves:
bb.note("%s has been patched" % (cve))
continue
+ # Write status once only for each product
+ if not cves_in_product:
+ cves_status.append([product, True])
+ cves_in_product = True
+ cves_in_recipe = True
vulnerable = False
- for row in conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor)):
+ ignored = False
+
+ product_cursor = conn.execute("SELECT * FROM PRODUCTS WHERE ID IS ? AND PRODUCT IS ? AND VENDOR LIKE ?", (cve, product, vendor))
+ for row in product_cursor:
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
+ if cve in cve_whitelist:
+ ignored = True
+
+ version_start = convert_cve_version(version_start)
+ version_end = convert_cve_version(version_end)
if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
@@ -293,18 +353,27 @@ def check_cves(d, patched_cves):
vulnerable = vulnerable_start or vulnerable_end
if vulnerable:
- bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
- cves_unpatched.append(cve)
+ if ignored:
+ bb.note("%s is ignored in %s-%s" % (cve, pn, real_pv))
+ cves_ignored.append(cve)
+ else:
+ bb.note("%s-%s is vulnerable to %s" % (pn, real_pv, cve))
+ cves_unpatched.append(cve)
break
+ product_cursor.close()
if not vulnerable:
bb.note("%s-%s is not vulnerable to %s" % (pn, real_pv, cve))
- # TODO: not patched but not vulnerable
patched_cves.add(cve)
+ cve_cursor.close()
+
+ if not cves_in_product:
+ bb.note("No CVE records found for product %s, pn %s" % (product, pn))
+ cves_status.append([product, False])
conn.close()
- return (list(cve_whitelist), list(patched_cves), cves_unpatched)
+ return (list(cves_ignored), list(patched_cves), cves_unpatched, cves_status)
def get_cve_info(d, cves):
"""
@@ -314,21 +383,23 @@ def get_cve_info(d, cves):
import sqlite3
cve_data = {}
- conn = sqlite3.connect(d.getVar("CVE_CHECK_DB_FILE"))
+ db_file = d.expand("file:${CVE_CHECK_DB_FILE}?mode=ro")
+ conn = sqlite3.connect(db_file, uri=True)
for cve in cves:
- for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,)):
+ cursor = conn.execute("SELECT * FROM NVD WHERE ID IS ?", (cve,))
+ for row in cursor:
cve_data[row[0]] = {}
cve_data[row[0]]["summary"] = row[1]
cve_data[row[0]]["scorev2"] = row[2]
cve_data[row[0]]["scorev3"] = row[3]
cve_data[row[0]]["modified"] = row[4]
cve_data[row[0]]["vector"] = row[5]
-
+ cursor.close()
conn.close()
return cve_data
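
An aside on the uri=True change above: opening the database through an SQLite URI with mode=ro makes the connection read-only, so the many parallel do_cve_check tasks sharing one NVD database cannot take write locks or accidentally modify it. A minimal standalone sketch (the database path and CVE ID are illustrative only):

    import sqlite3

    # Read-only URI connection: parallel readers do not contend for write
    # locks on the shared database file (path is illustrative).
    conn = sqlite3.connect("file:/tmp/nvd.db?mode=ro", uri=True)
    try:
        for row in conn.execute("SELECT * FROM NVD WHERE ID IS ?", ("CVE-2021-44228",)):
            print(row)
    finally:
        conn.close()
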
-def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
+def cve_write_data_text(d, patched, unpatched, whitelisted, cve_data):
"""
Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
CVE manifest if enabled.
@@ -338,20 +409,38 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
fdir_name = d.getVar("FILE_DIRNAME")
layer = fdir_name.split("/")[-3]
- nvd_link = "https://web.nvd.nist.gov/view/vuln/detail?vulnId="
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+    # Early exit: the text format does not report packages without CVEs
+ if not patched+unpatched+whitelisted:
+ return
+
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
write_string = ""
unpatched_cves = []
bb.utils.mkdirhier(os.path.dirname(cve_file))
for cve in sorted(cve_data):
is_patched = cve in patched
- if is_patched and (d.getVar("CVE_CHECK_REPORT_PATCHED") != "1"):
+ is_ignored = cve in whitelisted
+
+ if (is_patched or is_ignored) and not report_all:
continue
+
write_string += "LAYER: %s\n" % layer
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in whitelisted:
+ if is_ignored:
write_string += "CVE STATUS: Whitelisted\n"
elif is_patched:
write_string += "CVE STATUS: Patched\n"
@@ -364,23 +453,138 @@ def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
write_string += "VECTOR: %s\n" % cve_data[cve]["vector"]
write_string += "MORE INFORMATION: %s%s\n\n" % (nvd_link, cve)
- if unpatched_cves:
+ if unpatched_cves and d.getVar("CVE_CHECK_SHOW_WARNINGS") == "1":
bb.warn("Found unpatched CVE (%s), for more information check %s" % (" ".join(unpatched_cves),cve_file))
- if write_string:
- with open(cve_file, "w") as f:
- bb.note("Writing file %s with CVE information" % cve_file)
+ with open(cve_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % cve_file)
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ bb.utils.mkdirhier(cvelogpath)
+
+ with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
+ f.write("%s" % write_string)
+
+def cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file):
+ """
+    Write CVE information in the JSON format to WORKDIR and to
+    CVE_CHECK_DIR. If the CVE manifest is enabled, also write fragment
+    files that will be assembled at the end in cve_check_write_rootfs_manifest.
+ """
+
+ import json
+
+ write_string = json.dumps(output, indent=2)
+ with open(direct_file, "w") as f:
+ bb.note("Writing file %s with CVE information" % direct_file)
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_COPY_FILES") == "1":
+ bb.utils.mkdirhier(os.path.dirname(deploy_file))
+ with open(deploy_file, "w") as f:
+ f.write(write_string)
+
+ if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
+ cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
+ index_path = d.getVar("CVE_CHECK_SUMMARY_INDEX_PATH")
+ bb.utils.mkdirhier(cvelogpath)
+ fragment_file = os.path.basename(deploy_file)
+ fragment_path = os.path.join(cvelogpath, fragment_file)
+ with open(fragment_path, "w") as f:
f.write(write_string)
+ with open(index_path, "a+") as f:
+ f.write("%s\n" % fragment_path)
+
+def cve_write_data_json(d, patched, unpatched, ignored, cve_data, cve_status):
+ """
+ Prepare CVE data for the JSON format, then write it.
+ """
+
+ output = {"version":"1", "package": []}
+ nvd_link = "https://nvd.nist.gov/vuln/detail/"
+
+ fdir_name = d.getVar("FILE_DIRNAME")
+ layer = fdir_name.split("/")[-3]
+
+ include_layers = d.getVar("CVE_CHECK_LAYER_INCLUDELIST").split()
+ exclude_layers = d.getVar("CVE_CHECK_LAYER_EXCLUDELIST").split()
+
+ report_all = d.getVar("CVE_CHECK_REPORT_PATCHED") == "1"
+
+ if exclude_layers and layer in exclude_layers:
+ return
+
+ if include_layers and layer not in include_layers:
+ return
+
+ unpatched_cves = []
+
+ product_data = []
+ for s in cve_status:
+ p = {"product": s[0], "cvesInRecord": "Yes"}
+        if not s[1]:
+ p["cvesInRecord"] = "No"
+ product_data.append(p)
+
+ package_version = "%s%s" % (d.getVar("EXTENDPE"), d.getVar("PV"))
+ package_data = {
+ "name" : d.getVar("PN"),
+ "layer" : layer,
+ "version" : package_version,
+ "products": product_data
+ }
+ cve_list = []
+
+ for cve in sorted(cve_data):
+ is_patched = cve in patched
+ is_ignored = cve in ignored
+ status = "Unpatched"
+ if (is_patched or is_ignored) and not report_all:
+ continue
+ if is_ignored:
+ status = "Ignored"
+ elif is_patched:
+ status = "Patched"
+ else:
+ # default value of status is Unpatched
+ unpatched_cves.append(cve)
+
+ issue_link = "%s%s" % (nvd_link, cve)
- if d.getVar("CVE_CHECK_COPY_FILES") == "1":
- deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE")
- bb.utils.mkdirhier(os.path.dirname(deploy_file))
- with open(deploy_file, "w") as f:
- f.write(write_string)
+ cve_item = {
+ "id" : cve,
+ "summary" : cve_data[cve]["summary"],
+ "scorev2" : cve_data[cve]["scorev2"],
+ "scorev3" : cve_data[cve]["scorev3"],
+ "vector" : cve_data[cve]["vector"],
+ "status" : status,
+ "link": issue_link
+ }
+ cve_list.append(cve_item)
- if d.getVar("CVE_CHECK_CREATE_MANIFEST") == "1":
- cvelogpath = d.getVar("CVE_CHECK_SUMMARY_DIR")
- bb.utils.mkdirhier(cvelogpath)
+ package_data["issue"] = cve_list
+ output["package"].append(package_data)
+
+ direct_file = d.getVar("CVE_CHECK_LOG_JSON")
+ deploy_file = d.getVar("CVE_CHECK_RECIPE_FILE_JSON")
+ manifest_file = d.getVar("CVE_CHECK_SUMMARY_FILE_NAME_JSON")
+
+ cve_check_write_json_output(d, output, direct_file, deploy_file, manifest_file)
+
+def cve_write_data(d, patched, unpatched, ignored, cve_data, status):
+ """
+ Write CVE data in each enabled format.
+ """
- with open(d.getVar("CVE_CHECK_TMP_FILE"), "a") as f:
- f.write("%s" % write_string)
+ if d.getVar("CVE_CHECK_FORMAT_TEXT") == "1":
+ cve_write_data_text(d, patched, unpatched, ignored, cve_data)
+ if d.getVar("CVE_CHECK_FORMAT_JSON") == "1":
+ cve_write_data_json(d, patched, unpatched, ignored, cve_data, status)
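
For reference, the rootfs manifest assembly earlier in this file leans on cve_check_merge_jsons() to fold each recipe's JSON fragment into one report. The real helper lives in meta/lib/oe/cve_check.py and may differ; a minimal sketch of the idea, assuming the {"version": "1", "package": [...]} schema emitted by cve_write_data_json() above:

    def merge_cve_reports(merged, fragment):
        # Sketch only: adopt the first fragment wholesale, then append
        # each subsequent recipe's "package" entries to the combined report.
        if not merged:
            merged.update(fragment)
            return
        if merged["version"] != fragment["version"]:
            raise ValueError("Mismatched CVE report versions")
        merged["package"].extend(fragment["package"])
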
diff --git a/meta/classes/devtool-source.bbclass b/meta/classes/devtool-source.bbclass
index 280d6009f3..41900e651f 100644
--- a/meta/classes/devtool-source.bbclass
+++ b/meta/classes/devtool-source.bbclass
@@ -199,6 +199,7 @@ python devtool_post_patch() {
# Run do_patch function with the override applied
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', ':'.join(no_overrides))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the dev branch with the no-overrides one
@@ -216,7 +217,8 @@ python devtool_post_patch() {
# Reset back to the initial commit on a new branch
bb.process.run('git checkout %s -b devtool-override-%s' % (initial_rev, override), cwd=srcsubdir)
# Run do_patch function with the override applied
- localdata.appendVar('OVERRIDES', ':%s' % override)
+ localdata.setVar('OVERRIDES', ':'.join(no_overrides + [override]))
+ localdata.setVar('FILESOVERRIDES', ':'.join(no_overrides + [override]))
bb.build.exec_func('do_patch', localdata)
rm_patches()
# Now we need to reconcile the new branch with the no-overrides one
diff --git a/meta/classes/devupstream.bbclass b/meta/classes/devupstream.bbclass
index 7780c5482c..97e137cb40 100644
--- a/meta/classes/devupstream.bbclass
+++ b/meta/classes/devupstream.bbclass
@@ -4,7 +4,7 @@
#
# Usage:
# BBCLASSEXTEND = "devupstream:target"
-# SRC_URI_class-devupstream = "git://git.example.com/example"
+# SRC_URI_class-devupstream = "git://git.example.com/example;branch=master"
# SRCREV_class-devupstream = "abcdef"
#
# If the first entry in SRC_URI is a git: URL then S is rewritten to
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
index c7fcdca6ef..9c9451e528 100644
--- a/meta/classes/externalsrc.bbclass
+++ b/meta/classes/externalsrc.bbclass
@@ -60,7 +60,7 @@ python () {
if externalsrcbuild:
d.setVar('B', externalsrcbuild)
else:
- d.setVar('B', '${WORKDIR}/${BPN}-${PV}/')
+ d.setVar('B', '${WORKDIR}/${BPN}-${PV}')
local_srcuri = []
fetch = bb.fetch2.Fetch((d.getVar('SRC_URI') or '').split(), d)
@@ -108,6 +108,15 @@ python () {
if local_srcuri and task in fetch_tasks:
continue
bb.build.deltask(task, d)
+ if bb.data.inherits_class('reproducible_build', d) and task == 'do_unpack':
+ # The reproducible_build's create_source_date_epoch_stamp function must
+ # be run after the source is available and before the
+ # do_deploy_source_date_epoch task. In the normal case, it's attached
+            # to do_unpack as a postfunc, but since we removed do_unpack (above)
+ # we need to move the function elsewhere. The easiest thing to do is
+ # move it into the prefuncs of the do_deploy_source_date_epoch task.
+ # This is safe, as externalsrc runs with the source already unpacked.
+ d.prependVarFlag('do_deploy_source_date_epoch', 'prefuncs', 'create_source_date_epoch_stamp ')
d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
@@ -198,8 +207,8 @@ def srctree_hash_files(d, srcdir=None):
try:
git_dir = os.path.join(s_dir,
subprocess.check_output(['git', '-C', s_dir, 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
- top_git_dir = os.path.join(s_dir, subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'],
- stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
+ top_git_dir = os.path.join(d.getVar("TOPDIR"),
+ subprocess.check_output(['git', '-C', d.getVar("TOPDIR"), 'rev-parse', '--git-dir'], stderr=subprocess.DEVNULL).decode("utf-8").rstrip())
if git_dir == top_git_dir:
git_dir = None
except subprocess.CalledProcessError:
@@ -216,15 +225,16 @@ def srctree_hash_files(d, srcdir=None):
env['GIT_INDEX_FILE'] = tmp_index.name
subprocess.check_output(['git', 'add', '-A', '.'], cwd=s_dir, env=env)
git_sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env).decode("utf-8")
- submodule_helper = subprocess.check_output(['git', 'submodule--helper', 'list'], cwd=s_dir, env=env).decode("utf-8")
- for line in submodule_helper.splitlines():
- module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
- if os.path.isdir(module_dir):
- proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
- proc.communicate()
- proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
- stdout, _ = proc.communicate()
- git_sha1 += stdout.decode("utf-8")
+ if os.path.exists(os.path.join(s_dir, ".gitmodules")) and os.path.getsize(os.path.join(s_dir, ".gitmodules")) > 0:
+ submodule_helper = subprocess.check_output(["git", "config", "--file", ".gitmodules", "--get-regexp", "path"], cwd=s_dir, env=env).decode("utf-8")
+ for line in submodule_helper.splitlines():
+ module_dir = os.path.join(s_dir, line.rsplit(maxsplit=1)[1])
+ if os.path.isdir(module_dir):
+ proc = subprocess.Popen(['git', 'add', '-A', '.'], cwd=module_dir, env=env, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ proc.communicate()
+ proc = subprocess.Popen(['git', 'write-tree'], cwd=module_dir, env=env, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
+ stdout, _ = proc.communicate()
+ git_sha1 += stdout.decode("utf-8")
sha1 = hashlib.sha1(git_sha1.encode("utf-8")).hexdigest()
with open(oe_hash_file, 'w') as fobj:
fobj.write(sha1)
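
The switch above from 'git submodule--helper list' (an internal command removed in newer Git) to parsing .gitmodules is self-contained enough to show in isolation. Each matching config line has the form 'submodule.<name>.path <path>'; a sketch under that assumption:

    import subprocess

    def list_submodule_paths(s_dir):
        # Returns the relative paths of all submodules declared in
        # .gitmodules, or an empty list if there are none.
        try:
            out = subprocess.check_output(
                ["git", "config", "--file", ".gitmodules", "--get-regexp", "path"],
                cwd=s_dir).decode("utf-8")
        except subprocess.CalledProcessError:
            return []
        return [line.rsplit(maxsplit=1)[1] for line in out.splitlines()]
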
diff --git a/meta/classes/fs-uuid.bbclass b/meta/classes/fs-uuid.bbclass
index 9b53dfba7a..731ea575bd 100644
--- a/meta/classes/fs-uuid.bbclass
+++ b/meta/classes/fs-uuid.bbclass
@@ -4,7 +4,7 @@
def get_rootfs_uuid(d):
import subprocess
rootfs = d.getVar('ROOTFS')
- output = subprocess.check_output(['tune2fs', '-l', rootfs])
+ output = subprocess.check_output(['tune2fs', '-l', rootfs], text=True)
for line in output.split('\n'):
if line.startswith('Filesystem UUID:'):
uuid = line.split()[-1]
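
The text=True fix above matters because on Python 3 check_output() returns bytes by default, so the subsequent output.split('\n') would raise TypeError. A two-line illustration:

    import subprocess

    raw = subprocess.check_output(["echo", "hi"])             # b'hi\n' (bytes)
    txt = subprocess.check_output(["echo", "hi"], text=True)  # 'hi\n' (str)
    # raw.split('\n') raises TypeError; txt.split('\n') == ['hi', '']
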
diff --git a/meta/classes/go.bbclass b/meta/classes/go.bbclass
index 16e46398b1..21b1a0271e 100644
--- a/meta/classes/go.bbclass
+++ b/meta/classes/go.bbclass
@@ -118,7 +118,7 @@ go_do_install() {
tar -C ${B} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' pkg | \
tar -C ${D}${libdir}/go --no-same-owner -xf -
- if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
+ if ls ${B}/${GO_BUILD_BINDIR}/* >/dev/null 2>/dev/null ; then
install -d ${D}${bindir}
install -m 0755 ${B}/${GO_BUILD_BINDIR}/* ${D}${bindir}/
fi
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 1900eff412..fbf7206d04 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -124,7 +124,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY', 'IMAGE_LOCALES_ARCHIVE',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -176,6 +176,9 @@ IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS').split()))}"
+# By default, create a locale archive
+IMAGE_LOCALES_ARCHIVE ?= '1'
+
# Prefer image, but use the fallback files for lookups if the image ones
# aren't yet available.
PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
@@ -308,7 +311,7 @@ fakeroot python do_image_qa () {
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
except Exception as e:
- qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
+ qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (cmd, e)
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -434,7 +437,7 @@ python () {
localdata.delVar('DATETIME')
localdata.delVar('DATE')
localdata.delVar('TMPDIR')
- vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude', True) or '').split()
+ vardepsexclude = (d.getVarFlag('IMAGE_CMD_' + realt, 'vardepsexclude') or '').split()
for dep in vardepsexclude:
localdata.delVar(dep)
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
index ff42ac9423..6dc0e094d0 100644
--- a/meta/classes/image_types.bbclass
+++ b/meta/classes/image_types.bbclass
@@ -240,7 +240,7 @@ EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLO
EXTRA_IMAGECMD_ext2 ?= "-i 4096"
EXTRA_IMAGECMD_ext3 ?= "-i 4096"
EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= "-n 4096"
+EXTRA_IMAGECMD_btrfs ?= "-n 4096 --shrink"
EXTRA_IMAGECMD_f2fs ?= ""
do_image_cpio[depends] += "cpio-native:do_populate_sysroot"
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index eb19425652..d6da53252f 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -452,12 +452,14 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
Check for build paths inside target files and error if not found in the whitelist
"""
+ import stat
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
- # Ignore symlinks
- if os.path.islink(path):
+ # Ignore symlinks/devs/fifos
+ mode = os.lstat(path).st_mode
+ if stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or stat.S_ISSOCK(mode):
return
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
@@ -945,7 +947,7 @@ def package_qa_check_host_user(path, name, d, elf, messages):
dest = d.getVar('PKGDEST')
pn = d.getVar('PN')
- home = os.path.join(dest, 'home')
+ home = os.path.join(dest, name, 'home')
if path == home or path.startswith(home + os.sep):
return
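
The lstat-based check above, shown as a standalone predicate for clarity: only regular files are scanned for build paths, while symlinks, block and character devices, FIFOs and sockets are skipped, since opening such paths could fail or block:

    import os, stat

    def is_scannable(path):
        # Mirrors the buildpaths check above: skip every special file type.
        mode = os.lstat(path).st_mode
        return not (stat.S_ISLNK(mode) or stat.S_ISBLK(mode) or
                    stat.S_ISFIFO(mode) or stat.S_ISCHR(mode) or
                    stat.S_ISSOCK(mode))
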
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 07ec242e63..4cd08b96fb 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -61,8 +61,8 @@ HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_KERNEL_ARCH} -fuse-ld=bfd ${DEBUG_PREFIX_MAP} -fdebug-prefix-map=${STAGING_KERNEL_DIR}=${KERNEL_SRC_PATH} -fdebug-prefix-map=${STAGING_KERNEL_BUILDDIR}=${KERNEL_SRC_PATH}"
KERNEL_LD = "${CCACHE}${HOST_PREFIX}ld.bfd ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${CCACHE}${HOST_PREFIX}ar ${HOST_AR_KERNEL_ARCH}"
-TOOLCHAIN = "gcc"
+TOOLCHAIN ?= "gcc"
diff --git a/meta/classes/kernel-devicetree.bbclass b/meta/classes/kernel-devicetree.bbclass
index 81dda8003f..27a4905ac6 100644
--- a/meta/classes/kernel-devicetree.bbclass
+++ b/meta/classes/kernel-devicetree.bbclass
@@ -1,14 +1,20 @@
# Support for device tree generation
-PACKAGES_append = " \
- ${KERNEL_PACKAGE_NAME}-devicetree \
- ${@[d.getVar('KERNEL_PACKAGE_NAME') + '-image-zimage-bundle', ''][d.getVar('KERNEL_DEVICETREE_BUNDLE') != '1']} \
-"
+python () {
+ if not bb.data.inherits_class('nopackages', d):
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-devicetree")
+ if d.getVar('KERNEL_DEVICETREE_BUNDLE') == '1':
+ d.appendVar("PACKAGES", " ${KERNEL_PACKAGE_NAME}-image-zimage-bundle")
+}
+
FILES_${KERNEL_PACKAGE_NAME}-devicetree = "/${KERNEL_IMAGEDEST}/*.dtb /${KERNEL_IMAGEDEST}/*.dtbo"
FILES_${KERNEL_PACKAGE_NAME}-image-zimage-bundle = "/${KERNEL_IMAGEDEST}/zImage-*.dtb.bin"
# Generate kernel+devicetree bundle
KERNEL_DEVICETREE_BUNDLE ?= "0"
+# dtc flags passed via the DTC_FLAGS environment variable
+KERNEL_DTC_FLAGS ?= ""
+
normalize_dtb () {
dtb="$1"
if echo $dtb | grep -q '/dts/'; then
@@ -50,6 +56,10 @@ do_configure_append() {
}
do_compile_append() {
+ if [ -n "${KERNEL_DTC_FLAGS}" ]; then
+ export DTC_FLAGS="${KERNEL_DTC_FLAGS}"
+ fi
+
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
diff --git a/meta/classes/kernel-fitimage.bbclass b/meta/classes/kernel-fitimage.bbclass
index 5f5e9dd444..7c7bcd3fc0 100644
--- a/meta/classes/kernel-fitimage.bbclass
+++ b/meta/classes/kernel-fitimage.bbclass
@@ -1,5 +1,7 @@
inherit kernel-uboot kernel-artifact-names uboot-sign
+KERNEL_IMAGETYPE_REPLACEMENT = ""
+
python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
@@ -21,6 +23,8 @@ python __anonymous () {
else:
replacementtype = "zImage"
+ d.setVar("KERNEL_IMAGETYPE_REPLACEMENT", replacementtype)
+
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
# to kernel.bbclass . We have to override it, since we pack zImage
# (at least for now) into the fitImage .
@@ -45,6 +49,8 @@ python __anonymous () {
if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
@@ -56,6 +62,12 @@ FIT_HASH_ALG ?= "sha256"
# fitImage Signature Algo
FIT_SIGN_ALG ?= "rsa2048"
+# fitImage Padding Algo
+FIT_PAD_ALG ?= "pkcs-1.5"
+
+# Arguments passed to mkimage for signing
+UBOOT_MKIMAGE_SIGN_ARGS ?= ""
+
#
# Emit the fitImage ITS header
#
@@ -175,6 +187,43 @@ EOF
}
#
+# Emit the fitImage ITS u-boot script section
+#
+# $1 ... .its filename
+# $2 ... Image counter
+# $3 ... Path to boot script image
+fitimage_emit_section_boot_script() {
+
+ bootscr_csum="${FIT_HASH_ALG}"
+ bootscr_sign_algo="${FIT_SIGN_ALG}"
+ bootscr_sign_keyname="${UBOOT_SIGN_IMG_KEYNAME}"
+
+ cat << EOF >> $1
+ bootscr-$2 {
+ description = "U-boot script";
+ data = /incbin/("$3");
+ type = "script";
+ arch = "${UBOOT_ARCH}";
+ compression = "none";
+ hash-1 {
+ algo = "$bootscr_csum";
+ };
+ };
+EOF
+
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${FIT_SIGN_INDIVIDUAL}" = "1" -a -n "$bootscr_sign_keyname" ] ; then
+ sed -i '$ d' $1
+ cat << EOF >> $1
+ signature-1 {
+ algo = "$bootscr_csum,$bootscr_sign_algo";
+ key-name-hint = "$bootscr_sign_keyname";
+ };
+ };
+EOF
+ fi
+}
+
+#
# Emit the fitImage ITS setup section
#
# $1 ... .its filename
@@ -244,12 +293,14 @@ EOF
# $2 ... Linux kernel ID
# $3 ... DTB image name
# $4 ... ramdisk ID
-# $5 ... config ID
-# $6 ... default flag
+# $5 ... u-boot script ID
+# $6 ... config ID
+# $7 ... default flag
fitimage_emit_section_config() {
conf_csum="${FIT_HASH_ALG}"
conf_sign_algo="${FIT_SIGN_ALG}"
+ conf_padding_algo="${FIT_PAD_ALG}"
if [ "${UBOOT_SIGN_ENABLE}" = "1" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -260,6 +311,7 @@ fitimage_emit_section_config() {
kernel_line=""
fdt_line=""
ramdisk_line=""
+ bootscr_line=""
setup_line=""
default_line=""
@@ -282,21 +334,28 @@ fitimage_emit_section_config() {
fi
if [ -n "${5}" ]; then
+ conf_desc="${conf_desc}${sep}u-boot script"
+ sep=", "
+ bootscr_line="bootscr = \"bootscr-${5}\";"
+ fi
+
+ if [ -n "${6}" ]; then
conf_desc="${conf_desc}${sep}setup"
- setup_line="setup = \"setup-${5}\";"
+ setup_line="setup = \"setup-${6}\";"
fi
- if [ "${6}" = "1" ]; then
+ if [ "${7}" = "1" ]; then
default_line="default = \"conf-${3}\";"
fi
cat << EOF >> ${1}
${default_line}
conf-${3} {
- description = "${6} ${conf_desc}";
+ description = "${7} ${conf_desc}";
${kernel_line}
${fdt_line}
${ramdisk_line}
+ ${bootscr_line}
${setup_line}
hash-1 {
algo = "${conf_csum}";
@@ -324,6 +383,11 @@ EOF
fi
if [ -n "${5}" ]; then
+ sign_line="${sign_line}${sep}\"bootscr\""
+ sep=", "
+ fi
+
+ if [ -n "${6}" ]; then
sign_line="${sign_line}${sep}\"setup\""
fi
@@ -333,6 +397,7 @@ EOF
signature-1 {
algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
+ padding = "${conf_padding_algo}";
${sign_line}
};
EOF
@@ -355,6 +420,7 @@ fitimage_assemble() {
DTBS=""
ramdiskcount=${3}
setupcount=""
+ bootscr_id=""
rm -f ${1} arch/${ARCH}/boot/${2}
fitimage_emit_fit_header ${1}
@@ -365,7 +431,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} imagestart
uboot_prep_kimage
- fitimage_emit_section_kernel ${1} "${kernelcount}" linux.bin "${linux_comp}"
+ fitimage_emit_section_kernel $1 $kernelcount linux.bin "$linux_comp"
#
# Step 2: Prepare a DTB image section
@@ -399,7 +465,21 @@ fitimage_assemble() {
fi
#
- # Step 3: Prepare a setup section. (For x86)
+ # Step 3: Prepare a u-boot script section
+ #
+
+ if [ -n "${UBOOT_ENV}" ] && [ -d "${STAGING_DIR_HOST}/boot" ]; then
+ if [ -e "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY}" ]; then
+ cp ${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} ${B}
+ bootscr_id="${UBOOT_ENV_BINARY}"
+ fitimage_emit_section_boot_script ${1} "${bootscr_id}" ${UBOOT_ENV_BINARY}
+ else
+ bbwarn "${STAGING_DIR_HOST}/boot/${UBOOT_ENV_BINARY} not found."
+ fi
+ fi
+
+ #
+ # Step 4: Prepare a setup section. (For x86)
#
if [ -e arch/${ARCH}/boot/setup.bin ]; then
setupcount=1
@@ -407,9 +487,9 @@ fitimage_assemble() {
fi
#
- # Step 4: Prepare a ramdisk section.
+ # Step 5: Prepare a ramdisk section.
#
- if [ "x${ramdiskcount}" = "x1" ] ; then
+ if [ "x${ramdiskcount}" = "x1" ] && [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
# Find and use the first initramfs image archive type we find
for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
@@ -430,7 +510,7 @@ fitimage_assemble() {
fi
#
- # Step 5: Prepare a configurations section
+ # Step 6: Prepare a configurations section
#
fitimage_emit_section_maint ${1} confstart
@@ -439,9 +519,9 @@ fitimage_assemble() {
for DTB in ${DTBS}; do
dtb_ext=${DTB##*.}
if [ "${dtb_ext}" = "dtbo" ]; then
- fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "" "${DTB}" "" "${bootscr_id}" "" "`expr ${i} = ${dtbcount}`"
else
- fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+ fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${bootscr_id}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
fi
i=`expr ${i} + 1`
done
@@ -452,7 +532,7 @@ fitimage_assemble() {
fitimage_emit_section_maint ${1} fitend
#
- # Step 6: Assemble the image
+ # Step 7: Assemble the image
#
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
@@ -460,7 +540,7 @@ fitimage_assemble() {
arch/${ARCH}/boot/${2}
#
- # Step 7: Sign the image and add public key to U-Boot dtb
+ # Step 8: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
add_key_to_u_boot=""
@@ -474,7 +554,8 @@ fitimage_assemble() {
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
$add_key_to_u_boot \
- -r arch/${ARCH}/boot/${2}
+ -r arch/${ARCH}/boot/${2} \
+ ${UBOOT_MKIMAGE_SIGN_ARGS}
fi
}
@@ -491,7 +572,11 @@ do_assemble_fitimage_initramfs() {
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage" && \
test -n "${INITRAMFS_IMAGE}" ; then
cd ${B}
- fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" = "1" ]; then
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage ""
+ else
+ fitimage_assemble fit-image-${INITRAMFS_IMAGE}.its fitImage-${INITRAMFS_IMAGE} 1
+ fi
fi
}
@@ -502,22 +587,32 @@ kernel_do_deploy[vardepsexclude] = "DATETIME"
kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
- echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fit-image.its source file..."
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
+ fi
- echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ echo "Copying linux.bin file..."
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
- echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ if [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ]; then
+ echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ if [ -n "${KERNEL_FIT_LINK_NAME}" ] ; then
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ fi
fi
if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
# UBOOT_DTB_IMAGE is a realfile, but we can't use
@@ -527,3 +622,13 @@ kernel_do_deploy_append() {
fi
fi
}
+
+# For initramfs bundles, the function below removes do_assemble_fitimage:
+# FIT generation is handled by do_assemble_fitimage_initramfs instead, so
+# do_assemble_fitimage is not needed and should not be executed.
+python () {
+ d.appendVarFlag('do_compile', 'vardeps', ' INITRAMFS_IMAGE_BUNDLE')
+ if d.getVar('INITRAMFS_IMAGE_BUNDLE') == "1":
+ bb.build.deltask('do_assemble_fitimage', d)
+}
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 66cce92362..2abbc2ff66 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -194,7 +194,7 @@ do_kernel_metadata() {
# SRC_URI. If they were supplied, we convert them into include directives
# for the update part of the process
for f in ${feat_dirs}; do
- if [ -d "${WORKDIR}/$f/meta" ]; then
+ if [ -d "${WORKDIR}/$f/kernel-meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
@@ -269,6 +269,8 @@ do_kernel_metadata() {
bbnote "KERNEL_FEATURES: $KERNEL_FEATURES_FINAL"
bbnote "Final scc/cfg list: $sccs_defconfig $bsp_definition $sccs $KERNEL_FEATURES_FINAL"
fi
+
+ set -e
}
do_patch() {
@@ -298,6 +300,8 @@ do_patch() {
fi
done
fi
+
+ set -e
}
do_kernel_checkout() {
@@ -320,6 +324,21 @@ do_kernel_checkout() {
fi
fi
cd ${S}
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+
+ # checkout and clobber any unimportant files
+ git checkout -f ${machine_branch}
else
# case: we have no git repository at all.
# To support low bandwidth options for building the kernel, we'll just
@@ -342,20 +361,7 @@ do_kernel_checkout() {
git clean -d -f
fi
- # convert any remote branches to local tracking ones
- for i in `git branch -a --no-color | grep remotes | grep -v HEAD`; do
- b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
- git show-ref --quiet --verify -- "refs/heads/$b"
- if [ $? -ne 0 ]; then
- git branch $b $i > /dev/null
- fi
- done
-
- # Create a working tree copy of the kernel by checking out a branch
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
-
- # checkout and clobber any unimportant files
- git checkout -f ${machine_branch}
+ set -e
}
do_kernel_checkout[dirs] = "${S}"
@@ -523,6 +529,8 @@ do_validate_branches() {
kgit-s2q --clean
fi
fi
+
+ set -e
}
OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index 518aaef724..ca7530095e 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -75,7 +75,7 @@ python __anonymous () {
# KERNEL_IMAGETYPES may contain a mixture of image types supported directly
# by the kernel build system and types which are created by post-processing
# the output of the kernel build system (e.g. compressing vmlinux ->
- # vmlinux.gz in kernel_do_compile()).
+ # vmlinux.gz in kernel_do_transform_kernel()).
# KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
# directly by the kernel build system.
if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
@@ -91,6 +91,8 @@ python __anonymous () {
imagedest = d.getVar('KERNEL_IMAGEDEST')
for type in types.split():
+ if bb.data.inherits_class('nopackages', d):
+ continue
typelower = type.lower()
d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
@@ -104,6 +106,8 @@ python __anonymous () {
# standalone for use by wic and other tools.
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ if image and bb.utils.to_boolean(d.getVar('INITRAMFS_IMAGE_BUNDLE')):
+ bb.build.addtask('do_transform_bundled_initramfs', 'do_deploy', 'do_bundle_initramfs', d)
# NOTE: setting INITRAMFS_TASK is for backward compatibility
# The preferred method is to set INITRAMFS_IMAGE, because
@@ -139,13 +143,14 @@ do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILD
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
python do_symlink_kernsrc () {
s = d.getVar("S")
- if s[-1] == '/':
- # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
- s=s[:-1]
kernsrc = d.getVar("STAGING_KERNEL_DIR")
if s != kernsrc:
bb.utils.mkdirhier(kernsrc)
bb.utils.remove(kernsrc, recurse=True)
+ if s[-1] == '/':
+ # drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as
+ # directory name and fail
+ s = s[:-1]
if d.getVar("EXTERNALSRC"):
# With EXTERNALSRC S will not be wiped so we can symlink to it
os.symlink(s, kernsrc)
@@ -278,6 +283,14 @@ do_bundle_initramfs () {
}
do_bundle_initramfs[dirs] = "${B}"
+kernel_do_transform_bundled_initramfs() {
+ # vmlinux.gz is not built by kernel
+ if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
+ gzip -9cn < ${KERNEL_OUTPUT_DIR}/vmlinux.initramfs > ${KERNEL_OUTPUT_DIR}/vmlinux.gz.initramfs
+ fi
+}
+do_transform_bundled_initramfs[dirs] = "${B}"
+
python do_devshell_prepend () {
os.environ["LDFLAGS"] = ''
}
@@ -309,6 +322,10 @@ kernel_do_compile() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
# The $use_alternate_initrd is only set from
# do_bundle_initramfs() This variable is specifically for the
@@ -327,12 +344,17 @@ kernel_do_compile() {
for typeformake in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
oe_runmake ${typeformake} CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd
done
+}
+
+kernel_do_transform_kernel() {
# vmlinux.gz is not built by kernel
if (echo "${KERNEL_IMAGETYPES}" | grep -wq "vmlinux\.gz"); then
mkdir -p "${KERNEL_OUTPUT_DIR}"
gzip -9cn < ${B}/vmlinux > "${KERNEL_OUTPUT_DIR}/vmlinux.gz"
fi
}
+do_transform_kernel[dirs] = "${B}"
+addtask transform_kernel after do_compile before do_install
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
@@ -350,6 +372,10 @@ do_compile_kernelmodules() {
export KBUILD_BUILD_TIMESTAMP="$ts"
export KCONFIG_NOTIMESTAMP=1
bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ else
+ ts=`LC_ALL=C date`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
cc_extra=$(get_cc_option)
@@ -379,8 +405,8 @@ kernel_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo MODLIB=${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION} INSTALL_FW_PATH=${D}${nonarch_base_libdir}/firmware modules_install
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
- rm "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
+ rm -f "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/source"
# If the kernel/ directory is empty remove it to prevent QA issues
rmdir --ignore-fail-on-non-empty "${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}/kernel"
else
@@ -392,12 +418,26 @@ kernel_do_install() {
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
+
+ #
+	# When including an initramfs bundle inside a FIT image, the fitImage is created by
+	# do_assemble_fitimage_initramfs after the install task, which in turn runs after the
+	# initramfs bundle has been generated by do_bundle_initramfs. The install task therefore
+	# must not try to install the fitImage, as it has not been generated yet. Once the
+	# fitImage exists, the deploy task copies it from the build directory to the deploy folder.
+ #
+
for imageType in ${KERNEL_IMAGETYPES} ; do
- install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
- if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
- ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ if [ $imageType != "fitImage" ] || [ "${INITRAMFS_IMAGE_BUNDLE}" != "1" ] ; then
+ install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
+ if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
+ ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
+ fi
fi
done
+
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
@@ -570,11 +610,11 @@ do_savedefconfig() {
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
-inherit cml1
+inherit cml1 pkgconfig
KCONFIG_CONFIG_COMMAND_append = " LD='${KERNEL_LD}' HOSTLDFLAGS='${BUILD_LDFLAGS}'"
-EXPORT_FUNCTIONS do_compile do_install do_configure
+EXPORT_FUNCTIONS do_compile do_transform_kernel do_transform_bundled_initramfs do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
@@ -680,7 +720,7 @@ do_sizecheck() {
at_least_one_fits=
for imageType in ${KERNEL_IMAGETYPES} ; do
size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ if [ $size -gt ${KERNEL_IMAGE_MAXSIZE} ]; then
bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
else
at_least_one_fits=y
@@ -719,7 +759,7 @@ kernel_do_deploy() {
fi
if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
- for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
+ for imageType in ${KERNEL_IMAGETYPES} ; do
if [ "$imageType" = "fitImage" ] ; then
continue
fi
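
Both timestamp hunks above follow the same pattern: honour a reproducible timestamp when one is configured, otherwise pin an explicit current-time value so the kernel's own fallback cannot pick a locale-dependent string. A sketch of the equivalent selection logic (hypothetical helper, not part of the class):

    import os, time

    def kbuild_build_timestamp():
        # Prefer SOURCE_DATE_EPOCH for reproducible builds; otherwise fall
        # back to the current time, matching the shell `else` branch above.
        epoch = os.environ.get("SOURCE_DATE_EPOCH")
        t = int(epoch) if epoch else time.time()
        return time.asctime(time.gmtime(t))
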
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index de3b4250c7..72f489d673 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -45,6 +45,7 @@ PACKAGE_NO_GCONV ?= "0"
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
+mkdir ${libdir}/locale
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s %s
}
@@ -355,7 +356,7 @@ python package_do_split_gconvs () {
m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
- d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
+ d.setVarFlag("oe_runmake", "progress", r"outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
index dc91118340..806b5069fd 100644
--- a/meta/classes/license.bbclass
+++ b/meta/classes/license.bbclass
@@ -31,8 +31,8 @@ python do_populate_lic() {
f.write("%s: %s\n" % (key, info[key]))
}
-PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '')).split())}"
-# it would be better to copy them in do_install_append, but find_license_filesa is python
+PSEUDO_IGNORE_PATHS .= ",${@','.join(((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' + (d.getVar('LICENSE_PATH') or '') + ' ' + d.getVar('COREBASE') + '/meta/COPYING').split())}"
+# it would be better to copy them in do_install:append, but find_license_files is python
python perform_packagecopy_prepend () {
enabled = oe.data.typed_value('LICENSE_CREATE_PACKAGE', d)
if d.getVar('CLASSOVERRIDE') == 'class-target' and enabled:
@@ -91,17 +91,17 @@ def copy_license_files(lic_files_paths, destdir):
os.link(src, dst)
except OSError as err:
if err.errno == errno.EXDEV:
- # Copy license files if hard-link is not possible even if st_dev is the
+ # Copy license files if hardlink is not possible even if st_dev is the
# same on source and destination (docker container with device-mapper?)
canlink = False
else:
raise
- # Only chown if we did hardling, and, we're running under pseudo
+ # Only chown if we did hardlink and we're running under pseudo
if canlink and os.environ.get('PSEUDO_DISABLED') == '0':
os.chown(dst,0,0)
if not canlink:
- begin_idx = int(beginline)-1 if beginline is not None else None
- end_idx = int(endline) if endline is not None else None
+ begin_idx = max(0, int(beginline) - 1) if beginline is not None else None
+ end_idx = max(0, int(endline)) if endline is not None else None
if begin_idx is None and end_idx is None:
shutil.copyfile(src, dst)
else:
@@ -153,6 +153,10 @@ def find_license_files(d):
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
+ def visit_Constant(self, node):
+ find_license(node.value.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
def find_license(license_type):
try:
bb.utils.mkdirhier(gen_lic_dest)
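
The new visit_Constant hook is needed because Python 3.8 deprecated ast.Str in favour of ast.Constant, so on newer interpreters string literals in LICENSE expressions no longer reach visit_Str and would silently be missed. A small demonstration:

    import ast

    tree = ast.parse('"MIT & GPL-2.0-only"')
    for node in ast.walk(tree):
        # On Python >= 3.8 string literals are ast.Constant nodes; a
        # visitor implementing only visit_Str never sees them.
        if isinstance(node, ast.Constant) and isinstance(node.value, str):
            print(node.value)
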
diff --git a/meta/classes/license_image.bbclass b/meta/classes/license_image.bbclass
index 1396a95f47..325b3cbba7 100644
--- a/meta/classes/license_image.bbclass
+++ b/meta/classes/license_image.bbclass
@@ -9,8 +9,8 @@ python write_package_manifest() {
pkgs = image_list_installed_packages(d)
output = format_pkg_list(pkgs)
- open(os.path.join(license_image_dir, 'package.manifest'),
- 'w+').write(output)
+ with open(os.path.join(license_image_dir, 'package.manifest'), "w+") as package_manifest:
+ package_manifest.write(output)
}
python license_create_manifest() {
@@ -211,7 +211,7 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
taskdata = d.getVar("BB_TASKDEPDATA", False)
- pn = d.getVar("PN", True)
+ pn = d.getVar("PN")
depends = list(set([dep[0] for dep
in list(taskdata.values())
if not dep[0].endswith("-native") and not dep[0] == pn]))
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
index 2608a7ef7b..47cb969b8d 100644
--- a/meta/classes/metadata_scm.bbclass
+++ b/meta/classes/metadata_scm.bbclass
@@ -1,8 +1,3 @@
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
-
def base_detect_revision(d):
path = base_get_scmbasepath(d)
return base_get_metadata_git_revision(path, d)
@@ -42,3 +37,8 @@ def base_get_metadata_git_revision(path, d):
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
+
+METADATA_BRANCH := "${@base_detect_branch(d)}"
+METADATA_BRANCH[vardepvalue] = "${METADATA_BRANCH}"
+METADATA_REVISION := "${@base_detect_revision(d)}"
+METADATA_REVISION[vardepvalue] = "${METADATA_REVISION}"
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
index 87bba41472..669d0cc8ff 100644
--- a/meta/classes/mirrors.bbclass
+++ b/meta/classes/mirrors.bbclass
@@ -29,7 +29,6 @@ ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
ftp://ftp.gnutls.org/gcrypt/gnutls ${GNUPG_MIRROR}/gnutls \n \
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/ http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/OLD/ \n \
${APACHE_MIRROR} http://www.us.apache.org/dist \n \
@@ -43,6 +42,7 @@ ftp://sourceware.org/pub http://ftp.gwdg.de/pub/linux/sources.redhat.com/sourcew
cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+gitsm://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
@@ -53,6 +53,7 @@ npm://.*/?.* http://downloads.yoctoproject.org/mirror/sources/ \n \
cvs://.*/.* http://sources.openembedded.org/ \n \
svn://.*/.* http://sources.openembedded.org/ \n \
git://.*/.* http://sources.openembedded.org/ \n \
+gitsm://.*/.* http://sources.openembedded.org/ \n \
hg://.*/.* http://sources.openembedded.org/ \n \
bzr://.*/.* http://sources.openembedded.org/ \n \
p4://.*/.* http://sources.openembedded.org/ \n \
@@ -62,6 +63,8 @@ ftp://.*/.* http://sources.openembedded.org/ \n \
npm://.*/?.* http://sources.openembedded.org/ \n \
${CPAN_MIRROR} http://cpan.metacpan.org/ \n \
${CPAN_MIRROR} http://search.cpan.org/CPAN/ \n \
+https?$://downloads.yoctoproject.org/releases/uninative/ https://mirrors.kernel.org/yocto/uninative/ \n \
+https?$://downloads.yoctoproject.org/mirror/sources/ https://mirrors.kernel.org/yocto-sources/ \n \
"
# Use MIRRORS to provide git repo fallbacks using the https protocol, for cases
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
index ee677da1e2..b5c59ac593 100644
--- a/meta/classes/multilib.bbclass
+++ b/meta/classes/multilib.bbclass
@@ -45,6 +45,7 @@ python multilib_virtclass_handler () {
e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
+ e.data.setVar("RECIPE_SYSROOT_MANIFEST_SUBDIR", "nativesdk-" + variant)
e.data.setVar("MLPREFIX", variant + "-")
override = ":virtclass-multilib-" + variant
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
@@ -106,7 +107,6 @@ python __anonymous () {
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
- bb.build.deltask('do_populate_sdk', d)
bb.build.deltask('do_populate_sdk_ext', d)
return
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 7f2692c51a..dc5a9756b6 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -113,3 +113,5 @@ do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}"
+
+PATH_prepend = "${COREBASE}/scripts/nativesdk-intercept:"
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index 3ff74c9f31..49d30caef7 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -1140,6 +1140,14 @@ python split_and_strip_files () {
# Modified the file so clear the cache
cpath.updatecache(file)
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
#
# First lets process debug splitting
#
@@ -1153,6 +1161,8 @@ python split_and_strip_files () {
for file in staticlibs:
results.append( (file,source_info(file, d)) )
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
sources = set()
for r in results:
sources.update(r[1])
@@ -1460,6 +1470,7 @@ PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS
python emit_pkgdata() {
from glob import glob
import json
+ import gzip
def process_postinst_on_target(pkg, mlprefix):
pkgval = d.getVar('PKG_%s' % pkg)
@@ -1532,6 +1543,8 @@ fi
with open(data_file, 'w') as fd:
fd.write("PACKAGES: %s\n" % packages)
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
@@ -1551,17 +1564,32 @@ fi
pkgval = pkg
d.setVar('PKG_%s' % pkg, pkg)
+ extended_data = {
+ "files_info": {}
+ }
+
pkgdestpkg = os.path.join(pkgdest, pkg)
files = {}
+ files_extra = {}
total_size = 0
seen = set()
for f in pkgfiles[pkg]:
- relpth = os.path.relpath(f, pkgdestpkg)
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
fstat = os.lstat(f)
- files[os.sep + relpth] = fstat.st_size
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
@@ -1582,6 +1610,10 @@ fi
sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.gz" % pkg
+ with gzip.open(subdata_extended_file, "wt", encoding="utf-8") as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
# Symlinks needed for rprovides lookup
rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
@@ -1612,7 +1644,8 @@ fi
write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
}
-emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides ${PKGDESTWORK}/extended"
+emit_pkgdata[vardepsexclude] = "BB_NUMBER_THREADS"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -1989,12 +2022,12 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgconfig_provided[pkg] = []
pkgconfig_needed[pkg] = []
- for file in pkgfiles[pkg]:
+ for file in sorted(pkgfiles[pkg]):
m = pc_re.match(file)
if m:
pd = bb.data.init()
name = m.group(1)
- pkgconfig_provided[pkg].append(name)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
if not os.access(file, os.R_OK):
continue
with open(file, 'r') as f:
@@ -2017,7 +2050,7 @@ python package_do_pkgconfig () {
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
with open(pkgs_file, 'w') as f:
- for p in pkgconfig_provided[pkg]:
+ for p in sorted(pkgconfig_provided[pkg]):
f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
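
The extended pkgdata written above is straightforward to consume; a sketch of reading one of the gzipped JSON files, following the path layout set up in emit_pkgdata():

    import gzip, json

    def read_extended_pkgdata(pkgdatadir, pkg):
        # Reads the per-package extended data written by emit_pkgdata():
        # {"files_info": {"/path": {"size": ..., "debugsrc": [...]}}}
        path = "%s/extended/%s.json.gz" % (pkgdatadir, pkg)
        with gzip.open(path, "rt", encoding="utf-8") as f:
            return json.load(f)
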
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 790b26aef2..fa8c6c82ff 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -315,8 +315,8 @@ do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[cleandirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[umask] = "022"
do_package_write_deb[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_deb after do_packagedata do_package
-
+EPOCHTASK ??= ""
+addtask package_write_deb after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index c008559e4a..4927cfba00 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -274,7 +274,8 @@ do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[cleandirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[umask] = "022"
do_package_write_ipk[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_ipk after do_packagedata do_package
+EPOCHTASK ??= ""
+addtask package_write_ipk after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes/package_pkgdata.bbclass b/meta/classes/package_pkgdata.bbclass
index 18b7ed62e0..a1ea8fc041 100644
--- a/meta/classes/package_pkgdata.bbclass
+++ b/meta/classes/package_pkgdata.bbclass
@@ -162,6 +162,6 @@ python package_prepare_pkgdata() {
}
package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
-package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA SSTATETASKS"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index fc9007922a..65587d228b 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -743,7 +743,8 @@ do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[cleandirs] = "${PKGWRITEDIRRPM}"
do_package_write_rpm[umask] = "022"
do_package_write_rpm[depends] += "${@oe.utils.build_depends_string(d.getVar('PACKAGE_WRITE_DEPS'), 'do_populate_sysroot')}"
-addtask package_write_rpm after do_packagedata do_package
+EPOCHTASK ??= ""
+addtask package_write_rpm after do_packagedata do_package ${EPOCHTASK}
PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "createrepo-c-native:do_populate_sysroot"
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 25ec089ae1..484d27ac76 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -131,6 +131,9 @@ python patch_do_patch() {
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
+ if not os.path.isdir(patchdir):
+ bb.fatal("Target directory '%s' not found, patchdir '%s' is incorrect in patch file '%s'" %
+ (patchdir, parm["patchdir"], parm['patchname']))
else:
patchdir = s
@@ -147,12 +150,12 @@ python patch_do_patch() {
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(exc))
+ bb.fatal("Importing patch '%s' with striplevel '%s'\n%s" % (parm['patchname'], parm['striplevel'], str(exc)))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.utils.remove(process_tmpdir, True)
- bb.fatal(str(e))
+ bb.fatal("Applying patch '%s' on target directory '%s'\n%s" % (parm['patchname'], patchdir, str(e)))
bb.utils.remove(process_tmpdir, True)
del os.environ['TMPDIR']
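
The patchdir validation above turns a silently misapplied patch into an explicit failure. A recipe-side sketch of the SRC_URI parameters being checked (patch name and subdirectory are made up):

SRC_URI += "file://fix-build.patch;patchdir=third_party/lib;striplevel=2"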
diff --git a/meta/classes/populate_sdk_base.bbclass b/meta/classes/populate_sdk_base.bbclass
index ca56d803cb..49fdfaa93d 100644
--- a/meta/classes/populate_sdk_base.bbclass
+++ b/meta/classes/populate_sdk_base.bbclass
@@ -51,6 +51,8 @@ TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
SDK_ARCHIVE_TYPE ?= "tar.xz"
SDK_XZ_COMPRESSION_LEVEL ?= "-9"
SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+SDK_ZIP_OPTIONS ?= "-y"
+
# Support different SDK archive types according to SDK_ARCHIVE_TYPE; currently zip and tar.xz are supported
python () {
@@ -58,7 +60,7 @@ python () {
d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
        # SDK_ARCHIVE_CMD is used to generate the archived sdk ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from input dir ${SDK_OUTPUT}/${SDKPATH} to output dir ${SDKDEPLOYDIR}
        # it is recommended to cd into the input dir first to avoid embedding the build path in the archive
- d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r ${SDK_ZIP_OPTIONS} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
else:
d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
@@ -66,7 +68,7 @@ python () {
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
-PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
+PATH_prepend = "${WORKDIR}/recipe-sysroot/${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -275,6 +277,7 @@ EOF
# substitute variables
sed -i -e 's#@SDK_ARCH@#${SDK_ARCH}#g' \
-e 's#@SDKPATH@#${SDKPATH}#g' \
+ -e 's#@SDKPATHINSTALL@#${SDKPATHINSTALL}#g' \
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
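
SDK_ZIP_OPTIONS makes the zip invocation tunable rather than hardcoding -y. A local.conf sketch, assuming a zip-type SDK is wanted:

SDK_ARCHIVE_TYPE = "zip"
# Keep the default symlink handling (-y) and add maximum compression:
SDK_ZIP_OPTIONS = "-y -9"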
diff --git a/meta/classes/populate_sdk_ext.bbclass b/meta/classes/populate_sdk_ext.bbclass
index aa00d5397c..1bdfd92847 100644
--- a/meta/classes/populate_sdk_ext.bbclass
+++ b/meta/classes/populate_sdk_ext.bbclass
@@ -117,7 +117,7 @@ python write_host_sdk_ext_manifest () {
f.write("%s %s %s\n" % (info[1], info[2], info[3]))
}
-SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = "write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
+SDK_POSTPROCESS_COMMAND_append_task-populate-sdk-ext = " write_target_sdk_ext_manifest; write_host_sdk_ext_manifest; "
SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} Extensible SDK"
@@ -363,7 +363,8 @@ python copy_buildsystem () {
f.write('BUILDCFG_HEADER = ""\n\n')
# Write METADATA_REVISION
- f.write('METADATA_REVISION = "%s"\n\n' % d.getVar('METADATA_REVISION'))
+    # Needs a distro override so it can override the value set in the bbclass code (which is parsed later than local.conf)
+    f.write('METADATA_REVISION_%s = "%s"\n\n' % (d.getVar('DISTRO'), d.getVar('METADATA_REVISION')))
f.write('# Provide a flag to indicate we are in the EXT_SDK Context\n')
f.write('WITHIN_EXT_SDK = "1"\n\n')
@@ -669,7 +670,7 @@ sdk_ext_postinst() {
# A bit of another hack, but we need this in the path only for devtool
# so put it at the end of $PATH.
- echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
+ echo "export PATH=\"$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH\"" >> $env_setup_script
echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
diff --git a/meta/classes/pypi.bbclass b/meta/classes/pypi.bbclass
index 87b4c85fc0..c68367449a 100644
--- a/meta/classes/pypi.bbclass
+++ b/meta/classes/pypi.bbclass
@@ -24,3 +24,5 @@ S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
+
+CVE_PRODUCT ?= "python:${PYPI_PACKAGE}"
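
With the default above, a pypi recipe's CVE matching is scoped to the "python" vendor plus the PyPI name. A recipe sketch; the override shown is hypothetical and only needed when the NVD entry uses a different vendor/product pair:

PYPI_PACKAGE = "requests"
# Inherited default: CVE_PRODUCT = "python:requests"
# Hypothetical override if the NVD lists another pair:
#CVE_PRODUCT = "some_vendor:requests"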
diff --git a/meta/classes/python3targetconfig.bbclass b/meta/classes/python3targetconfig.bbclass
index fc1025c207..a6e67f1bf8 100644
--- a/meta/classes/python3targetconfig.bbclass
+++ b/meta/classes/python3targetconfig.bbclass
@@ -15,3 +15,15 @@ do_compile_prepend_class-target() {
do_install_prepend_class-target() {
export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
}
+
+do_configure_prepend_class-nativesdk() {
+    export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_compile_prepend_class-nativesdk() {
+    export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
+
+do_install_prepend_class-nativesdk() {
+    export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+}
diff --git a/meta/classes/qemuboot.bbclass b/meta/classes/qemuboot.bbclass
index 648af09b6e..92ae69d9f2 100644
--- a/meta/classes/qemuboot.bbclass
+++ b/meta/classes/qemuboot.bbclass
@@ -7,6 +7,7 @@
# QB_OPT_APPEND: options to append to qemu, e.g., "-show-cursor"
#
# QB_DEFAULT_KERNEL: default kernel to boot, e.g., "bzImage"
+# e.g., "bzImage-initramfs-qemux86-64.bin" if INITRAMFS_IMAGE_BUNDLE is set to 1.
#
# QB_DEFAULT_FSTYPE: default FSTYPE to boot, e.g., "ext4"
#
@@ -75,7 +76,7 @@
QB_MEM ?= "-m 256"
QB_SERIAL_OPT ?= "-serial mon:stdio -serial null"
-QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
+QB_DEFAULT_KERNEL ?= "${@bb.utils.contains("INITRAMFS_IMAGE_BUNDLE", "1", "${KERNEL_IMAGETYPE}-${INITRAMFS_LINK_NAME}.bin", "${KERNEL_IMAGETYPE}", d)}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
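
A local.conf sketch of how the new QB_DEFAULT_KERNEL default resolves when an initramfs is bundled (image name illustrative):

INITRAMFS_IMAGE = "core-image-minimal-initramfs"
INITRAMFS_IMAGE_BUNDLE = "1"
# QB_DEFAULT_KERNEL then resolves to e.g. "bzImage-initramfs-qemux86-64.bin"
# instead of plain "bzImage", matching the bundled kernel+initramfs artifact.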
diff --git a/meta/classes/report-error.bbclass b/meta/classes/report-error.bbclass
index 9cb6b0bd31..de48e4ff0f 100644
--- a/meta/classes/report-error.bbclass
+++ b/meta/classes/report-error.bbclass
@@ -6,8 +6,6 @@
#
# Licensed under the MIT license, see COPYING.MIT for details
-inherit base
-
ERR_REPORT_DIR ?= "${LOG_DIR}/error-report"
def errorreport_getdata(e):
diff --git a/meta/classes/reproducible_build.bbclass b/meta/classes/reproducible_build.bbclass
index f06e00d70d..3c01dbd5b3 100644
--- a/meta/classes/reproducible_build.bbclass
+++ b/meta/classes/reproducible_build.bbclass
@@ -1,17 +1,38 @@
# reproducible_build.bbclass
#
-# Sets SOURCE_DATE_EPOCH in each component's build environment.
+# Sets the default SOURCE_DATE_EPOCH in each component's build environment.
+# The format is number of seconds since the system epoch.
+#
# Upstream components (generally) respect this environment variable,
# using it in place of the "current" date and time.
# See https://reproducible-builds.org/specs/source-date-epoch/
#
-# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
-# This value should be reproducible for anyone who builds the same revision from the same sources.
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
+# is not available (or set to 0) will use the fallback of
+# SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally written by the function
+# create_source_date_epoch_stamp, which is typically added as a postfunc to
+# the do_unpack task. If a recipe does NOT have a do_unpack task, the function
+# should be added to a task that runs after the source is available and
+# before the do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior, it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
#
-# There are 4 ways we determine SOURCE_DATE_EPOCH:
+# There are several ways the create_source_date_epoch_stamp function determines
+# what becomes SOURCE_DATE_EPOCH:
#
# 1. Use the value from __source_date_epoch.txt file if this file exists.
-# This file was most likely created in the previous build by one of the following methods 2,3,4.
+# This file was most likely created in the previous build by one of the
+# following methods 2,3,4.
# Alternatively, it can be provided by a recipe via SRC_URI.
#
# If the file does not exist:
@@ -22,20 +43,16 @@
# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
# This works for well-kept repositories distributed via tarball.
#
-# 4. Use the modification time of the youngest file in the source tree, if there is one.
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
# This will be the newest file from the distribution tarball, if any.
#
-# 5. Fall back to a fixed timestamp.
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
#
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
-# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
-# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
-#
-# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
-# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
BUILD_REPRODUCIBLE_BINARIES ??= '1'
-inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
+inherit reproducible_build_simple
SDE_DIR = "${WORKDIR}/source-date-epoch"
SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
@@ -77,49 +94,47 @@ python create_source_date_epoch_stamp() {
import oe.reproducible
epochfile = d.getVar('SDE_FILE')
- # If it exists we need to regenerate as the sources may have changed
- if os.path.isfile(epochfile):
- bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
- os.remove(epochfile)
+ tmp_file = "%s.new" % epochfile
source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
bb.utils.mkdirhier(d.getVar('SDE_DIR'))
- with open(epochfile, 'w') as f:
+ with open(tmp_file, 'w') as f:
f.write(str(source_date_epoch))
+
+ os.rename(tmp_file, epochfile)
}
+EPOCHTASK = "do_deploy_source_date_epoch"
+
+# Generate the stamp after do_unpack runs
+do_unpack[postfuncs] += "create_source_date_epoch_stamp"
+
def get_source_date_epoch_value(d):
- cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
- if cached:
+ epochfile = d.getVar('SDE_FILE')
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
return cached
- epochfile = d.getVar('SDE_FILE')
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
- if os.path.isfile(epochfile):
+ try:
with open(epochfile, 'r') as f:
s = f.read()
try:
source_date_epoch = int(s)
- # workaround for old sstate with SDE_FILE content being 0 - use SOURCE_DATE_EPOCH_FALLBACK
- if source_date_epoch == 0 :
- source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
- bb.warn("SOURCE_DATE_EPOCH value from sstate '%s' is deprecated/invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK '%s'" % (s, source_date_epoch))
except ValueError:
bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
- else:
+ except FileNotFoundError:
bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
- d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
return str(source_date_epoch)
export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
-
-python () {
- if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
-}
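
Since create_source_date_epoch_stamp is now attached unconditionally as a do_unpack postfunc, recipes with unusual source handling hook in as the comment block above describes. A recipe-side sketch; the do_fetch_sources task name is invented for illustration:

# Recipe without a do_unpack task: run the stamp function once sources exist
do_fetch_sources[postfuncs] += "create_source_date_epoch_stamp"

# Alternatively, pin a known-reproducible value outright:
SOURCE_DATE_EPOCH = "1613559720"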
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 01c2ab1c78..24051aa378 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -27,6 +27,13 @@ BB_SCHEDULER ?= "completion"
BB_TASK_IONICE_LEVEL_task-rm_work = "3.0"
do_rm_work () {
+    # Force use of the HOSTTOOLS 'rm' - otherwise the SYSROOT_NATIVE 'rm' can be selected depending on PATH
+    # This avoids a race condition accessing 'rm' when deleting WORKDIR folders at the end of this function
+ RM_BIN="$(PATH=${HOSTTOOLS_DIR} command -v rm)"
+ if [ -z "${RM_BIN}" ]; then
+ bbfatal "Binary 'rm' not found in HOSTTOOLS_DIR, cannot remove WORKDIR data."
+ fi
+
# If the recipe name is in the RM_WORK_EXCLUDE, skip the recipe.
for p in ${RM_WORK_EXCLUDE}; do
if [ "$p" = "${PN}" ]; then
@@ -73,7 +80,7 @@ do_rm_work () {
# sstate version since otherwise we'd need to leave 'plaindirs' around
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
- rm -f $i;
+ "${RM_BIN}" -f -- $i;
;;
*_setscene*)
# Skip stamps which are already setscene versions
@@ -90,7 +97,7 @@ do_rm_work () {
;;
esac
done
- rm -f $i
+ "${RM_BIN}" -f -- $i
esac
done
@@ -100,9 +107,9 @@ do_rm_work () {
    # Retain only logs and other files in temp; safely ignore
    # failures when removing pseudo folders on an NFS2/3 server.
if [ $dir = 'pseudo' ]; then
- rm -rf $dir 2> /dev/null || true
+ "${RM_BIN}" -rf -- $dir 2> /dev/null || true
elif ! echo "$excludes" | grep -q -w "$dir"; then
- rm -rf $dir
+ "${RM_BIN}" -rf -- $dir
fi
done
}
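
For reference, the RM_WORK_EXCLUDE check above is the supported way to keep selected workdirs. A local.conf sketch (recipe names illustrative):

INHERIT += "rm_work"
# Keep the full WORKDIR of recipes currently being debugged:
RM_WORK_EXCLUDE += "busybox linux-yocto"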
diff --git a/meta/classes/rootfs-postcommands.bbclass b/meta/classes/rootfs-postcommands.bbclass
index c43b9a9823..943534c57a 100644
--- a/meta/classes/rootfs-postcommands.bbclass
+++ b/meta/classes/rootfs-postcommands.bbclass
@@ -1,6 +1,6 @@
# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}'
# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
@@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; "
# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
@@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only
APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}'
# Generates test data file with data store variables expanded in json format
-ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
+ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; "
# Write manifest
IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
@@ -267,9 +267,10 @@ python write_image_manifest () {
if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
- if os.path.lexists(manifest_link):
- os.remove(manifest_link)
- os.symlink(os.path.basename(manifest_name), manifest_link)
+ if manifest_link != manifest_name:
+ if os.path.lexists(manifest_link):
+ os.remove(manifest_link)
+ os.symlink(os.path.basename(manifest_name), manifest_link)
}
# Can be used to create /etc/timestamp during image construction to give a reasonably
@@ -304,7 +305,7 @@ rootfs_trim_schemas () {
}
rootfs_check_host_user_contaminated () {
- contaminated="${WORKDIR}/host-user-contaminated.txt"
+ contaminated="${S}/host-user-contaminated.txt"
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
@@ -339,9 +340,10 @@ python write_image_test_data() {
if os.path.exists(testdata_name) and link_name:
testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
- if os.path.lexists(testdata_link):
- os.remove(testdata_link)
- os.symlink(os.path.basename(testdata_name), testdata_link)
+ if testdata_link != testdata_name:
+ if os.path.lexists(testdata_link):
+ os.remove(testdata_link)
+ os.symlink(os.path.basename(testdata_name), testdata_link)
}
write_image_test_data[vardepsexclude] += "TOPDIR"
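
Custom hooks follow the same "command; " convention the edits above normalise. An image-recipe sketch with an illustrative function name:

ROOTFS_POSTPROCESS_COMMAND += "stamp_build_info; "
stamp_build_info () {
    # Record the distro version inside the image for later identification
    echo "${DISTRO_VERSION}" > ${IMAGE_ROOTFS}${sysconfdir}/build-version
}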
diff --git a/meta/classes/rootfsdebugfiles.bbclass b/meta/classes/rootfsdebugfiles.bbclass
index e2ba4e3647..85c7ec7434 100644
--- a/meta/classes/rootfsdebugfiles.bbclass
+++ b/meta/classes/rootfsdebugfiles.bbclass
@@ -28,7 +28,7 @@
ROOTFS_DEBUG_FILES ?= ""
ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'"
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;"
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;"
rootfs_debug_files () {
#!/bin/sh -e
echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 2325ee2747..33e5e5952f 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -395,7 +395,7 @@ def check_connectivity(d):
msg += " Please ensure your host's network is configured correctly.\n"
msg += " If your ISP or network is blocking the above URL,\n"
msg += " try with another domain name, for example by setting:\n"
- msg += " CONNECTIVITY_CHECK_URIS = \"https://www.yoctoproject.org/\""
+        msg += "    CONNECTIVITY_CHECK_URIS = \"https://www.example.com/\"\n"
msg += " You could also set BB_NO_NETWORK = \"1\" to disable network\n"
msg += " access if all required sources are on local disk.\n"
retval = msg
@@ -561,6 +561,14 @@ def check_tar_version(sanity_data):
version = result.split()[3]
if LooseVersion(version) < LooseVersion("1.28"):
return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
+
+ try:
+ result = subprocess.check_output(["tar", "--help"], stderr=subprocess.STDOUT).decode('utf-8')
+ if "--xattrs" not in result:
+ return "Your tar doesn't support --xattrs, please use GNU tar.\n"
+ except subprocess.CalledProcessError as e:
+ return "Unable to execute tar --help, exit code %d\n%s\n" % (e.returncode, e.output)
+
return None
# We use git parameters and functionality only found in 1.7.8 or later
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index a689f7f677..1058778980 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -20,7 +20,7 @@ def generate_sstatefn(spec, hash, taskname, siginfo, d):
components = spec.split(":")
# Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
# 7 is for the separators
- avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
components[2] = components[2][:avail]
components[3] = components[3][:avail]
components[4] = components[4][:avail]
@@ -123,8 +123,6 @@ SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
- if d.getVar("PN") == "pseudo-native":
- d.appendVar('SSTATE_PKGARCH', '_${ORIGNATIVELSBSTRING}')
elif bb.data.inherits_class('crosssdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
@@ -483,7 +481,7 @@ def sstate_clean_cachefiles(d):
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d, prefix=None):
+def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
import oe.path
mfile = open(manifest)
@@ -501,7 +499,9 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if entry.endswith("/"):
if os.path.islink(entry[:-1]):
os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
+ # Removing directories whilst builds are in progress exposes a race. Only
+ # do it in contexts where it is safe to do so.
os.rmdir(entry[:-1])
else:
os.remove(entry)
@@ -539,7 +539,7 @@ def sstate_clean(ss, d):
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
- sstate_clean_manifest(manifest, d)
+ sstate_clean_manifest(manifest, d, canrace=True)
for lock in locks:
bb.utils.unlockfile(lock)
@@ -640,10 +640,21 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
@@ -656,6 +667,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -677,6 +690,11 @@ def sstate_package(ss, d):
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
os.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
d.setVar('SSTATE_INSTDIR', sstatebuild)
@@ -703,6 +721,11 @@ def sstate_package(ss, d):
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
return
@@ -791,7 +814,7 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
sstate_create_package () {
# Exit early if it already exists
if [ -e ${SSTATE_PKG} ]; then
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ touch ${SSTATE_PKG} 2>/dev/null || true
return
fi
@@ -818,14 +841,18 @@ sstate_create_package () {
fi
chmod 0664 $TFILE
# Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
# Move into place using ln to attempt an atomic op.
# Abort if it already exists
- ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ ln $TFILE ${SSTATE_PKG} || true
else
- rm $TFILE
+ touch ${SSTATE_PKG} 2>/dev/null || true
fi
- [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ rm $TFILE
}
python sstate_sign_package () {
@@ -854,12 +881,12 @@ python sstate_report_unihash() {
#
sstate_unpack_package () {
tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -O ${SSTATE_PKG}.siginfo ] && [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
@@ -934,7 +961,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
try:
@@ -945,10 +972,11 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
found.add(tid)
if tid in missed:
missed.remove(tid)
- except:
+ except bb.fetch2.FetchError as e:
missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)" % (srcuri, e))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s" % (srcuri, e))
if len(tasklist) >= min_tasks:
bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
@@ -1010,6 +1038,7 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -1035,6 +1064,10 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
if taskdependees[task][1] == "do_populate_lic":
return True
+    # We only need to trigger do_deploy_source_date_epoch through direct dependencies
+ if taskdependees[task][1] == "do_deploy_source_date_epoch":
+ return True
+
# stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
return True
@@ -1141,6 +1174,11 @@ python sstate_eventhandler() {
os.utime(siginfo, None)
except PermissionError:
pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
}
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 506ce0665e..21523c8f75 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -267,6 +267,10 @@ python extend_recipe_sysroot() {
pn = d.getVar("PN")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
+ # only needed by multilib cross-canadian since it redefines RECIPE_SYSROOT
+ manifestprefix = d.getVar("RECIPE_SYSROOT_MANIFEST_SUBDIR")
+ if manifestprefix:
+ sharedmanifests = sharedmanifests + "/" + manifestprefix
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
@@ -408,7 +412,7 @@ python extend_recipe_sysroot() {
if os.path.islink(f) and not os.path.exists(f):
bb.note("%s no longer exists, removing from sysroot" % f)
lnk = os.readlink(f.replace(".complete", ""))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(f)
os.unlink(f.replace(".complete", ""))
@@ -453,7 +457,7 @@ python extend_recipe_sysroot() {
fl = depdir + "/" + l
bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
lnk = os.readlink(fl)
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(fl)
os.unlink(fl + ".complete")
@@ -474,7 +478,7 @@ python extend_recipe_sysroot() {
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
- sstate_clean_manifest(depdir + "/" + lnk, d, workdir)
+ sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
os.unlink(depdir + "/" + c)
if os.path.lexists(depdir + "/" + c + ".complete"):
os.unlink(depdir + "/" + c + ".complete")
diff --git a/meta/classes/testimage.bbclass b/meta/classes/testimage.bbclass
index c709384b91..7c8b2b30a1 100644
--- a/meta/classes/testimage.bbclass
+++ b/meta/classes/testimage.bbclass
@@ -99,30 +99,9 @@ TESTIMAGE_DUMP_DIR ?= "${LOG_DIR}/runtime-hostdump/"
TESTIMAGE_UPDATE_VARS ?= "DL_DIR WORKDIR DEPLOY_DIR"
testimage_dump_target () {
- top -bn1
- ps
- free
- df
- # The next command will export the default gateway IP
- export DEFAULT_GATEWAY=$(ip route | awk '/default/ { print $3}')
- ping -c3 $DEFAULT_GATEWAY
- dmesg
- netstat -an
- ip address
- # Next command will dump logs from /var/log/
- find /var/log/ -type f 2>/dev/null -exec echo "====================" \; -exec echo {} \; -exec echo "====================" \; -exec cat {} \; -exec echo "" \;
}
testimage_dump_host () {
- top -bn1
- iostat -x -z -N -d -p ALL 20 2
- ps -ef
- free
- df
- memstat
- dmesg
- ip -s link
- netstat -an
}
python do_testimage() {
@@ -193,6 +172,7 @@ def testimage_main(d):
import json
import signal
import logging
+ import shutil
from bb.utils import export_proxies
from oeqa.core.utils.misc import updateTestData
@@ -228,9 +208,10 @@ def testimage_main(d):
tdname = "%s.testdata.json" % image_name
try:
- td = json.load(open(tdname, "r"))
- except (FileNotFoundError) as err:
- bb.fatal('File %s Not Found. Have you built the image with INHERIT+="testimage" in the conf/local.conf?' % tdname)
+ with open(tdname, "r") as f:
+ td = json.load(f)
+ except FileNotFoundError as err:
+ bb.fatal('File %s not found (%s).\nHave you built the image with INHERIT += "testimage" in the conf/local.conf?' % (tdname, err))
    # Some variables need to be updated (mostly paths) with the
    # ones from the current environment because some tests require them.
@@ -397,10 +378,17 @@ def testimage_main(d):
get_testimage_result_id(configuration),
dump_streams=d.getVar('TESTREPORT_FULLLOGS'))
results.logSummary(pn)
+
+ # Copy additional logs to tmp/log/oeqa so it's easier to find them
+ targetdir = os.path.join(get_testimage_json_result_dir(d), d.getVar("PN"))
+ os.makedirs(targetdir, exist_ok=True)
+ os.symlink(bootlog, os.path.join(targetdir, os.path.basename(bootlog)))
+ os.symlink(d.getVar("BB_LOGFILE"), os.path.join(targetdir, os.path.basename(d.getVar("BB_LOGFILE") + "." + d.getVar('DATETIME'))))
+
if not results or not complete:
- bb.fatal('%s - FAILED - tests were interrupted during execution' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - tests were interrupted during execution, check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
if not results.wasSuccessful():
- bb.fatal('%s - FAILED - check the task log and the ssh log' % pn, forcelog=True)
+ bb.fatal('%s - FAILED - also check the logs in %s' % (pn, d.getVar("LOG_DIR")), forcelog=True)
def get_runtime_paths(d):
"""
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index db1d3215ef..21762b803b 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -29,7 +29,7 @@ toolchain_create_sdk_env_script () {
echo '# http://tldp.org/HOWTO/Program-Library-HOWTO/shared-libraries.html#AEN80' >> $script
echo '# http://xahlee.info/UnixResource_dir/_/ldpath.html' >> $script
       echo '# Only disable this check if you absolutely know what you are doing!' >> $script
- echo 'if [ ! -z "$LD_LIBRARY_PATH" ]; then' >> $script
+ echo 'if [ ! -z "${LD_LIBRARY_PATH:-}" ]; then' >> $script
echo " echo \"Your environment is misconfigured, you probably need to 'unset LD_LIBRARY_PATH'\"" >> $script
echo " echo \"but please check why this was set in the first place and that it's safe to unset.\"" >> $script
echo ' echo "The SDK will not operate correctly in most cases when LD_LIBRARY_PATH is set."' >> $script
@@ -44,7 +44,7 @@ toolchain_create_sdk_env_script () {
for i in ${CANADIANEXTRAOS}; do
EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
done
- echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+ echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':"$PATH"' >> $script
echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig:$SDKTARGETSYSROOT'"$prefix"'/share/pkgconfig' >> $script
echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
diff --git a/meta/classes/uninative.bbclass b/meta/classes/uninative.bbclass
index 1e19917a97..4d4f53ad4d 100644
--- a/meta/classes/uninative.bbclass
+++ b/meta/classes/uninative.bbclass
@@ -2,7 +2,7 @@ UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
-UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.xz"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc-${UNINATIVE_VERSION}.tar.xz"
# Example checksums
#UNINATIVE_CHECKSUM[aarch64] = "dead"
#UNINATIVE_CHECKSUM[i686] = "dead"
@@ -34,6 +34,8 @@ python uninative_event_fetchloader() {
with open(loaderchksum, "r") as f:
readchksum = f.read().strip()
if readchksum == chksum:
+ if "uninative" not in d.getVar("SSTATEPOSTUNPACKFUNCS"):
+ enable_uninative(d)
return
import subprocess
@@ -100,7 +102,7 @@ ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py \
${UNINATIVE_LOADER} \
${UNINATIVE_LOADER} \
${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative \
- ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
+ ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so*" % chksum)
subprocess.check_output(cmd, shell=True)
with open(loaderchksum, "w") as f:
@@ -167,5 +169,7 @@ python uninative_changeinterp () {
if not elf.isDynamic():
continue
+ os.chmod(f, s[stat.ST_MODE] | stat.S_IWUSR)
subprocess.check_output(("patchelf-uninative", "--set-interpreter", d.getVar("UNINATIVE_LOADER"), f), stderr=subprocess.STDOUT)
+ os.chmod(f, s[stat.ST_MODE])
}
diff --git a/meta/classes/useradd-staticids.bbclass b/meta/classes/useradd-staticids.bbclass
index 3a1b5f1320..908b24969f 100644
--- a/meta/classes/useradd-staticids.bbclass
+++ b/meta/classes/useradd-staticids.bbclass
@@ -41,7 +41,7 @@ def update_useradd_static_config(d):
def handle_missing_id(id, type, pkg, files, var, value):
# For backwards compatibility we accept "1" in addition to "error"
error_dynamic = d.getVar('USERADD_ERROR_DYNAMIC')
- msg = "%s - %s: %sname %s does not have a static ID defined." % (d.getVar('PN'), pkg, type, id)
+ msg = 'Recipe %s, package %s: %sname "%s" does not have a static ID defined.' % (d.getVar('PN'), pkg, type, id)
if files:
msg += " Add %s to one of these files: %s" % (id, files)
else:
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
index e5f3ba24f9..0f0ed3446d 100644
--- a/meta/classes/useradd.bbclass
+++ b/meta/classes/useradd.bbclass
@@ -230,6 +230,10 @@ fakeroot python populate_packages_prepend () {
preinst += 'perform_useradd () {\n%s}\n' % d.getVar('perform_useradd')
preinst += 'perform_groupmems () {\n%s}\n' % d.getVar('perform_groupmems')
preinst += d.getVar('useradd_preinst')
+ # Expand out the *_PARAM variables to the package specific versions
+ for rep in ["GROUPADD_PARAM", "USERADD_PARAM", "GROUPMEMS_PARAM"]:
+ val = d.getVar(rep + "_" + pkg) or ""
+ preinst = preinst.replace("${" + rep + "}", val)
d.setVar('pkg_preinst_%s' % pkg, preinst)
# RDEPENDS setup
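
A recipe sketch showing the per-package variables the loop above substitutes into the preinst (user and group names illustrative):

inherit useradd
USERADD_PACKAGES = "${PN}"
GROUPADD_PARAM_${PN} = "--system netdata"
USERADD_PARAM_${PN} = "--system -g netdata --home-dir /var/lib/netdata \
                       --no-create-home --shell /sbin/nologin netdata"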
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index cd3d05709e..99f68f7505 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -233,7 +233,7 @@ create_cmdline_wrapper () {
#!/bin/bash
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real $cmdoptions "\$@"
+exec -a \$realdir/$cmdname \$realdir/$cmdname.real $cmdoptions "\$@"
END
chmod +x $cmd
}
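
A do_install sketch using the fixed wrapper (tool name and option hypothetical); the wrapper re-execs the renamed ${bindir}/mytool.real with the extra options prepended:

do_install_append () {
    create_cmdline_wrapper ${D}${bindir}/mytool --config=${sysconfdir}/mytool.conf
}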