Diffstat (limited to 'meta/lib/oe')
-rw-r--r--  meta/lib/oe/__init__.py | 6
-rw-r--r--  meta/lib/oe/buildcfg.py | 79
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py | 2
-rw-r--r--  meta/lib/oe/cachedpath.py | 2
-rw-r--r--  meta/lib/oe/classextend.py | 13
-rw-r--r--  meta/lib/oe/classutils.py | 2
-rw-r--r--  meta/lib/oe/copy_buildsystem.py | 16
-rw-r--r--  meta/lib/oe/cve_check.py | 180
-rw-r--r--  meta/lib/oe/data.py | 2
-rw-r--r--  meta/lib/oe/distro_check.py | 4
-rw-r--r--  meta/lib/oe/elf.py | 12
-rw-r--r--  meta/lib/oe/go.py | 34
-rw-r--r--  meta/lib/oe/gpg_sign.py | 58
-rw-r--r--  meta/lib/oe/license.py | 47
-rw-r--r--  meta/lib/oe/lsb.py | 2
-rw-r--r--  meta/lib/oe/maketype.py | 9
-rw-r--r--  meta/lib/oe/manifest.py | 2
-rw-r--r--  meta/lib/oe/npm_registry.py | 175
-rw-r--r--  meta/lib/oe/overlayfs.py | 54
-rw-r--r--  meta/lib/oe/package.py | 1751
-rw-r--r--  meta/lib/oe/package_manager/__init__.py | 26
-rw-r--r--  meta/lib/oe/package_manager/deb/__init__.py | 37
-rw-r--r--  meta/lib/oe/package_manager/deb/manifest.py | 2
-rw-r--r--  meta/lib/oe/package_manager/deb/rootfs.py | 2
-rw-r--r--  meta/lib/oe/package_manager/deb/sdk.py | 11
-rw-r--r--  meta/lib/oe/package_manager/ipk/__init__.py | 42
-rw-r--r--  meta/lib/oe/package_manager/ipk/manifest.py | 3
-rw-r--r--  meta/lib/oe/package_manager/ipk/rootfs.py | 41
-rw-r--r--  meta/lib/oe/package_manager/ipk/sdk.py | 11
-rw-r--r--  meta/lib/oe/package_manager/rpm/__init__.py | 47
-rw-r--r--  meta/lib/oe/package_manager/rpm/manifest.py | 2
-rw-r--r--  meta/lib/oe/package_manager/rpm/rootfs.py | 4
-rw-r--r--  meta/lib/oe/package_manager/rpm/sdk.py | 10
-rw-r--r--  meta/lib/oe/packagedata.py | 276
-rw-r--r--  meta/lib/oe/packagegroup.py | 2
-rw-r--r--  meta/lib/oe/patch.py | 248
-rw-r--r--  meta/lib/oe/path.py | 8
-rw-r--r--  meta/lib/oe/prservice.py | 33
-rw-r--r--  meta/lib/oe/qa.py | 59
-rw-r--r--  meta/lib/oe/recipeutils.py | 135
-rw-r--r--  meta/lib/oe/reproducible.py | 95
-rw-r--r--  meta/lib/oe/rootfs.py | 97
-rw-r--r--  meta/lib/oe/rust.py | 13
-rw-r--r--  meta/lib/oe/sbom.py | 120
-rw-r--r--  meta/lib/oe/sdk.py | 8
-rw-r--r--  meta/lib/oe/spdx.py | 357
-rw-r--r--  meta/lib/oe/sstatesig.py | 237
-rw-r--r--  meta/lib/oe/terminal.py | 30
-rw-r--r--  meta/lib/oe/types.py | 2
-rw-r--r--  meta/lib/oe/useradd.py | 4
-rw-r--r--  meta/lib/oe/utils.py | 151
51 files changed, 4065 insertions, 498 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 4e7c09da04..6eb536ad28 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,6 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
+
+BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
+ "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
+ "qa", "reproducible", "rust", "buildcfg", "go"]
diff --git a/meta/lib/oe/buildcfg.py b/meta/lib/oe/buildcfg.py
new file mode 100644
index 0000000000..27b059b834
--- /dev/null
+++ b/meta/lib/oe/buildcfg.py
@@ -0,0 +1,79 @@
+
+import os
+import subprocess
+import bb.process
+
+def detect_revision(d):
+ path = get_scmbasepath(d)
+ return get_metadata_git_revision(path)
+
+def detect_branch(d):
+ path = get_scmbasepath(d)
+ return get_metadata_git_branch(path)
+
+def get_scmbasepath(d):
+ return os.path.join(d.getVar('COREBASE'), 'meta')
+
+def get_metadata_git_branch(path):
+ try:
+ rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
+
+def get_metadata_git_revision(path):
+ try:
+ rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
+ except bb.process.ExecutionError:
+ rev = '<unknown>'
+ return rev.strip()
+
+def get_metadata_git_toplevel(path):
+ try:
+ toplevel, _ = bb.process.run('git rev-parse --show-toplevel', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return toplevel.strip()
+
+def get_metadata_git_remotes(path):
+ try:
+ remotes_list, _ = bb.process.run('git remote', cwd=path)
+ remotes = remotes_list.split()
+ except bb.process.ExecutionError:
+ remotes = []
+ return remotes
+
+def get_metadata_git_remote_url(path, remote):
+ try:
+ uri, _ = bb.process.run('git remote get-url {remote}'.format(remote=remote), cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return uri.strip()
+
+def get_metadata_git_describe(path):
+ try:
+ describe, _ = bb.process.run('git describe --tags', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return describe.strip()
+
+def is_layer_modified(path):
+ try:
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
+ shell=True,
+ stderr=subprocess.STDOUT)
+ return ""
+ except subprocess.CalledProcessError as ex:
+ # Silently treat errors as "modified", without checking for the
+ # (expected) return code 1 in a modified git repo. For example, we get
+ # output and a 129 return code when a layer isn't a git repo at all.
+ return " -- modified"
+
+def get_layer_revisions(d):
+ layers = (d.getVar("BBLAYERS") or "").split()
+ revisions = []
+ for i in layers:
+ revisions.append((i, os.path.basename(i), get_metadata_git_branch(i).strip(), get_metadata_git_revision(i), is_layer_modified(i)))
+ return revisions
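
A usage sketch for the new oe.buildcfg helpers, assuming a bitbake datastore d is available (for example inside a python task); the real call sites in OE-Core may differ:

    import oe.buildcfg

    def describe_metadata(d):
        # Branch and revision of the metadata checkout under COREBASE/meta
        branch = oe.buildcfg.detect_branch(d)
        revision = oe.buildcfg.detect_revision(d)
        # One (path, name, branch, revision, modified) tuple per BBLAYERS entry
        layers = oe.buildcfg.get_layer_revisions(d)
        return branch, revision, layers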
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index b1856846b6..4edad01580 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -562,7 +562,7 @@ def compare_siglists(a_blob, b_blob, taskdiff=False):
elif not hash2 in hashfiles:
out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
else:
- out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
for line in out2:
m = hashlib.sha256()
m.update(line.encode('utf-8'))
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 254257a83f..0138b791d4 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on standard python library functions but avoid
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index d3d8fbe724..5161d33d2d 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -30,6 +32,9 @@ class ClassExtender(object):
if name.endswith("-" + self.extname):
name = name.replace("-" + self.extname, "")
if name.startswith("virtual/"):
+ # Assume large numbers of dashes means a triplet is present and we don't need to convert
+ if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
+ return name
subs = name.split("/", 1)[1]
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
@@ -87,7 +92,7 @@ class ClassExtender(object):
def map_depends_variable(self, varname, suffix = ""):
# We need to preserve EXTENDPKGV so it can be expanded correctly later
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
deps = self.d.getVar(varname)
@@ -142,15 +147,13 @@ class ClassExtender(object):
if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
continue
for subs in variables:
- self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+ self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc", "-g++")):
- return dep + "-crosssdk"
- elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+ if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index 08bb66b365..ec3f6ad720 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index d97bf9d1b9..81abfbf9e2 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# This class should provide easy access to the different aspects of the
@@ -20,7 +22,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
@@ -45,9 +47,6 @@ class BuildSystem(object):
corebase = os.path.abspath(self.d.getVar('COREBASE'))
layers.append(corebase)
- # Get relationship between TOPDIR and COREBASE
- # Layers should respect it
- corebase_relative = os.path.dirname(os.path.relpath(os.path.abspath(self.d.getVar('TOPDIR')), corebase))
# The bitbake build system uses the meta-skeleton layer as a layout
# for common recipies, e.g: the recipetool script to create kernel recipies
# Add the meta-skeleton layer to be included as part of the eSDK installation
@@ -100,11 +99,10 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
- else:
- layer_relative = os.path.relpath(layer, corebase)
- if os.path.dirname(layer_relative) == corebase_relative:
- layer_relative = os.path.dirname(corebase_relative) + '/' + layernewname
- layer_relative = os.path.basename(corebase) + '/' + layer_relative
+ # If the layer is located somewhere under the same parent directory
+ # as corebase we keep the layer structure.
+ elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
+ layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
if os.path.dirname(layer_relative) != layernewname:
layerdestpath += '/' + os.path.dirname(layer_relative)
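
The new elif keeps a layer's directory layout whenever it shares the same parent directory as corebase. A small illustration of the path arithmetic with hypothetical paths (pure string operations, so it runs as-is):

    import os

    corebase = "/srv/work/poky"        # hypothetical COREBASE
    layer = "/srv/work/meta-custom"    # sibling of corebase

    keep_structure = os.path.commonpath([layer, corebase]) == os.path.dirname(corebase)
    print(keep_structure)                                      # True
    print(os.path.relpath(layer, os.path.dirname(corebase)))   # meta-custom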
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
index a1d7c292af..ed5c714cb8 100644
--- a/meta/lib/oe/cve_check.py
+++ b/meta/lib/oe/cve_check.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
import collections
import re
import itertools
@@ -63,3 +69,177 @@ def _cmpkey(release, patch_l, pre_l, pre_v):
else:
_pre = float(pre_v) if pre_v else float('-inf')
return _release, _patch, _pre
+
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, also if written
+ # in lowercase. Possible to have multiple CVE IDs in a single
+ # file name, but only the last one will be detected from the file name.
+ # However, patch files contents addressing multiple CVE IDs are supported
+ # (cve_match regular expression)
+ cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)
+
+ patched_cves = set()
+ patches = oe.patch.src_patches(d)
+ bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
+ for url in patches:
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ # Search for additional patched CVEs
+ for cve in (d.getVarFlags("CVE_STATUS") or {}):
+ decoded_status, _, _ = decode_cve_status(d, cve)
+ if decoded_status == "Patched":
+ bb.debug(2, "CVE %s is additionally patched" % cve)
+ patched_cves.add(cve)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = 'cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
+
+def cve_check_merge_jsons(output, data):
+ """
+ Merge the data in the "package" property to the main data file
+ output
+ """
+ if output["version"] != data["version"]:
+ bb.error("Version mismatch when merging JSON outputs")
+ return
+
+ for product in output["package"]:
+ if product["name"] == data["package"][0]["name"]:
+ bb.error("Error adding the same package %s twice" % product["name"])
+ return
+
+ output["package"].append(data["package"][0])
+
+def update_symlinks(target_path, link_path):
+ """
+ Update a symbolic link link_path to point to target_path.
+ Remove the link and recreate it if exist and is different.
+ """
+ if link_path != target_path and os.path.exists(target_path):
+ if os.path.exists(os.path.realpath(link_path)):
+ os.remove(link_path)
+ os.symlink(os.path.basename(target_path), link_path)
+
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+ eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+ When the version has an update, i.e.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' by '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
+
+def decode_cve_status(d, cve):
+ """
+ Convert CVE_STATUS into status, detail and description.
+ """
+ status = d.getVarFlag("CVE_STATUS", cve)
+ if not status:
+ return ("", "", "")
+
+ status_split = status.split(':', 1)
+ detail = status_split[0]
+ description = status_split[1].strip() if (len(status_split) > 1) else ""
+
+ status_mapping = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
+ if status_mapping is None:
+ bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, status))
+ status_mapping = "Unpatched"
+
+ return (status_mapping, detail, description)
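
The version mangling is easiest to see with concrete values. A self-contained restatement of convert_cve_version from above, with example inputs:

    import re

    def convert_cve_version(version):
        # Same transformation as above: 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
        m = re.match(r'^([0-9.]+)_((p|rc)[0-9]+)$', version)
        if not m:
            return version
        return m.group(1) + ('-' if m.group(3) == 'rc' else '') + m.group(2)

    assert convert_cve_version("8.3_p1") == "8.3p1"
    assert convert_cve_version("6.2_rc1") == "6.2-rc1"
    assert convert_cve_version("1.2.3") == "1.2.3"   # unchanged when no update suffix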
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index 602130a904..37121cfad2 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 88e46c354d..3494520f40 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -26,7 +28,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
- # TODO use LooseVersion
+ # TODO use bb.utils.vercmp_string_op()
release = float(link)
except:
release = 0
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
index df0a4593fa..eab2349a4f 100644
--- a/meta/lib/oe/elf.py
+++ b/meta/lib/oe/elf.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -19,6 +21,7 @@ def machine_dict(d):
"x86_64": (62, 0, 0, True, 64),
"epiphany": (4643, 0, 0, True, 32),
"lm32": (138, 0, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
"microblaze": (189, 0, 0, False, 32),
@@ -43,6 +46,7 @@ def machine_dict(d):
"ia64": (50, 0, 0, True, 64),
"alpha": (36902, 0, 0, True, 64),
"hppa": (15, 3, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
"m68k": ( 4, 0, 0, False, 32),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
@@ -61,6 +65,14 @@ def machine_dict(d):
"microblaze": (189, 0, 0, False, 32),
"microblazeel":(189, 0, 0, True, 32),
},
+ "linux-android" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ },
+ "linux-androideabi" : {
+ "arm" : (40, 97, 0, True, 32),
+ },
"linux-musl" : {
"aarch64" : (183, 0, 0, True, 64),
"aarch64_be" :(183, 0, 0, False, 64),
diff --git a/meta/lib/oe/go.py b/meta/lib/oe/go.py
new file mode 100644
index 0000000000..dfd957d157
--- /dev/null
+++ b/meta/lib/oe/go.py
@@ -0,0 +1,34 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+def map_arch(a):
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el.*', a):
+ return 'mips64le'
+ elif re.match('mips64.*', a):
+ return 'mips64'
+ elif a == 'mips':
+ return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
+ elif re.match('p(pc|owerpc)(64)', a):
+ return 'ppc64'
+ elif a == 'riscv64':
+ return 'riscv64'
+ elif a == 'loongarch64':
+ return 'loong64'
+ return ''
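
A quick sanity check of the new mapping, assuming oe.go is importable (for example with meta/lib on PYTHONPATH):

    import oe.go

    assert oe.go.map_arch("x86_64") == "amd64"
    assert oe.go.map_arch("i586") == "386"
    assert oe.go.map_arch("aarch64") == "arm64"
    assert oe.go.map_arch("mips64el") == "mips64le"
    assert oe.go.map_arch("not-a-goarch") == ""   # unknown architectures map to the empty string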
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 7634d7ef1d..ede6186c84 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,13 +1,16 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for GPG signing"""
-import os
import bb
-import subprocess
+import os
import shlex
+import subprocess
+import tempfile
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
@@ -58,7 +61,7 @@ class LocalSigner(object):
for i in range(0, len(files), sign_chunk):
subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
- def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
+ def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True, output_suffix=None, use_sha256=False):
"""Create a detached signature of a file"""
if passphrase_file and passphrase:
@@ -71,25 +74,35 @@ class LocalSigner(object):
cmd += ['--homedir', self.gpg_path]
if armor:
cmd += ['--armor']
+ if use_sha256:
+ cmd += ['--digest-algo', "SHA256"]
#gpg > 2.1 supports password pipes only through the loopback interface
#gpg < 2.1 errors out if given unknown parameters
if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
- cmd += [input_file]
-
try:
if passphrase_file:
with open(passphrase_file) as fobj:
passphrase = fobj.readline();
- job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
- (_, stderr) = job.communicate(passphrase.encode("utf-8"))
+ if not output_suffix:
+ output_suffix = 'asc' if armor else 'sig'
+ output_file = input_file + "." + output_suffix
+ with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
+ tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
+ cmd += ['-o', tmp_file]
+
+ cmd += [input_file]
+
+ job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ (_, stderr) = job.communicate(passphrase.encode("utf-8"))
- if job.returncode:
- bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
+ if job.returncode:
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
+ os.rename(tmp_file, output_file)
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
raise Exception("Failed to sign '%s'" % input_file)
@@ -109,16 +122,33 @@ class LocalSigner(object):
bb.fatal("Could not get gpg version: %s" % e)
- def verify(self, sig_file):
+ def verify(self, sig_file, valid_sigs = ''):
"""Verify signature"""
- cmd = self.gpg_cmd + [" --verify", "--no-permission-warning"]
+ cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
if self.gpg_path:
cmd += ["--homedir", self.gpg_path]
cmd += [sig_file]
- status = subprocess.call(cmd)
- ret = False if status else True
- return ret
+ status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Valid if any key matches if unspecified
+ if not valid_sigs:
+ ret = False if status.returncode else True
+ return ret
+
+ import re
+ goodsigs = []
+ sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
+ for l in status.stdout.decode("utf-8").splitlines():
+ s = sigre.match(l)
+ if s:
+ goodsigs += [s.group(1)]
+
+ for sig in valid_sigs.split():
+ if sig in goodsigs:
+ return True
+ if len(goodsigs):
+ bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
+ return False
def get_signer(d, backend):
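
The reworked verify() matches accepted key IDs against GOODSIG records in gpg's --status-fd output. A self-contained sketch of that parsing step; the status lines below are illustrative, not captured from a real gpg run:

    import re

    status_output = (
        "[GNUPG:] NEWSIG\n"
        "[GNUPG:] GOODSIG 0123456789ABCDEF Example Signing Key <signer@example.com>\n"
    )

    sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
    goodsigs = []
    for line in status_output.splitlines():
        m = sigre.match(line)
        if m:
            goodsigs.append(m.group(1))

    valid_sigs = "0123456789ABCDEF"    # hypothetical accepted key ID
    print(any(sig in goodsigs for sig in valid_sigs.split()))   # True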
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index 665d32ecbb..d9c8d94da4 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Code for parsing OpenEmbedded license strings"""
@@ -14,6 +16,16 @@ def license_ok(license, dont_want_licenses):
return False
return True
+def obsolete_license_list():
+ return ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause",
+ "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2",
+ "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+",
+ "GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+",
+ "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1",
+ "MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1",
+ "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman",
+ "tcl", "vim", "SGIv1"]
+
class LicenseError(Exception):
pass
@@ -74,6 +86,9 @@ class FlattenVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.append(node.s)
+ def visit_Constant(self, node):
+ self.licenses.append(node.value)
+
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
@@ -96,26 +111,26 @@ def flattened_licenses(licensestr, choose_licenses):
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
-def is_included(licensestr, whitelist=None, blacklist=None):
- """Given a license string and whitelist and blacklist, determine if the
- license string matches the whitelist and does not match the blacklist.
+def is_included(licensestr, include_licenses=None, exclude_licenses=None):
+ """Given a license string, a list of licenses to include and a list of
+ licenses to exclude, determine if the license string matches the include
+ list and does not match the exclude list.
Returns a tuple holding the boolean state and a list of the applicable
licenses that were excluded if state is False, or the licenses that were
- included if the state is True.
- """
+ included if the state is True."""
def include_license(license):
- return any(fnmatch(license, pattern) for pattern in whitelist)
+ return any(fnmatch(license, pattern) for pattern in include_licenses)
def exclude_license(license):
- return any(fnmatch(license, pattern) for pattern in blacklist)
+ return any(fnmatch(license, pattern) for pattern in exclude_licenses)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
included licenses and no excluded licenses)."""
# The factor 1000 below is arbitrary, just expected to be much larger
- # that the number of licenses actually specified. That way the weight
+ # than the number of licenses actually specified. That way the weight
# will be negative if the list of licenses contains an excluded license,
# but still gives a higher weight to the list with the most included
# licenses.
@@ -128,11 +143,11 @@ def is_included(licensestr, whitelist=None, blacklist=None):
else:
return beta
- if not whitelist:
- whitelist = ['*']
+ if not include_licenses:
+ include_licenses = ['*']
- if not blacklist:
- blacklist = []
+ if not exclude_licenses:
+ exclude_licenses = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = [lic for lic in licenses if exclude_license(lic)]
@@ -227,6 +242,9 @@ class ListVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.add(node.s)
+ def visit_Constant(self, node):
+ self.licenses.add(node.value)
+
def list_licenses(licensestr):
"""Simply get a list of all licenses mentioned in a license string.
Binary operators are not applied or taken into account in any way"""
@@ -236,3 +254,8 @@ def list_licenses(licensestr):
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return visitor.licenses
+
+def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
+ """Return remaining bad licenses after removing any package exceptions"""
+
+ return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions]
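
apply_pkg_license_exception filters per-package exceptions of the form '<pkg>:<license>'. Restating the one-line helper above for a runnable demonstration with hypothetical values:

    def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
        return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions]

    bad = ["GPL-3.0-only", "Proprietary"]
    exceptions = ["foo:Proprietary"]

    print(apply_pkg_license_exception("foo", bad, exceptions))   # ['GPL-3.0-only']
    print(apply_pkg_license_exception("bar", bad, exceptions))   # ['GPL-3.0-only', 'Proprietary']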
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index 43e46380d7..3ec03e5042 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index d929c8b3e5..7a83bdf602 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""OpenEmbedded variable typing support
@@ -10,12 +12,7 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-try:
- # Python 3.7+
- from collections.abc import Callable
-except ImportError:
- # Python < 3.7
- from collections import Callable
+from collections.abc import Callable
available_types = {}
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 1a058dcd73..61f18adc4a 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/npm_registry.py b/meta/lib/oe/npm_registry.py
new file mode 100644
index 0000000000..d97ced7cda
--- /dev/null
+++ b/meta/lib/oe/npm_registry.py
@@ -0,0 +1,175 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import bb
+import json
+import subprocess
+
+_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ 'abcdefghijklmnopqrstuvwxyz'
+ '0123456789'
+ '_.-~()')
+
+MISSING_OK = object()
+
+REGISTRY = "https://registry.npmjs.org"
+
+# we can not use urllib.parse here because npm expects lowercase
+# hex-chars but urllib generates uppercase ones
+def uri_quote(s, safe = '/'):
+ res = ""
+ safe_set = set(safe)
+ for c in s:
+ if c in _ALWAYS_SAFE or c in safe_set:
+ res += c
+ else:
+ res += '%%%02x' % ord(c)
+ return res
+
+class PackageJson:
+ def __init__(self, spec):
+ self.__spec = spec
+
+ @property
+ def name(self):
+ return self.__spec['name']
+
+ @property
+ def version(self):
+ return self.__spec['version']
+
+ @property
+ def empty_manifest(self):
+ return {
+ 'name': self.name,
+ 'description': self.__spec.get('description', ''),
+ 'versions': {},
+ }
+
+ def base_filename(self):
+ return uri_quote(self.name, safe = '@')
+
+ def as_manifest_entry(self, tarball_uri):
+ res = {}
+
+ ## NOTE: 'npm install' requires more than basic meta information;
+ ## e.g. it takes 'bin' from this manifest entry but not the actual
+ ## 'package.json'
+ for (idx,dflt) in [('name', None),
+ ('description', ""),
+ ('version', None),
+ ('bin', MISSING_OK),
+ ('man', MISSING_OK),
+ ('scripts', MISSING_OK),
+ ('directories', MISSING_OK),
+ ('dependencies', MISSING_OK),
+ ('devDependencies', MISSING_OK),
+ ('optionalDependencies', MISSING_OK),
+ ('license', "unknown")]:
+ if idx in self.__spec:
+ res[idx] = self.__spec[idx]
+ elif dflt == MISSING_OK:
+ pass
+ elif dflt != None:
+ res[idx] = dflt
+ else:
+ raise Exception("%s-%s: missing key %s" % (self.name,
+ self.version,
+ idx))
+
+ res['dist'] = {
+ 'tarball': tarball_uri,
+ }
+
+ return res
+
+class ManifestImpl:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+
+ def load(self):
+ try:
+ with open(self.filename, "r") as f:
+ res = json.load(f)
+ except IOError:
+ res = self.__spec.empty_manifest
+
+ return res
+
+ def save(self, meta):
+ with open(self.filename, "w") as f:
+ json.dump(meta, f, indent = 2)
+
+ @property
+ def filename(self):
+ return self.__base + ".meta"
+
+class Manifest:
+ def __init__(self, base_fname, spec):
+ self.__base = base_fname
+ self.__spec = spec
+ self.__lockf = None
+ self.__impl = None
+
+ def __enter__(self):
+ self.__lockf = bb.utils.lockfile(self.__base + ".lock")
+ self.__impl = ManifestImpl(self.__base, self.__spec)
+ return self.__impl
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ bb.utils.unlockfile(self.__lockf)
+
+class NpmCache:
+ def __init__(self, cache):
+ self.__cache = cache
+
+ @property
+ def path(self):
+ return self.__cache
+
+ def run(self, type, key, fname):
+ subprocess.run(['oe-npm-cache', self.__cache, type, key, fname],
+ check = True)
+
+class NpmRegistry:
+ def __init__(self, path, cache):
+ self.__path = path
+ self.__cache = NpmCache(cache + '/_cacache')
+ bb.utils.mkdirhier(self.__path)
+ bb.utils.mkdirhier(self.__cache.path)
+
+ @staticmethod
+ ## This function is critical and must match nodejs expectations
+ def _meta_uri(spec):
+ return REGISTRY + '/' + uri_quote(spec.name, safe = '@')
+
+ @staticmethod
+ ## Exact return value does not matter; just make it look like a
+ ## usual registry url
+ def _tarball_uri(spec):
+ return '%s/%s/-/%s-%s.tgz' % (REGISTRY,
+ uri_quote(spec.name, safe = '@'),
+ uri_quote(spec.name, safe = '@/'),
+ spec.version)
+
+ def add_pkg(self, tarball, pkg_json):
+ pkg_json = PackageJson(pkg_json)
+ base = os.path.join(self.__path, pkg_json.base_filename())
+
+ with Manifest(base, pkg_json) as manifest:
+ meta = manifest.load()
+ tarball_uri = self._tarball_uri(pkg_json)
+
+ meta['versions'][pkg_json.version] = pkg_json.as_manifest_entry(tarball_uri)
+
+ manifest.save(meta)
+
+ ## Cache entries are a little bit dependent on the nodejs
+ ## version; version specific cache implementation must
+ ## mitigate differences
+ self.__cache.run('meta', self._meta_uri(pkg_json), manifest.filename);
+ self.__cache.run('tgz', tarball_uri, tarball);
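
The custom uri_quote exists because npm expects lowercase percent-escapes, while urllib produces uppercase ones. A standalone comparison, restating the helper above:

    import urllib.parse

    _ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                             'abcdefghijklmnopqrstuvwxyz'
                             '0123456789'
                             '_.-~()')

    def uri_quote(s, safe='/'):
        safe_set = set(safe)
        return ''.join(c if c in _ALWAYS_SAFE or c in safe_set
                       else '%%%02x' % ord(c) for c in s)

    print(uri_quote('@scope/pkg', safe='@'))           # @scope%2fpkg
    print(urllib.parse.quote('@scope/pkg', safe='@'))  # @scope%2Fpkg (uppercase hex)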
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
new file mode 100644
index 0000000000..8b88900f71
--- /dev/null
+++ b/meta/lib/oe/overlayfs.py
@@ -0,0 +1,54 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file contains common functions for overlayfs and its QA check
+
+# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
+def escapeSystemdUnitName(path):
+ escapeMap = {
+ '/': '-',
+ '-': "\\x2d",
+ '\\': "\\x5d"
+ }
+ return "".join([escapeMap.get(c, c) for c in path.strip('/')])
+
+def strForBash(s):
+ return s.replace('\\', '\\\\')
+
+def allOverlaysUnitName(d):
+ return d.getVar('PN') + '-overlays.service'
+
+def mountUnitName(unit):
+ return escapeSystemdUnitName(unit) + '.mount'
+
+def helperUnitName(unit):
+ return escapeSystemdUnitName(unit) + '-create-upper-dir.service'
+
+def unitFileList(d):
+ fileList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+
+ if not overlayMountPoints:
+ bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")
+
+ # check that we have required mount points set first
+ requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
+ for mountPoint in requiredMountPoints:
+ if mountPoint not in overlayMountPoints:
+ bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
+
+ for mountPoint in overlayMountPoints:
+ mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not mountPointList:
+ bb.debug(1, "No mount points defined for %s flag, don't add to file list", mountPoint)
+ continue
+ for path in mountPointList.split():
+ fileList.append(mountUnitName(path))
+ fileList.append(helperUnitName(path))
+
+ fileList.append(allOverlaysUnitName(d))
+
+ return fileList
+
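
The unit-name escaping mirrors systemd's mangling of mount paths into unit names. A small self-contained demonstration, restating escapeSystemdUnitName from above with a hypothetical mount point:

    def escapeSystemdUnitName(path):
        escapeMap = {
            '/': '-',
            '-': "\\x2d",
            '\\': "\\x5d"
        }
        return "".join([escapeMap.get(c, c) for c in path.strip('/')])

    print(escapeSystemdUnitName('/data/overlay-etc') + '.mount')
    # data-overlay\x2detc.mount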
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index dd700cbb0c..1511ba47c4 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,11 +1,22 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
+import errno
+import fnmatch
+import itertools
+import os
+import shlex
+import re
+import glob
import stat
import mmap
import subprocess
+import oe.cachedpath
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
@@ -16,7 +27,11 @@ def runstrip(arg):
# 8 - shared library
# 16 - kernel module
- (file, elftype, strip) = arg
+ if len(arg) == 3:
+ (file, elftype, strip) = arg
+ extra_strip_sections = ''
+ else:
+ (file, elftype, strip, extra_strip_sections) = arg
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
@@ -26,7 +41,7 @@ def runstrip(arg):
stripcmd = [strip]
skip_strip = False
- # kernel module
+ # kernel module
if elftype & 16:
if is_kernel_module_signed(file):
bb.debug(1, "Skip strip on signed module %s" % file)
@@ -40,6 +55,9 @@ def runstrip(arg):
# shared or executable:
elif elftype & 8 or elftype & 4:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
+ if extra_strip_sections != '':
+ for section in extra_strip_sections.split():
+ stripcmd.extend(["--remove-section=" + section])
stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
@@ -96,7 +114,7 @@ def is_static_lib(path):
return start == magic
return False
-def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process, qa_already_stripped=False):
"""
Strip executable code (like executables, shared libraries) _in_place_
- Based on sysroot_strip in staging.bbclass
@@ -104,6 +122,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
:param strip_cmd: Strip command (usually ${STRIP})
:param libdir: ${libdir} - strip .so files in this directory
:param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param max_process: number of stripping processes started in parallel
:param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
This is for proper logging and messages only.
"""
@@ -146,7 +165,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
# ...but is it ELF, and is it already stripped?
checkelf.append(file)
inodecache[file] = s.st_ino
- results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+ results = oe.utils.multiprocess_launch_mp(is_elf, checkelf, max_process)
for (file, elf_file) in results:
#elf_file = is_elf(file)
if elf_file & 1:
@@ -174,7 +193,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
elf_file = int(elffiles[file])
sfiles.append((file, elf_file, strip_cmd))
- oe.utils.multiprocess_launch(runstrip, sfiles, d)
+ oe.utils.multiprocess_launch_mp(runstrip, sfiles, max_process)
def file_translate(file):
@@ -283,3 +302,1725 @@ def read_shlib_providers(d):
shlib_provider[s[0]] = {}
shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
return shlib_provider
+
+# We generate a master list of directories to process, we start by
+# seeding this list with reasonable defaults, then load from
+# the fs-perms.txt files
+def fixup_perms(d):
+ import pwd, grp
+
+ cpath = oe.cachedpath.CachedPath()
+ dvar = d.getVar('PKGD')
+
+ # init using a string with the same format as a line as documented in
+ # the fs-perms.txt file
+ # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
+ # <path> link <link target>
+ #
+ # __str__ can be used to print out an entry in the input format
+ #
+ # if fs_perms_entry.path is None:
+ # an error occurred
+ # if fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.link = target of link
+ # if not fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.mode = expected dir mode or None
+ # fs_perms_entry.uid = expected uid or -1
+ # fs_perms_entry.gid = expected gid or -1
+ # fs_perms_entry.walk = 'true' or something else
+ # fs_perms_entry.fmode = expected file mode or None
+ # fs_perms_entry.fuid = expected file uid or -1
+ # fs_perms_entry_fgid = expected file gid or -1
+ class fs_perms_entry():
+ def __init__(self, line):
+ lsplit = line.split()
+ if len(lsplit) == 3 and lsplit[1].lower() == "link":
+ self._setlink(lsplit[0], lsplit[2])
+ elif len(lsplit) == 8:
+ self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
+ else:
+ msg = "Fixup Perms: invalid config line %s" % line
+ oe.qa.handle_error("perm-config", msg, d)
+ self.path = None
+ self.link = None
+
+ def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
+ self.path = os.path.normpath(path)
+ self.link = None
+ self.mode = self._procmode(mode)
+ self.uid = self._procuid(uid)
+ self.gid = self._procgid(gid)
+ self.walk = walk.lower()
+ self.fmode = self._procmode(fmode)
+ self.fuid = self._procuid(fuid)
+ self.fgid = self._procgid(fgid)
+
+ def _setlink(self, path, link):
+ self.path = os.path.normpath(path)
+ self.link = link
+
+ def _procmode(self, mode):
+ if not mode or (mode and mode == "-"):
+ return None
+ else:
+ return int(mode,8)
+
+ # Note uid/gid -1 has special significance in os.lchown
+ def _procuid(self, uid):
+ if uid is None or uid == "-":
+ return -1
+ elif uid.isdigit():
+ return int(uid)
+ else:
+ return pwd.getpwnam(uid).pw_uid
+
+ def _procgid(self, gid):
+ if gid is None or gid == "-":
+ return -1
+ elif gid.isdigit():
+ return int(gid)
+ else:
+ return grp.getgrnam(gid).gr_gid
+
+ # Use for debugging the entries
+ def __str__(self):
+ if self.link:
+ return "%s link %s" % (self.path, self.link)
+ else:
+ mode = "-"
+ if self.mode:
+ mode = "0%o" % self.mode
+ fmode = "-"
+ if self.fmode:
+ fmode = "0%o" % self.fmode
+ uid = self._mapugid(self.uid)
+ gid = self._mapugid(self.gid)
+ fuid = self._mapugid(self.fuid)
+ fgid = self._mapugid(self.fgid)
+ return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
+
+ def _mapugid(self, id):
+ if id is None or id == -1:
+ return "-"
+ else:
+ return "%d" % id
+
+ # Fix the permission, owner and group of path
+ def fix_perms(path, mode, uid, gid, dir):
+ if mode and not os.path.islink(path):
+ #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
+ os.chmod(path, mode)
+ # -1 is a special value that means don't change the uid/gid
+ # if they are BOTH -1, don't bother to lchown
+ if not (uid == -1 and gid == -1):
+ #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
+ os.lchown(path, uid, gid)
+
+ # Return a list of configuration files based on either the default
+ # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
+ # paths are resolved via BBPATH
+ def get_fs_perms_list(d):
+ str = ""
+ bbpath = d.getVar('BBPATH')
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
+ for conf_file in fs_perms_tables.split():
+ confpath = bb.utils.which(bbpath, conf_file)
+ if confpath:
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ else:
+ bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
+ return str
+
+ fs_perms_table = {}
+ fs_link_table = {}
+
+ # By default all of the standard directories specified in
+ # bitbake.conf will get 0755 root:root.
+ target_path_vars = [ 'base_prefix',
+ 'prefix',
+ 'exec_prefix',
+ 'base_bindir',
+ 'base_sbindir',
+ 'base_libdir',
+ 'datadir',
+ 'sysconfdir',
+ 'servicedir',
+ 'sharedstatedir',
+ 'localstatedir',
+ 'infodir',
+ 'mandir',
+ 'docdir',
+ 'bindir',
+ 'sbindir',
+ 'libexecdir',
+ 'libdir',
+ 'includedir' ]
+
+ for path in target_path_vars:
+ dir = d.getVar(path) or ""
+ if dir == "":
+ continue
+ fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
+
+ # Now we actually load from the configuration files
+ for conf in get_fs_perms_list(d).split():
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
+ for line in f:
+ if line.startswith('#'):
+ continue
+ lsplit = line.split()
+ if len(lsplit) == 0:
+ continue
+ if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
+ msg = "Fixup perms: %s invalid line: %s" % (conf, line)
+ oe.qa.handle_error("perm-line", msg, d)
+ continue
+ entry = fs_perms_entry(d.expand(line))
+ if entry and entry.path:
+ if entry.link:
+ fs_link_table[entry.path] = entry
+ if entry.path in fs_perms_table:
+ fs_perms_table.pop(entry.path)
+ else:
+ fs_perms_table[entry.path] = entry
+ if entry.path in fs_link_table:
+ fs_link_table.pop(entry.path)
+
+ # Debug -- list out in-memory table
+ #for dir in fs_perms_table:
+ # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+ #for link in fs_link_table:
+ # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
+
+ # We process links first, so we can go back and fixup directory ownership
+ # for any newly created directories
+ # Process in sorted order so /run gets created before /run/lock, etc.
+ for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
+ link = entry.link
+ dir = entry.path
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
+ continue
+
+ if link[0] == "/":
+ target = dvar + link
+ ptarget = link
+ else:
+ target = os.path.join(os.path.dirname(origin), link)
+ ptarget = os.path.join(os.path.dirname(dir), link)
+ if os.path.exists(target):
+ msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
+ oe.qa.handle_error("perm-link", msg, d)
+ continue
+
+ # Create path to move directory to, move it, and then setup the symlink
+ bb.utils.mkdirhier(os.path.dirname(target))
+ #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
+ bb.utils.rename(origin, target)
+ #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
+ os.symlink(link, origin)
+
+ for dir in fs_perms_table:
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin)):
+ continue
+
+ fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+
+ if fs_perms_table[dir].walk == 'true':
+ for root, dirs, files in os.walk(origin):
+ for dr in dirs:
+ each_dir = os.path.join(root, dr)
+ fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+ for f in files:
+ each_file = os.path.join(root, f)
+ fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
+
+# Get a list of files from file vars by searching files under current working directory
+# The list contains symlinks, directories and normal files.
+def files_from_filevars(filevars):
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f, recursive=True)
+ if globbed:
+ if [ f ] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
+ symlink_paths = []
+ for ind, f in enumerate(files):
+ # Handle directory symlinks. Truncate path to the lowest level symlink
+ parent = ''
+ for dirname in f.split('/')[:-1]:
+ parent = os.path.join(parent, dirname)
+ if dirname == '.':
+ continue
+ if cpath.islink(parent):
+ bb.warn("FILES contains file '%s' which resides under a "
+ "directory symlink. Please fix the recipe and use the "
+ "real path for the file." % f[1:])
+ symlink_paths.append(f)
+ files[ind] = parent
+ f = parent
+ break
+
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files, symlink_paths
+
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST')
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES:%s' % pkg);
+ if conffiles == None:
+ conffiles = d.getVar('CONFFILES')
+ if conffiles == None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)[0]
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading './'
+ for i in range(0, len(conf_list)):
+ conf_list[i] = conf_list[i][1:]
+
+ os.chdir(cwd)
+ return sorted(conf_list)
+
+def legitimize_package_name(s):
+ """
+ Make sure package names are legitimate strings
+ """
+
+ def fixutf(m):
+ cp = m.group(1)
+ if cp:
+ return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
+
+ # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
+ s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+
+ # Remaining package name validity fixes
+ return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
+
+def split_locales(d):
+ cpath = oe.cachedpath.CachedPath()
+ if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
+ bb.debug(1, "package requested not splitting locales")
+ return
+
+ packages = (d.getVar('PACKAGES') or "").split()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('LOCALEBASEPN')
+
+ try:
+ locale_index = packages.index(pn + '-locale')
+ packages.pop(locale_index)
+ except ValueError:
+ locale_index = len(packages)
+
+ localepaths = []
+ locales = set()
+ for localepath in (d.getVar('LOCALE_PATHS') or "").split():
+ localedir = dvar + localepath
+ if not cpath.isdir(localedir):
+ bb.debug(1, 'No locale files in %s' % localepath)
+ continue
+
+ localepaths.append(localepath)
+ with os.scandir(localedir) as it:
+ for entry in it:
+ if entry.is_dir():
+ locales.add(entry.name)
+
+ if len(locales) == 0:
+ bb.debug(1, "No locale files in this package")
+ return
+
+ summary = d.getVar('SUMMARY') or pn
+ description = d.getVar('DESCRIPTION') or ""
+ locale_section = d.getVar('LOCALE_SECTION')
+ mlprefix = d.getVar('MLPREFIX') or ""
+ for l in sorted(locales):
+ ln = legitimize_package_name(l)
+ pkg = pn + '-locale-' + ln
+ packages.insert(locale_index, pkg)
+ locale_index += 1
+ files = []
+ for localepath in localepaths:
+ files.append(os.path.join(localepath, l))
+ d.setVar('FILES:' + pkg, " ".join(files))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ if locale_section:
+ d.setVar('SECTION:' + pkg, locale_section)
+
+ d.setVar('PACKAGES', ' '.join(packages))
+
+ # Disabled by RP 18/06/07
+ # Wildcards aren't supported in debian
+ # They break with ipkg since glibc-locale* will mean that
+ # glibc-localedata-translit* won't install as a dependency
+ # for some other package which breaks meta-toolchain
+ # Probably breaks since virtual-locale- isn't provided anywhere
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
+ #rdep.append('%s-locale*' % pn)
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
+
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
+
+def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
+ debugfiles = {}
+
+ for line in dwarfsrcfiles_output.splitlines():
+ if line.startswith("\t"):
+ debugfiles[os.path.normpath(line.split()[0])] = ""
+
+ return debugfiles.keys()
+
+def source_info(file, d, fatal=True):
+ cmd = ["dwarfsrcfiles", file]
+ try:
+ output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
+ retval = 0
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ retval = exc.returncode
+
+ # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
+ if retval != 0 and retval != 255:
+ msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
+ if fatal:
+ bb.fatal(msg)
+ bb.note(msg)
+
+ debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
+
+ return list(debugsources)
+
+def splitdebuginfo(file, dvar, dv, d):
+ # Function to split a single file into two components, one is the stripped
+ # target system binary, the other contains any debugging information. The
+ # two files are linked to reference each other.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ sources = []
+
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ if oe.package.is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ return (file, sources)
+
+ # Split the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Split %s -> %s" % (file, debugfile))
+ # Only store off the hard link reference if we successfully split!
+
+ dvar = d.getVar('PKGD')
+ objcopy = d.getVar("OBJCOPY")
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
+
+ # Set the debuglink to have the view of the file path on the target
+ subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def splitstaticdebuginfo(file, dvar, dv, d):
+ # Unlike the function above, there is no way to split a static library
+ # two components. So to get similar results we will copy the unmodified
+ # static library (containing the debug symbols) into a new directory.
+ # We will then strip (preserving symbols) the static library in the
+ # typical location.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
+ debugfile = dvar + dest
+ sources = []
+
+ # Copy the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Copy %s -> %s" % (file, debugfile))
+
+ dvar = d.getVar('PKGD')
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ # Copy the unmodified item to the debug directory
+ shutil.copy2(file, debugfile)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def inject_minidebuginfo(file, dvar, dv, d):
+ # Extract just the symbols from debuginfo into minidebuginfo,
+ # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
+ # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
+
+ readelf = d.getVar('READELF')
+ nm = d.getVar('NM')
+ objcopy = d.getVar('OBJCOPY')
+
+ minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ minidebugfile = minidebuginfodir + src + '.minidebug'
+ bb.utils.mkdirhier(os.path.dirname(minidebugfile))
+
+ # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
+ # so skip it.
+ if not os.path.exists(debugfile):
+ bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
+ return
+
+ # minidebuginfo does not make sense to apply to ELF objects other than
+ # executables and shared libraries, skip applying the minidebuginfo
+ # generation for objects like kernel modules.
+ for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
+ if not line.strip().startswith("Type:"):
+ continue
+ elftype = line.split(":")[1].strip()
+ if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
+ bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
+ return
+ break
+
+ # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
+ # We will exclude all of these from minidebuginfo to save space.
+ remove_section_names = []
+ for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
+ # strip the leading " [ 1]" section index to allow splitting on space
+ if ']' not in line:
+ continue
+ fields = line[line.index(']') + 1:].split()
+ if len(fields) < 7:
+ continue
+ name = fields[0]
+ type = fields[1]
+ flags = fields[6]
+ # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
+ if name.startswith('.debug_'):
+ continue
+ if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
+ remove_section_names.append(name)
+
+ # List dynamic symbols in the binary. We can exclude these from minidebuginfo
+ # because they are always present in the binary.
+ dynsyms = set()
+ for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
+ dynsyms.add(line.split()[0])
+
+ # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
+ # These are the ones we want to keep in minidebuginfo.
+ keep_symbols_file = minidebugfile + '.symlist'
+ found_any_symbols = False
+ with open(keep_symbols_file, 'w') as f:
+ for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
+ fields = line.split('|')
+ if len(fields) < 7:
+ continue
+ name = fields[0].strip()
+ type = fields[3].strip()
+ if type == 'FUNC' and name not in dynsyms:
+ f.write('{}\n'.format(name))
+ found_any_symbols = True
+
+ if not found_any_symbols:
+ bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
+ return
+
+ bb.utils.remove(minidebugfile)
+ bb.utils.remove(minidebugfile + '.xz')
+
+ subprocess.check_call([objcopy, '-S'] +
+ ['--remove-section={}'.format(s) for s in remove_section_names] +
+ ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
+
+ subprocess.check_call(['xz', '--keep', minidebugfile])
+
+ subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
+
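A minimal sketch of how the result of inject_minidebuginfo() above can be checked, assuming a host readelf on PATH; has_minidebuginfo and its path argument are illustrative names, not code from the change above. The injected section shows up as .gnu_debugdata in the section headers.

    import subprocess

    def has_minidebuginfo(path, readelf="readelf"):
        # List the section headers and look for the xz-compressed
        # .gnu_debugdata section added by objcopy --add-section above.
        out = subprocess.check_output([readelf, "-W", "-S", path],
                                      universal_newlines=True)
        return ".gnu_debugdata" in out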
+def copydebugsources(debugsrcdir, sources, d):
+ # The debug src information written out to sourcefile is further processed
+ # and copied to the destination here.
+
+ cpath = oe.cachedpath.CachedPath()
+
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
+ dvar = d.getVar('PKGD')
+ strip = d.getVar("STRIP")
+ objcopy = d.getVar("OBJCOPY")
+ workdir = d.getVar("WORKDIR")
+ sdir = d.getVar("S")
+ cflags = d.expand("${CFLAGS}")
+
+ prefixmap = {}
+ for flag in cflags.split():
+ if not flag.startswith("-fdebug-prefix-map"):
+ continue
+ if "recipe-sysroot" in flag:
+ continue
+ flag = flag.split("=")
+ prefixmap[flag[1]] = flag[2]
+
+ nosuchdir = []
+ basepath = dvar
+ for p in debugsrcdir.split("/"):
+ basepath = basepath + "/" + p
+ if not cpath.exists(basepath):
+ nosuchdir.append(basepath)
+ bb.utils.mkdirhier(basepath)
+ cpath.updatecache(basepath)
+
+ for pmap in prefixmap:
+ # Ignore files from the recipe sysroots (target and native)
+ cmd = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | " % sourcefile
+ # We need to ignore files that are not actually ours
+ # we do this by only paying attention to items from this package
+ cmd += "fgrep -zw '%s' | " % prefixmap[pmap]
+ # Remove prefix in the source paths
+ cmd += "sed 's#%s/##g' | " % (prefixmap[pmap])
+ cmd += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)" % (pmap, dvar, prefixmap[pmap])
+
+ try:
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ # Can "fail" if internal headers/transient sources are attempted
+ pass
+ # cpio seems to have a bug with -lL together, where symbolic links are just copied, not dereferenced.
+ # Work around this by manually finding and copying any symbolic links that made it through.
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
+ (dvar, prefixmap[pmap], dvar, prefixmap[pmap], pmap, dvar, prefixmap[pmap])
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # debugsources.list may be polluted from the host if we used externalsrc,
+ # cpio uses copy-pass and may have just created a directory structure
+ # matching the one from the host; if that's the case, move those files to
+ # debugsrcdir to avoid host contamination.
+ # Empty dir structure will be deleted in the next step.
+
+ # Same check as above for externalsrc
+ if workdir not in sdir:
+ if os.path.exists(dvar + debugsrcdir + sdir):
+ cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # The copy by cpio may have resulted in some empty directories! Remove these
+ cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # Also remove debugsrcdir if it's empty
+ for p in nosuchdir[::-1]:
+ if os.path.exists(p) and not os.listdir(p):
+ os.rmdir(p)
+
+
+def process_split_and_strip_files(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
+ hostos = d.getVar('HOST_OS')
+
+ oldcwd = os.getcwd()
+ os.chdir(dvar)
+
+ dv = package_debug_vars(d)
+
+ #
+ # First let's figure out all of the files we may have to process ... do this only once!
+ #
+ elffiles = {}
+ symlinks = {}
+ staticlibs = []
+ inodes = {}
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ checkelf = {}
+ checkelflinks = {}
+ for root, dirs, files in cpath.walk(dvar):
+ for f in files:
+ file = os.path.join(root, f)
+
+ # Skip debug files
+ if dv["append"] and file.endswith(dv["append"]):
+ continue
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
+ continue
+
+ if file in skipfiles:
+ continue
+
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
+
+ try:
+ ltarget = cpath.realpath(file, dvar, False)
+ s = cpath.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check it's an executable
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
+
+ if cpath.islink(file):
+ checkelflinks[file] = ltarget
+ continue
+ # Use a reference of device ID and inode number to identify files
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ checkelf[file] = (file, file_reference)
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
+ results_map = {}
+ for (ltarget, elf_file) in results:
+ results_map[ltarget] = elf_file
+ for file in checkelflinks:
+ ltarget = checkelflinks[file]
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if results_map[ltarget]:
+ target = os.readlink(file)
+ #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
+ symlinks[file] = target
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks
+ results.sort(key=lambda x: x[0])
+
+ for (file, elf_file) in results:
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+ oe.qa.handle_error("already-stripped", msg, d)
+ continue
+
+ # At this point we have an unstripped elf file. We need to:
+ # a) Make sure any file we strip is not hardlinked to anything else outside this tree
+ # b) Only strip any hardlinked file once (no races)
+ # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+ # Use a reference of device ID and inode number to identify files
+ file_reference = checkelf[file][1]
+ if file_reference in inodes:
+ os.unlink(file)
+ os.link(inodes[file_reference][0], file)
+ inodes[file_reference].append(file)
+ else:
+ inodes[file_reference] = [file]
+ # break hardlink
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+ # Modified the file so clear the cache
+ cpath.updatecache(file)
+
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
+ #
+ # First let's process debug splitting
+ #
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
+
+ if dv["srcdir"] and not hostos.startswith("mingw"):
+ if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
+ else:
+ for file in staticlibs:
+ results.append( (file,source_info(file, d)) )
+
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
+ sources = set()
+ for r in results:
+ sources.update(r[1])
+
+ # Hardlink our debug symbols to the other hardlink copies
+ for ref in inodes:
+ if len(inodes[ref]) == 1:
+ continue
+
+ target = inodes[ref][0][len(dvar):]
+ for file in inodes[ref][1:]:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ fpath = dvar + dest
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ # Only create one hardlink of the separated debug info file in each directory
+ if not os.access(fpath, os.R_OK):
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
+
+ # Create symlinks for all cases we were able to split symbols
+ for file in symlinks:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ fpath = dvar + dest
+ # Skip it if the target doesn't exist
+ try:
+ s = os.stat(fpath)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ continue
+
+ ltarget = symlinks[file]
+ lpath = os.path.dirname(ltarget)
+ lbase = os.path.basename(ltarget)
+ ftarget = ""
+ if lpath and lpath != ".":
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
+ if lpath.startswith(".."):
+ ftarget = os.path.join("..", ftarget)
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Symlink %s -> %s" % (fpath, ftarget))
+ os.symlink(ftarget, fpath)
+
+ # Process the dv["srcdir"] if requested...
+ # This copies and places the referenced sources for later debugging...
+ copydebugsources(dv["srcdir"], sources, d)
+ #
+ # End of debug splitting
+ #
+
+ #
+ # Now let's go back over things and strip them
+ #
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
+ strip = d.getVar("STRIP")
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ #bb.note("Strip %s" % file)
+ sfiles.append((file, elf_file, strip))
+ if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ for f in staticlibs:
+ sfiles.append((f, 16, strip))
+
+ oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
+
+ # Build "minidebuginfo" and reinject it back into the stripped binaries
+ if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
+ oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
+ extraargs=(dvar, dv, d))
+
+ #
+ # End of strip
+ #
+ os.chdir(oldcwd)
+
+
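process_split_and_strip_files() above keys hardlinked copies by a device/inode pair so each ELF file is stripped only once and the matching debug files can be re-linked afterwards. A minimal sketch of that bookkeeping using only the standard library; group_hardlinks and its paths argument are illustrative, not code from the change above.

    import os

    def group_hardlinks(paths):
        # Map each (st_dev, st_ino) pair to the paths sharing that inode,
        # mirroring the "%d_%d" file_reference keys used above.
        inodes = {}
        for p in paths:
            s = os.lstat(p)
            inodes.setdefault((s.st_dev, s.st_ino), []).append(p)
        return {k: v for k, v in inodes.items() if len(v) > 1}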
+def populate_packages(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dvar = d.getVar('PKGD')
+ packages = d.getVar('PACKAGES').split()
+ pn = d.getVar('PN')
+
+ bb.utils.mkdirhier(outdir)
+ os.chdir(dvar)
+
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
+
+ split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
+
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and add the source file contents to the source package.
+ if split_source_package:
+ src_package_name = ('%s-src' % d.getVar('PN'))
+ if not src_package_name in packages:
+ packages.append(src_package_name)
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
+
+ # Sanity check PACKAGES for duplicates
+ # This check should be moved to sanity.bbclass once we have the infrastructure
+ package_dict = {}
+
+ for i, pkg in enumerate(packages):
+ if pkg in package_dict:
+ msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
+ oe.qa.handle_error("packages-list", msg, d)
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
+ package_dict[pkg] = (10, i)
+ elif autodebug and pkg.endswith("-dbg"):
+ package_dict[pkg] = (30, i)
+ else:
+ package_dict[pkg] = (50, i)
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
+ pkgdest = d.getVar('PKGDEST')
+
+ seen = []
+
+ # os.mkdir masks the permissions with umask so we have to unset it first
+ oldumask = os.umask(0)
+
+ debug = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = "." + os.path.join(dir, f)
+ if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
+ debug.append(path)
+
+ for pkg in packages:
+ root = os.path.join(pkgdest, pkg)
+ bb.utils.mkdirhier(root)
+
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
+ if "//" in filesvar:
+ msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
+ oe.qa.handle_error("files-invalid", msg, d)
+ filesvar.replace("//", "/")
+
+ origfiles = filesvar.split()
+ files, symlink_paths = oe.package.files_from_filevars(origfiles)
+
+ if autodebug and pkg.endswith("-dbg"):
+ files.extend(debug)
+
+ for file in files:
+ if (not cpath.islink(file)) and (not cpath.exists(file)):
+ continue
+ if file in seen:
+ continue
+ seen.append(file)
+
+ def mkdir(src, dest, p):
+ src = os.path.join(src, p)
+ dest = os.path.join(dest, p)
+ fstat = cpath.stat(src)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
+ os.chown(dest, fstat.st_uid, fstat.st_gid)
+ if p not in seen:
+ seen.append(p)
+ cpath.updatecache(dest)
+
+ def mkdir_recurse(src, dest, paths):
+ if cpath.exists(dest + '/' + paths):
+ return
+ while paths.startswith("./"):
+ paths = paths[2:]
+ p = "."
+ for c in paths.split("/"):
+ p = os.path.join(p, c)
+ if not cpath.exists(os.path.join(dest, p)):
+ mkdir(src, dest, p)
+
+ if cpath.isdir(file) and not cpath.islink(file):
+ mkdir_recurse(dvar, root, file)
+ continue
+
+ mkdir_recurse(dvar, root, os.path.dirname(file))
+ fpath = os.path.join(root,file)
+ if not cpath.islink(file):
+ os.link(file, fpath)
+ continue
+ ret = bb.utils.copyfile(file, fpath)
+ if ret is False or ret == 0:
+ bb.fatal("File population failed")
+
+ # Check if symlink paths exist
+ for file in symlink_paths:
+ if not os.path.exists(os.path.join(root,file)):
+ bb.fatal("File '%s' cannot be packaged into '%s' because its "
+ "parent directory structure does not exist. One of "
+ "its parent directories is a symlink whose target "
+ "directory is not included in the package." %
+ (file, pkg))
+
+ os.umask(oldumask)
+ os.chdir(workdir)
+
+ # Handle excluding packages with incompatible licenses
+ package_list = []
+ for pkg in packages:
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
+ oe.qa.handle_error("incompatible-license", msg, d)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+
+ unshipped = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = os.path.join(dir, f)
+ if ('.' + path) not in seen:
+ unshipped.append(path)
+
+ if unshipped != []:
+ msg = pn + ": Files/directories were installed but not shipped in any package:"
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
+ else:
+ for f in unshipped:
+ msg = msg + "\n " + f
+ msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
+ msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
+
+def process_fixsymlinks(pkgfiles, d):
+ cpath = oe.cachedpath.CachedPath()
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar("PACKAGES", False).split()
+
+ dangling_links = {}
+ pkg_files = {}
+ for pkg in packages:
+ dangling_links[pkg] = []
+ pkg_files[pkg] = []
+ inst_root = os.path.join(pkgdest, pkg)
+ for path in pkgfiles[pkg]:
+ rpath = path[len(inst_root):]
+ pkg_files[pkg].append(rpath)
+ rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
+ if not cpath.lexists(rtarget):
+ dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
+
+ newrdepends = {}
+ for pkg in dangling_links:
+ for l in dangling_links[pkg]:
+ found = False
+ bb.debug(1, "%s contains dangling link %s" % (pkg, l))
+ for p in packages:
+ if l in pkg_files[p]:
+ found = True
+ bb.debug(1, "target found in %s" % p)
+ if p == pkg:
+ break
+ if pkg not in newrdepends:
+ newrdepends[pkg] = []
+ newrdepends[pkg].append(p)
+ break
+ if found == False:
+ bb.note("%s contains dangling symlink to %s" % (pkg, l))
+
+ for pkg in newrdepends:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
+ for p in newrdepends[pkg]:
+ if p not in rdepends:
+ rdepends[p] = []
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+
+def process_filedeps(pkgfiles, d):
+ """
+ Collect perfile run-time dependency metadata
+ Output:
+ FILERPROVIDESFLIST:pkg - list of all files w/ deps
+ FILERPROVIDES:filepath:pkg - per file dep
+
+ FILERDEPENDSFLIST:pkg - list of all files w/ deps
+ FILERDEPENDS:filepath:pkg - per file dep
+ """
+ if d.getVar('SKIP_FILEDEPS') == '1':
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ rpmdeps = d.getVar('RPMDEPS')
+
+ def chunks(files, n):
+ return [files[i:i+n] for i in range(0, len(files), n)]
+
+ pkglist = []
+ for pkg in packages.split():
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
+ continue
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
+ continue
+ for files in chunks(pkgfiles[pkg], 100):
+ pkglist.append((pkg, files, rpmdeps, pkgdest))
+
+ processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
+
+ provides_files = {}
+ requires_files = {}
+
+ for result in processed:
+ (pkg, provides, requires) = result
+
+ if pkg not in provides_files:
+ provides_files[pkg] = []
+ if pkg not in requires_files:
+ requires_files[pkg] = []
+
+ for file in sorted(provides):
+ provides_files[pkg].append(file)
+ key = "FILERPROVIDES:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(provides[file]))
+
+ for file in sorted(requires):
+ requires_files[pkg].append(file)
+ key = "FILERDEPENDS:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(requires[file]))
+
+ for pkg in requires_files:
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
+ for pkg in provides_files:
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
+
+def process_shlibs(pkgfiles, d):
+ cpath = oe.cachedpath.CachedPath()
+
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
+ if exclude_shlibs:
+ bb.note("not generating shlibs")
+ return
+
+ lib_re = re.compile(r"^.*\.so")
+ libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
+
+ packages = d.getVar('PACKAGES')
+
+ shlib_pkgs = []
+ exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
+ if exclusion_list:
+ for pkg in packages.split():
+ if pkg not in exclusion_list.split():
+ shlib_pkgs.append(pkg)
+ else:
+ bb.note("not generating shlibs for %s" % pkg)
+ else:
+ shlib_pkgs = packages.split()
+
+ hostos = d.getVar('HOST_OS')
+
+ workdir = d.getVar('WORKDIR')
+
+ ver = d.getVar('PKGV')
+ if not ver:
+ msg = "PKGV not defined"
+ oe.qa.handle_error("pkgv-undefined", msg, d)
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ def linux_so(file, pkg, pkgver, d):
+ needs_ldconfig = False
+ needed = set()
+ sonames = set()
+ renames = []
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+ cmd = d.getVar('OBJDUMP') + " -p " + shlex.quote(file) + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ rpath = tuple()
+ for l in lines:
+ m = re.match(r"\s+RPATH\s+([^\s]*)", l)
+ if m:
+ rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
+ rpath = tuple(map(os.path.normpath, rpaths))
+ for l in lines:
+ m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
+ if m:
+ dep = m.group(1)
+ if dep not in needed:
+ needed.add((dep, file, rpath))
+ m = re.match(r"\s+SONAME\s+([^\s]*)", l)
+ if m:
+ this_soname = m.group(1)
+ prov = (this_soname, ldir, pkgver)
+ if not prov in sonames:
+ # if library is private (only used by package) then do not build shlib for it
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
+ sonames.add(prov)
+ if libdir_re.match(os.path.dirname(file)):
+ needs_ldconfig = True
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
+ renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
+ return (needs_ldconfig, needed, sonames, renames)
+
+ def darwin_so(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+
+ def get_combinations(base):
+ #
+ # Given a base library name, find all combinations of this split by "." and "-"
+ #
+ combos = []
+ options = base.split(".")
+ for i in range(1, len(options) + 1):
+ combos.append(".".join(options[0:i]))
+ options = base.split("-")
+ for i in range(1, len(options) + 1):
+ combos.append("-".join(options[0:i]))
+ return combos
+
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
+ # Drop suffix
+ name = os.path.basename(file).rsplit(".",1)[0]
+ # Find all combinations
+ combos = get_combinations(name)
+ for combo in combos:
+ if not combo in sonames:
+ prov = (combo, ldir, pkgver)
+ sonames.add(prov)
+ if file.endswith('.dylib') or file.endswith('.so'):
+ rpath = []
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if l.startswith('path '):
+ rpath.append(l.split()[1])
+
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if not l or l.endswith(":"):
+ continue
+ if "is not an object file" in l:
+ continue
+ name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
+ if name and name not in needed[pkg]:
+ needed[pkg].add((name, file, tuple()))
+
+ def mingw_dll(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+
+ if file.endswith(".dll"):
+ # assume all dlls are shared objects provided by the package
+ sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
+
+ if (file.endswith(".dll") or file.endswith(".exe")):
+ # use objdump to search for "DLL Name: .*\.dll"
+ p = subprocess.Popen([d.expand("${OBJDUMP}"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ # process the output, grabbing all .dll names
+ if p.returncode == 0:
+ for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ dllname = m.group(1)
+ if dllname:
+ needed[pkg].add((dllname, file, tuple()))
+
+ if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
+ snap_symlinks = True
+ else:
+ snap_symlinks = False
+
+ needed = {}
+
+ shlib_provider = oe.package.read_shlib_providers(d)
+
+ for pkg in shlib_pkgs:
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+ needs_ldconfig = False
+ bb.debug(2, "calculating shlib provides for %s" % pkg)
+
+ pkgver = d.getVar('PKGV:' + pkg)
+ if not pkgver:
+ pkgver = d.getVar('PV_' + pkg)
+ if not pkgver:
+ pkgver = ver
+
+ needed[pkg] = set()
+ sonames = set()
+ renames = []
+ linuxlist = []
+ for file in pkgfiles[pkg]:
+ soname = None
+ if cpath.islink(file):
+ continue
+ if hostos.startswith("darwin"):
+ darwin_so(file, needed, sonames, renames, pkgver)
+ elif hostos.startswith("mingw"):
+ mingw_dll(file, needed, sonames, renames, pkgver)
+ elif os.access(file, os.X_OK) or lib_re.match(file):
+ linuxlist.append(file)
+
+ if linuxlist:
+ results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
+ for r in results:
+ ldconfig = r[0]
+ needed[pkg] |= r[1]
+ sonames |= r[2]
+ renames.extend(r[3])
+ needs_ldconfig = needs_ldconfig or ldconfig
+
+ for (old, new) in renames:
+ bb.note("Renaming %s to %s" % (old, new))
+ bb.utils.rename(old, new)
+ pkgfiles[pkg].remove(old)
+
+ shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
+ if len(sonames):
+ with open(shlibs_file, 'w') as fd:
+ for s in sorted(sonames):
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
+ bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('ldconfig_postinst_fragment')
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
+
+ assumed_libs = d.getVar('ASSUME_SHLIBS')
+ if assumed_libs:
+ libdir = d.getVar("libdir")
+ for e in assumed_libs.split():
+ l, dep_pkg = e.split(":")
+ lib_ver = None
+ dep_pkg = dep_pkg.rsplit("_", 1)
+ if len(dep_pkg) == 2:
+ lib_ver = dep_pkg[1]
+ dep_pkg = dep_pkg[0]
+ if l not in shlib_provider:
+ shlib_provider[l] = {}
+ shlib_provider[l][libdir] = (dep_pkg, lib_ver)
+
+ libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
+
+ for pkg in shlib_pkgs:
+ bb.debug(2, "calculating shlib requirements for %s" % pkg)
+
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+
+ deps = list()
+ for n in needed[pkg]:
+ # if n is in private libraries, don't try to search a provider for it
+ # this could cause a problem in case some abc.bb provides private
+ # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
+ # but skipping it is still a better alternative than providing our own
+ # version and then adding a runtime dependency for the same system library
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
+ bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
+ continue
+ if n[0] in shlib_provider.keys():
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
+
+ bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
+
+ if dep_pkg == pkg:
+ continue
+
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+ if not dep in deps:
+ deps.append(dep)
+ continue
+ bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
+
+ deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
+
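linux_so() in process_shlibs() above derives NEEDED, SONAME and RPATH entries from objdump -p output with simple regular expressions. A small standalone sketch of the same parsing for one file, assuming a host objdump on PATH; read_dynamic_tags is an illustrative name, not the function used above.

    import re
    import subprocess

    def read_dynamic_tags(path, objdump="objdump"):
        # Return (needed, soname, rpaths) parsed from the dynamic section dump.
        needed, soname, rpaths = [], None, []
        out = subprocess.check_output([objdump, "-p", path],
                                      universal_newlines=True)
        for line in out.splitlines():
            m = re.match(r"\s+NEEDED\s+(\S+)", line)
            if m:
                needed.append(m.group(1))
            m = re.match(r"\s+SONAME\s+(\S+)", line)
            if m:
                soname = m.group(1)
            m = re.match(r"\s+(RPATH|RUNPATH)\s+(\S+)", line)
            if m:
                rpaths.extend(m.group(2).split(":"))
        return needed, soname, rpaths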
+def process_pkgconfig(pkgfiles, d):
+ packages = d.getVar('PACKAGES')
+ workdir = d.getVar('WORKDIR')
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ pc_re = re.compile(r'(.*)\.pc$')
+ var_re = re.compile(r'(.*)=(.*)')
+ field_re = re.compile(r'(.*): (.*)')
+
+ pkgconfig_provided = {}
+ pkgconfig_needed = {}
+ for pkg in packages.split():
+ pkgconfig_provided[pkg] = []
+ pkgconfig_needed[pkg] = []
+ for file in sorted(pkgfiles[pkg]):
+ m = pc_re.match(file)
+ if m:
+ pd = bb.data.init()
+ name = m.group(1)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
+ if not os.access(file, os.R_OK):
+ continue
+ with open(file, 'r') as f:
+ lines = f.readlines()
+ for l in lines:
+ m = field_re.match(l)
+ if m:
+ hdr = m.group(1)
+ exp = pd.expand(m.group(2))
+ if hdr == 'Requires':
+ pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ continue
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
+
+ for pkg in packages.split():
+ pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
+ if pkgconfig_provided[pkg] != []:
+ with open(pkgs_file, 'w') as f:
+ for p in sorted(pkgconfig_provided[pkg]):
+ f.write('%s\n' % p)
+
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ if not os.path.exists(dir):
+ continue
+ for file in sorted(os.listdir(dir)):
+ m = re.match(r'^(.*)\.pclist$', file)
+ if m:
+ pkg = m.group(1)
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
+ pkgconfig_provided[pkg] = []
+ for l in lines:
+ pkgconfig_provided[pkg].append(l.rstrip())
+
+ for pkg in packages.split():
+ deps = []
+ for n in pkgconfig_needed[pkg]:
+ found = False
+ for k in pkgconfig_provided.keys():
+ if n in pkgconfig_provided[k]:
+ if k != pkg and not (k in deps):
+ deps.append(k)
+ found = True
+ if found == False:
+ bb.note("couldn't find pkgconfig module '%s' in any package" % n)
+ deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
+ if len(deps):
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
+
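process_pkgconfig() above maps pkg-config module names to packages by scanning installed .pc files and collecting their Requires fields. A small sketch of that parsing for a single .pc file, without the variable expansion done above; pc_requires and pc_path are illustrative names.

    import re

    def pc_requires(pc_path):
        # Collect module names from the Requires field of a .pc file,
        # splitting on commas and whitespace as the code above does.
        requires = []
        with open(pc_path) as f:
            for line in f:
                m = re.match(r"(.*): (.*)", line)
                if m and m.group(1) == "Requires":
                    requires += m.group(2).replace(",", " ").split()
        return requires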
+def read_libdep_files(d):
+ pkglibdeps = {}
+ packages = d.getVar('PACKAGES').split()
+ for pkg in packages:
+ pkglibdeps[pkg] = {}
+ for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
+ depsfile = d.expand("${PKGDEST}/" + pkg + extension)
+ if os.access(depsfile, os.R_OK):
+ with open(depsfile) as fd:
+ lines = fd.readlines()
+ for l in lines:
+ l.rstrip()
+ deps = bb.utils.explode_dep_versions2(l)
+ for dep in deps:
+ if not dep in pkglibdeps[pkg]:
+ pkglibdeps[pkg][dep] = deps[dep]
+ return pkglibdeps
+
+def process_depchains(pkgfiles, d):
+ """
+ For a given set of prefix and postfix modifiers, make those packages
+ RRECOMMENDS on the corresponding packages for its RDEPENDS.
+
+ Example: If package A depends upon package B, and A's .bb emits an
+ A-dev package, this would make A-dev Recommends: B-dev.
+
+ If only one of a given suffix is specified, it will take the RRECOMMENDS
+ based on the RDEPENDS of *all* other packages. If more than one of a given
+ suffix is specified, it will only use the RDEPENDS of the single parent
+ package.
+ """
+
+ packages = d.getVar('PACKAGES')
+ postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
+
+ def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
+
+ #bb.note('depends for %s is %s' % (base, depends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(depends):
+ if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
+
+ #bb.note('rdepends for %s is %s' % (base, rdepends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(rdepends):
+ if depend.find('virtual-locale-') != -1:
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def add_dep(list, dep):
+ if dep not in list:
+ list.append(dep)
+
+ depends = []
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
+ add_dep(depends, dep)
+
+ rdepends = []
+ for pkg in packages.split():
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
+ add_dep(rdepends, dep)
+
+ #bb.note('rdepends is %s' % rdepends)
+
+ def post_getname(name, suffix):
+ return '%s%s' % (name, suffix)
+ def pre_getname(name, suffix):
+ return '%s%s' % (suffix, name)
+
+ pkgs = {}
+ for pkg in packages.split():
+ for postfix in postfixes:
+ if pkg.endswith(postfix):
+ if not postfix in pkgs:
+ pkgs[postfix] = {}
+ pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
+
+ for prefix in prefixes:
+ if pkg.startswith(prefix):
+ if not prefix in pkgs:
+ pkgs[prefix] = {}
+ pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
+
+ if "-dbg" in pkgs:
+ pkglibdeps = read_libdep_files(d)
+ pkglibdeplist = []
+ for pkg in pkglibdeps:
+ for k in pkglibdeps[pkg]:
+ add_dep(pkglibdeplist, k)
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
+
+ for suffix in pkgs:
+ for pkg in pkgs[suffix]:
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
+ continue
+ (base, func) = pkgs[suffix][pkg]
+ if suffix == "-dev":
+ pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
+ elif suffix == "-dbg":
+ if not dbgdefaultdeps:
+ pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
+ continue
+ if len(pkgs[suffix]) == 1:
+ pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
+ else:
+ rdeps = []
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
+ add_dep(rdeps, dep)
+ pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
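The process_depchains() docstring above describes how, for example, A-dev picks up RRECOMMENDS on B-dev when A depends on B. A minimal sketch of just that name mapping, outside the BitBake datastore; dev_recommends and the sample package names are illustrative.

    def dev_recommends(base_depends, suffix="-dev"):
        # For A-dev, recommend B-dev for each dependency B of A, first
        # stripping any existing -dev/-dbg suffix as the code above does.
        recs = []
        for dep in base_depends:
            for s in ("-dev", "-dbg"):
                if dep.endswith(s):
                    dep = dep[:-len(s)]
            recs.append(dep + suffix)
        return recs

    # dev_recommends(["libfoo", "libbar-dev"]) == ["libfoo-dev", "libbar-dev"]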
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
index 8e7128b195..6774cdb794 100644
--- a/meta/lib/oe/package_manager/__init__.py
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -90,7 +92,7 @@ def opkg_query(cmd_output):
def failed_postinsts_abort(pkgs, log_path):
bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
-then please place them into pkg_postinst_ontarget_${PN} ().
+then please place them into pkg_postinst_ontarget:${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" %(pkgs, log_path))
@@ -120,7 +122,8 @@ def generate_locale_archive(d, rootfs, target_arch, localedir):
"riscv32": ["--uint32-align=4", "--little-endian"],
"i586": ["--uint32-align=4", "--little-endian"],
"i686": ["--uint32-align=4", "--little-endian"],
- "x86_64": ["--uint32-align=4", "--little-endian"]
+ "x86_64": ["--uint32-align=4", "--little-endian"],
+ "loongarch64": ["--uint32-align=4", "--little-endian"]
}
if target_arch in locale_arch_options:
arch_options = locale_arch_options[target_arch]
@@ -189,7 +192,7 @@ class PackageManager(object, metaclass=ABCMeta):
bb.utils.remove(self.intercepts_dir, True)
bb.utils.mkdirhier(self.intercepts_dir)
for intercept in postinst_intercepts:
- bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+ shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
@abstractmethod
def _handle_intercept_failure(self, failed_script):
@@ -266,7 +269,7 @@ class PackageManager(object, metaclass=ABCMeta):
pass
@abstractmethod
- def install(self, pkgs, attempt_only=False):
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
"""
Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
True, installation failures are ignored.
@@ -321,7 +324,7 @@ class PackageManager(object, metaclass=ABCMeta):
# TODO don't have sdk here but have a property on the superclass
# (and respect in install_complementary)
if sdk:
- pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
+ pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
else:
pkgdatadir = self.d.getVar("PKGDATA_DIR")
@@ -344,10 +347,8 @@ class PackageManager(object, metaclass=ABCMeta):
def install_complementary(self, globs=None):
"""
Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
+ packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
+ call this function explicitly after the normal package installation.
"""
if globs is None:
globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
@@ -398,7 +399,7 @@ class PackageManager(object, metaclass=ABCMeta):
bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
' '.join(install_pkgs),
' '.join(skip_pkgs)))
- self.install(install_pkgs, attempt_only=True)
+ self.install(install_pkgs, hard_depends_only=True)
except subprocess.CalledProcessError as e:
bb.fatal("Could not compute complementary packages list. Command "
"'%s' returned %d:\n%s" %
@@ -469,7 +470,10 @@ def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencie
# Detect bitbake -b usage
nodeps = d.getVar("BB_LIMITEDDEPS") or False
if nodeps or not filterbydependencies:
- oe.path.symlink(deploydir, subrepo_dir, True)
+ for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
+ target = os.path.join(deploydir + "/" + arch)
+ if os.path.exists(target):
+ oe.path.symlink(target, subrepo_dir + "/" + arch, True)
return
start = None
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
index 2ee68fefb1..0c23c884c1 100644
--- a/meta/lib/oe/package_manager/deb/__init__.py
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -53,6 +55,7 @@ class DpkgIndexer(Indexer):
index_cmds = []
deb_dirs_found = False
+ index_sign_files = set()
for arch in arch_list:
arch_dir = os.path.join(self.deploy_dir, arch)
if not os.path.isdir(arch_dir):
@@ -62,7 +65,10 @@ class DpkgIndexer(Indexer):
cmd += "%s -fcn Packages > Packages.gz;" % gzip
- with open(os.path.join(arch_dir, "Release"), "w+") as release:
+ release_file = os.path.join(arch_dir, "Release")
+ index_sign_files.add(release_file)
+
+ with open(release_file, "w+") as release:
release.write("Label: %s\n" % arch)
cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
@@ -77,7 +83,16 @@ class DpkgIndexer(Indexer):
oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- raise NotImplementedError('Package feed signing not implementd for dpkg')
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+ if signer:
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ output_suffix="gpg",
+ use_sha256=True)
class PMPkgsList(PkgsList):
@@ -214,7 +229,7 @@ class DpkgPM(OpkgDpkgPM):
tmp_sf.write(status)
- os.rename(status_file + ".tmp", status_file)
+ bb.utils.rename(status_file + ".tmp", status_file)
def run_pre_post_installs(self, package_name=None):
"""
@@ -276,14 +291,18 @@ class DpkgPM(OpkgDpkgPM):
self.deploy_dir_unlock()
- def install(self, pkgs, attempt_only=False):
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
if attempt_only and len(pkgs) == 0:
return
os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s" % \
- (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+ extra_args = ""
+ if hard_depends_only:
+ extra_args = "--no-install-recommends"
+
+ cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s %s" % \
+ (self.apt_get_cmd, self.apt_args, extra_args, ' '.join(pkgs))
try:
bb.note("Installing the following packages: %s" % ' '.join(pkgs))
@@ -299,13 +318,13 @@ class DpkgPM(OpkgDpkgPM):
for dir in dirs:
new_dir = re.sub(r"\.dpkg-new", "", dir)
if dir != new_dir:
- os.rename(os.path.join(root, dir),
+ bb.utils.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
new_file = re.sub(r"\.dpkg-new", "", file)
if file != new_file:
- os.rename(os.path.join(root, file),
+ bb.utils.rename(os.path.join(root, file),
os.path.join(root, new_file))
@@ -422,7 +441,7 @@ class DpkgPM(OpkgDpkgPM):
multilib_variants = self.d.getVar("MULTILIB_VARIANTS");
for variant in multilib_variants.split():
localdata = bb.data.createCopy(self.d)
- variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
+ variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
orig_arch = localdata.getVar("DPKG_ARCH")
localdata.setVar("DEFAULTTUNE", variant_tune)
variant_arch = localdata.getVar("DPKG_ARCH")
diff --git a/meta/lib/oe/package_manager/deb/manifest.py b/meta/lib/oe/package_manager/deb/manifest.py
index d8eab24a06..72983bae98 100644
--- a/meta/lib/oe/package_manager/deb/manifest.py
+++ b/meta/lib/oe/package_manager/deb/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/deb/rootfs.py b/meta/lib/oe/package_manager/deb/rootfs.py
index 8fbaca11d6..1e25b64ed9 100644
--- a/meta/lib/oe/package_manager/deb/rootfs.py
+++ b/meta/lib/oe/package_manager/deb/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/deb/sdk.py b/meta/lib/oe/package_manager/deb/sdk.py
index 9859d8f32d..6f3005053e 100644
--- a/meta/lib/oe/package_manager/deb/sdk.py
+++ b/meta/lib/oe/package_manager/deb/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -65,7 +67,14 @@ class PkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ self.target_pm.run_pre_post_installs()
+
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
@@ -78,6 +87,8 @@ class PkgSdk(Sdk):
self._populate_sysroot(self.host_pm, self.host_manifest)
self.install_locales(self.host_pm)
+ self.host_pm.run_pre_post_installs()
+
self.host_pm.run_intercepts(populate_sdk='host')
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
index da488c1c7f..8cc9953a02 100644
--- a/meta/lib/oe/package_manager/ipk/__init__.py
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -14,6 +16,7 @@ class OpkgIndexer(Indexer):
]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ opkg_index_cmd_extra_params = self.d.getVar('OPKG_MAKE_INDEX_EXTRA_PARAMS') or ""
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
@@ -39,8 +42,8 @@ class OpkgIndexer(Indexer):
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
- index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir, opkg_index_cmd_extra_params))
index_sign_files.add(pkgs_file)
@@ -102,12 +105,14 @@ class OpkgDpkgPM(PackageManager):
This method extracts the common parts for Opkg and Dpkg
"""
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
+
+ return opkg_query(proc.stdout)
def extract(self, pkg, pkg_info):
"""
@@ -129,7 +134,7 @@ class OpkgDpkgPM(PackageManager):
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
- data_tar = 'data.tar.xz'
+ data_tar = 'data.tar.zst'
try:
cmd = [ar_cmd, 'x', pkg_path]
@@ -213,7 +218,7 @@ class OpkgPM(OpkgDpkgPM):
tmp_sf.write(status)
- os.rename(status_file + ".tmp", status_file)
+ bb.utils.rename(status_file + ".tmp", status_file)
def _create_custom_config(self):
bb.note("Building from feeds activated!")
@@ -243,7 +248,7 @@ class OpkgPM(OpkgDpkgPM):
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
+ cfg_file_name = oe.path.join(self.target_rootfs,
self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
@@ -337,7 +342,7 @@ class OpkgPM(OpkgDpkgPM):
self.deploy_dir_unlock()
- def install(self, pkgs, attempt_only=False):
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
if not pkgs:
return
@@ -346,6 +351,8 @@ class OpkgPM(OpkgDpkgPM):
cmd += " --add-exclude %s" % exclude
for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
cmd += " --add-ignore-recommends %s" % bad_recommendation
+ if hard_depends_only:
+ cmd += " --no-install-recommends"
cmd += " install "
cmd += " ".join(pkgs)
@@ -443,15 +450,16 @@ class OpkgPM(OpkgDpkgPM):
cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
opkg_args,
' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
+ proc = subprocess.run(cmd, capture_output=True, encoding="utf-8", shell=True)
+ if proc.returncode:
bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ "returned %d:\n%s" % (cmd, proc.returncode, proc.stderr))
+ elif proc.stderr:
+ bb.note("Command '%s' returned stderr: %s" % (cmd, proc.stderr))
bb.utils.remove(temp_rootfs, True)
- return output
+ return proc.stdout
def backup_packaging_data(self):
# Save the opkglib for increment ipk image generation
@@ -498,6 +506,6 @@ class OpkgPM(OpkgDpkgPM):
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.zst"))
return tmp_dir
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
index ee4b57bcb0..3549d7428d 100644
--- a/meta/lib/oe/package_manager/ipk/manifest.py
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -1,8 +1,11 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
from oe.manifest import Manifest
+import re
class PkgManifest(Manifest):
"""
diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py
index 26dbee6f6a..ba93eb62ea 100644
--- a/meta/lib/oe/package_manager/ipk/rootfs.py
+++ b/meta/lib/oe/package_manager/ipk/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -145,51 +147,14 @@ class PkgRootfs(DpkgOpkgRootfs):
self.pm.recover_packaging_data()
bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
'''
Compare two files with the same key twice to see if they are equal.
If they are not equal, it means they are duplicated and come from
different packages.
- 1st: Comapre them directly;
- 2nd: While incremental image creation is enabled, one of the
- files could be probaly prelinked in the previous image
- creation and the file has been changed, so we need to
- prelink the other one and compare them.
'''
def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
if filecmp.cmp(f1, f2):
return True
-
- if bb.data.inherits_class('image-prelink', self.d):
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
# Not equal
return False
@@ -200,7 +165,7 @@ class PkgRootfs(DpkgOpkgRootfs):
"""
def _multilib_sanity_test(self, dirs):
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
+ allow_replace = "|".join((self.d.getVar("MULTILIBRE_ALLOW_REP") or "").split())
if allow_replace is None:
allow_replace = ""
diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py
index e2ca415c8e..3acd55f548 100644
--- a/meta/lib/oe/package_manager/ipk/sdk.py
+++ b/meta/lib/oe/package_manager/ipk/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -61,12 +63,19 @@ class PkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
+ else:
+ self.target_pm.remove_lists()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
@@ -78,6 +87,8 @@ class PkgSdk(Sdk):
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
+ else:
+ self.host_pm.remove_lists()
target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
index 6df0092281..f40c880af4 100644
--- a/meta/lib/oe/package_manager/rpm/__init__.py
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -96,11 +98,15 @@ class RpmPM(PackageManager):
archs = ["sdk_provides_dummy_target"] + archs
confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
+ with open(confdir + "arch", 'w') as f:
+ f.write(":".join(archs))
+
distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+ with open(confdir + "releasever", 'w') as f:
+ f.write(distro_codename if distro_codename is not None else '')
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+ with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
+ f.write("")
def _configure_rpm(self):
@@ -110,14 +116,17 @@ class RpmPM(PackageManager):
platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(platformconfdir + "platform", 'w') as f:
+ f.write("%s-pc-linux" % self.primary_arch)
with open(rpmrcconfdir + "rpmrc", 'w') as f:
f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ with open(platformconfdir + "macros", 'w') as f:
+ f.write("%_transaction_color 7\n")
if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ with open(platformconfdir + "macros", 'a') as f:
+ f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
@@ -164,13 +173,13 @@ class RpmPM(PackageManager):
repo_uri = uri + "/" + arch
repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
else:
repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
def _prepare_pkg_transaction(self):
os.environ['D'] = self.target_rootfs
@@ -181,7 +190,7 @@ class RpmPM(PackageManager):
os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
- def install(self, pkgs, attempt_only = False):
+ def install(self, pkgs, attempt_only=False, hard_depends_only=False):
if len(pkgs) == 0:
return
self._prepare_pkg_transaction()
@@ -192,13 +201,16 @@ class RpmPM(PackageManager):
output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
(["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
- (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--setopt=install_weak_deps=False"] if (hard_depends_only or self.d.getVar('NO_RECOMMENDATIONS') == "1") else []) +
(["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
["install"] +
pkgs)
failed_scriptlets_pkgnames = collections.OrderedDict()
for line in output.splitlines():
+ if line.startswith("Error: Systemctl"):
+ bb.error(line)
+
if line.startswith("Error in POSTIN scriptlet in rpm package"):
failed_scriptlets_pkgnames[line.split()[-1]] = True
@@ -326,7 +338,8 @@ class RpmPM(PackageManager):
return e.output.decode("utf-8")
def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ with open(self.solution_manifest, 'w') as f:
+ f.write(" ".join(pkgs))
return pkgs
def load_old_install_solution(self):
@@ -360,7 +373,8 @@ class RpmPM(PackageManager):
bb.utils.mkdirhier(target_path)
num = self._script_num_prefix(target_path)
saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
+ with open(saved_script_name, 'w') as f:
+ f.write(output)
os.chmod(saved_script_name, 0o755)
def _handle_intercept_failure(self, registered_pkgs):
@@ -372,11 +386,12 @@ class RpmPM(PackageManager):
self.save_rpmpostinst(pkg)
def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ output = self._invoke_dnf(["repoquery", "--location", pkg])
pkg_name = output.splitlines()[-1]
if not pkg_name.endswith(".rpm"):
bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
+ # Strip file: prefix
+ pkg_path = pkg_name[5:]
cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
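The reworked extract() above assumes "dnf repoquery --location" prints a file: URL for packages served from the local deploy repo; the last output line is taken and the scheme prefix is stripped to obtain a filesystem path. A minimal standalone sketch of that parsing, using a hypothetical output string:

    # Hypothetical output; the real value comes from self._invoke_dnf(["repoquery", "--location", pkg])
    output = "file:///build/tmp/deploy/rpm/core2_64/acl-2.3.1-r0.core2_64.rpm"
    pkg_name = output.splitlines()[-1]
    if not pkg_name.endswith(".rpm"):
        raise RuntimeError("dnf could not find the package: %s" % output)
    pkg_path = pkg_name[len("file:"):]   # strip the URL scheme; extra leading slashes are harmless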
diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py
index e6604b301f..6ee7c329f0 100644
--- a/meta/lib/oe/package_manager/rpm/manifest.py
+++ b/meta/lib/oe/package_manager/rpm/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
index 00d07cd9cc..3ba5396320 100644
--- a/meta/lib/oe/package_manager/rpm/rootfs.py
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -108,7 +110,7 @@ class PkgRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+ self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
execute_pre_post_process(self.d, rpm_post_process_cmds)
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
index c5f232431f..ea79fe050b 100644
--- a/meta/lib/oe/package_manager/rpm/sdk.py
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -65,7 +67,12 @@ class PkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
@@ -110,5 +117,6 @@ class PkgSdk(Sdk):
for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
self.movefile(f, native_sysconf_dir)
for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
+ self.mkdirhier(native_sysconf_dir + "/dnf")
+ self.movefile(f, native_sysconf_dir + "/dnf")
self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index a82085a792..2d1d6ddeb7 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,9 +1,16 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import codecs
import os
+import json
+import bb.compress.zstd
+import oe.path
+
+from glob import glob
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
@@ -19,7 +26,7 @@ def read_pkgdatafile(fn):
import re
with open(fn, 'r') as f:
lines = f.readlines()
- r = re.compile("([^:]+):\s*(.*)")
+ r = re.compile(r"(^.+?):\s+(.*)")
for l in lines:
m = r.match(l)
if m:
@@ -45,18 +52,30 @@ def read_pkgdata(pn, d):
return read_pkgdatafile(fn)
#
-# Collapse FOO_pkg variables into FOO
+# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
- newvar = var.replace("_" + pkg, "")
- if newvar == var and var + "_" + pkg in subd:
+ newvar = var.replace(":" + pkg, "")
+ if newvar == var and var + ":" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
+ import json
+ import bb.compress.zstd
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
+ try:
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
@@ -96,3 +115,252 @@ def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
+
+def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
+ pkgdata_dir = d.getVar("PKGDATA_DIR")
+ possibles = set()
+ try:
+ possibles |= set(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
+ except OSError:
+ pass
+
+ if include_rdep:
+ possibles.add(rdep)
+
+ for p in sorted(list(possibles)):
+ rdep_data = read_subpkgdata(p, d)
+ yield p, rdep_data
+
+def get_package_mapping(pkg, basepkg, d, depversions=None):
+ import oe.packagedata
+
+ data = oe.packagedata.read_subpkgdata(pkg, d)
+ key = "PKG:%s" % pkg
+
+ if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
+ # Have to avoid undoing the write_extra_pkgs(global_variants...)
+ if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
+ and data[key] == basepkg:
+ return pkg
+ if depversions == []:
+ # Avoid returning a mapping if the renamed package rprovides its original name
+ rprovkey = "RPROVIDES:%s" % pkg
+ if rprovkey in data:
+ if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
+ bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
+ return pkg
+ # Do map to rewritten package name
+ return data[key]
+
+ return pkg
+
+def get_package_additional_metadata(pkg_type, d):
+ base_key = "PACKAGE_ADD_METADATA"
+ for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
+ if d.getVar(key, False) is None:
+ continue
+ d.setVarFlag(key, "type", "list")
+ if d.getVarFlag(key, "separator") is None:
+ d.setVarFlag(key, "separator", "\\n")
+ metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
+ return "\n".join(metadata_fields).strip()
+
+def runtime_mapping_rename(varname, pkg, d):
+ #bb.note("%s before: %s" % (varname, d.getVar(varname)))
+
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
+ for depend, depversions in deps.items():
+ new_depend = get_package_mapping(depend, pkg, d, depversions)
+ if depend != new_depend:
+ bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
+ new_depends[new_depend] = deps[depend]
+
+ d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
+
+ #bb.note("%s after: %s" % (varname, d.getVar(varname)))
+
+def emit_pkgdata(pkgfiles, d):
+ def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
+ defer_fragment = """
+if [ -n "$D" ]; then
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
+ exit 0
+fi
+""" % (pkgval, mlprefix)
+
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
+
+ if postinst_ontarget:
+ bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += defer_fragment
+ postinst += postinst_ontarget
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+
+ def add_set_e_to_scriptlets(pkg):
+ for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
+ if scriptlet:
+ scriptlet_split = scriptlet.split('\n')
+ if scriptlet_split[0].startswith("#!"):
+ scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
+ else:
+ scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
+
+ def write_if_exists(f, pkg, var):
+ def encode(str):
+ import codecs
+ c = codecs.getencoder("unicode_escape")
+ return c(str)[0].decode("latin1")
+
+ val = d.getVar('%s:%s' % (var, pkg))
+ if val:
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
+ return val
+ val = d.getVar('%s' % (var))
+ if val:
+ f.write('%s: %s\n' % (var, encode(val)))
+ return val
+
+ def write_extra_pkgs(variants, pn, packages, pkgdatadir):
+ for variant in variants:
+ with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
+ fd.write("PACKAGES: %s\n" % ' '.join(
+ map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
+
+ def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
+ for variant in variants:
+ for pkg in packages.split():
+ ml_pkg = "%s-%s" % (variant, pkg)
+ subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
+ with open(subdata_file, 'w') as fd:
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
+
+ packages = d.getVar('PACKAGES')
+ pkgdest = d.getVar('PKGDEST')
+ pkgdatadir = d.getVar('PKGDESTWORK')
+
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
+
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
+ pn = d.getVar('PN')
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_pkgs(variants, pn, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
+
+ workdir = d.getVar('WORKDIR')
+
+ for pkg in packages.split():
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
+
+ pkgdestpkg = os.path.join(pkgdest, pkg)
+ files = {}
+ files_extra = {}
+ total_size = 0
+ seen = set()
+ for f in pkgfiles[pkg]:
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
+ fstat = os.lstat(f)
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
+ if fstat.st_ino not in seen:
+ seen.add(fstat.st_ino)
+ total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+ d.setVar('FILES_INFO:' + pkg , json.dumps(files, sort_keys=True))
+
+ process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
+ add_set_e_to_scriptlets(pkg)
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
+
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
+
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
+ # Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
+ if rprov:
+ for p in bb.utils.explode_deps(rprov):
+ subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
+ bb.utils.mkdirhier(os.path.dirname(subdata_sym))
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
+ if not allow_empty:
+ allow_empty = d.getVar('ALLOW_EMPTY')
+ root = "%s/%s" % (pkgdest, pkg)
+ os.chdir(root)
+ g = glob('*')
+ if g or allow_empty == "1":
+ # Symlinks needed for reverse lookups (from the final package name)
+ subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
+ open(packagedfile, 'w').close()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_runtime_pkgs(variants, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
+
+def mapping_rename_hook(d):
+ """
+ Rewrite variables to account for package renaming in things
+ like debian.bbclass or manual PKG variable name changes
+ """
+ pkg = d.getVar("PKG")
+ oe.packagedata.runtime_mapping_rename("RDEPENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RRECOMMENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 8fcaecde82..7b7594751a 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index fccbedb519..60a0cc8291 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,7 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
+import shlex
+import subprocess
import oe.path
import oe.types
@@ -24,9 +29,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
- import pipes
- import subprocess
-
if dir:
olddir = os.path.abspath(os.curdir)
if not os.path.exists(dir):
@@ -35,7 +37,7 @@ def runcmd(args, dir = None):
# print("cwd: %s -> %s" % (olddir, dir))
try:
- args = [ pipes.quote(str(arg)) for arg in args ]
+ args = [ shlex.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
@@ -56,6 +58,7 @@ def runcmd(args, dir = None):
if dir:
os.chdir(olddir)
+
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
@@ -214,7 +217,7 @@ class PatchTree(PatchSet):
with open(self.seriespath, 'w') as f:
for p in patches:
f.write(p)
-
+
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
@@ -291,13 +294,32 @@ class PatchTree(PatchSet):
self.Pop(all=True)
class GitApplyTree(PatchTree):
- patch_line_prefix = '%% original patch'
- ignore_commit_prefix = '%% ignore'
+ notes_ref = "refs/notes/devtool"
+ original_patch = 'original patch'
+ ignore_commit = 'ignore'
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
self.commituser = d.getVar('PATCH_GIT_USER_NAME')
self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+ if not self._isInitialized(d):
+ self._initRepo()
+
+ def _isInitialized(self, d):
+ cmd = "git rev-parse --show-toplevel"
+ try:
+ output = runcmd(cmd.split(), self.dir).strip()
+ except CmdError as err:
+ ## runcmd returned non-zero which most likely means 128
+ ## Not a git directory
+ return False
+ ## Make sure repo is in builddir to not break top-level git repos, or under workdir
+ return os.path.samefile(output, self.dir) or oe.path.is_path_parent(d.getVar('WORKDIR'), output)
+
+ def _initRepo(self):
+ runcmd("git init".split(), self.dir)
+ runcmd("git add .".split(), self.dir)
+ runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir)
@staticmethod
def extractPatchHeader(patchfile):
@@ -431,7 +453,7 @@ class GitApplyTree(PatchTree):
# Prepare git command
cmd = ["git"]
GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
- cmd += ["commit", "-F", tmpfile]
+ cmd += ["commit", "-F", tmpfile, "--no-verify"]
# git doesn't like plain email addresses as authors
if author and '<' in author:
cmd.append('--author="%s"' % author)
@@ -440,44 +462,131 @@ class GitApplyTree(PatchTree):
return (tmpfile, cmd)
@staticmethod
- def extractPatches(tree, startcommit, outdir, paths=None):
+ def addNote(repo, ref, key, value=None):
+ note = key + (": %s" % value if value else "")
+ notes_ref = GitApplyTree.notes_ref
+ runcmd(["git", "config", "notes.rewriteMode", "ignore"], repo)
+ runcmd(["git", "config", "notes.displayRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "config", "notes.rewriteRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "notes", "--ref", notes_ref, "append", "-m", note, ref], repo)
+
+ @staticmethod
+ def removeNote(repo, ref, key):
+ notes = GitApplyTree.getNotes(repo, ref)
+ notes = {k: v for k, v in notes.items() if k != key and not k.startswith(key + ":")}
+ runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "remove", "--ignore-missing", ref], repo)
+ for note, value in notes.items():
+ GitApplyTree.addNote(repo, ref, note, value)
+
+ @staticmethod
+ def getNotes(repo, ref):
+ import re
+
+ note = None
+ try:
+ note = runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "show", ref], repo)
+ prefix = ""
+ except CmdError:
+ note = runcmd(['git', 'show', '-s', '--format=%B', ref], repo)
+ prefix = "%% "
+
+ note_re = re.compile(r'^%s(.*?)(?::\s*(.*))?$' % prefix)
+ notes = dict()
+ for line in note.splitlines():
+ m = note_re.match(line)
+ if m:
+ notes[m.group(1)] = m.group(2)
+
+ return notes
+
+ @staticmethod
+ def commitIgnored(subject, dir=None, files=None, d=None):
+ if files:
+ runcmd(['git', 'add'] + files, dir)
+ cmd = ["git"]
+ GitApplyTree.gitCommandUserOptions(cmd, d=d)
+ cmd += ["commit", "-m", subject, "--no-verify"]
+ runcmd(cmd, dir)
+ GitApplyTree.addNote(dir, "HEAD", GitApplyTree.ignore_commit)
+
+ @staticmethod
+ def extractPatches(tree, startcommits, outdir, paths=None):
import tempfile
import shutil
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
- if paths:
- shellcmd.append('--')
- shellcmd.extend(paths)
- out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
- if out:
- for srcfile in out.split():
- for encoding in ['utf-8', 'latin-1']:
- patchlines = []
- outfile = None
- try:
- with open(srcfile, 'r', encoding=encoding) as f:
- for line in f:
- if line.startswith(GitApplyTree.patch_line_prefix):
- outfile = line.split()[-1].strip()
- continue
- if line.startswith(GitApplyTree.ignore_commit_prefix):
- continue
- patchlines.append(line)
- except UnicodeDecodeError:
+ for name, rev in startcommits.items():
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", rev, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.join(tree, name))
+ if out:
+ for srcfile in out.split():
+ # This loop, which is used to remove any line that
+ # starts with "%% original patch", is kept for backwards
+ # compatibility. If/when that compatibility is dropped,
+ # it can be replaced with code to just read the first
+ # line of the patch file to get the SHA-1, and the code
+ # below that writes the modified patch file can be
+ # replaced with a simple file move.
+ for encoding in ['utf-8', 'latin-1']:
+ patchlines = []
+ try:
+ with open(srcfile, 'r', encoding=encoding, newline='') as f:
+ for line in f:
+ if line.startswith("%% " + GitApplyTree.original_patch):
+ continue
+ patchlines.append(line)
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
+
+ sha1 = patchlines[0].split()[1]
+ notes = GitApplyTree.getNotes(os.path.join(tree, name), sha1)
+ if GitApplyTree.ignore_commit in notes:
continue
- break
- else:
- raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
-
- if not outfile:
- outfile = os.path.basename(srcfile)
- with open(os.path.join(outdir, outfile), 'w') as of:
- for line in patchlines:
- of.write(line)
+ outfile = notes.get(GitApplyTree.original_patch, os.path.basename(srcfile))
+
+ bb.utils.mkdirhier(os.path.join(outdir, name))
+ with open(os.path.join(outdir, name, outfile), 'w') as of:
+ for line in patchlines:
+ of.write(line)
finally:
shutil.rmtree(tempdir)
+ def _need_dirty_check(self):
+ fetch = bb.fetch2.Fetch([], self.d)
+ check_dirtyness = False
+ for url in fetch.urls:
+ url_data = fetch.ud[url]
+ parm = url_data.parm
+ # a git url with subpath param will surely be dirty
+ # since the git tree from which we clone will be emptied
+ # from all files that are not in the subpath
+ if url_data.type == 'git' and parm.get('subpath'):
+ check_dirtyness = True
+ return check_dirtyness
+
+ def _commitpatch(self, patch, patchfilevar):
+ output = ""
+ # Add all files
+ shellcmd = ["git", "add", "-f", "-A", "."]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Exclude the patches directory
+ shellcmd = ["git", "reset", "HEAD", self.patchdir]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Commit the result
+ (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
+ try:
+ shellcmd.insert(0, patchfilevar)
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ finally:
+ os.remove(tmpfile)
+ return output
+
def _applypatch(self, patch, force = False, reverse = False, run = True):
import shutil
@@ -492,27 +601,26 @@ class GitApplyTree(PatchTree):
return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Add hooks which add a pointer to the original patch file name in the commit message
reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
if not reporoot:
raise Exception("Cannot get repository root for directory %s" % self.dir)
- hooks_dir = os.path.join(reporoot, '.git', 'hooks')
- hooks_dir_backup = hooks_dir + '.devtool-orig'
- if os.path.lexists(hooks_dir_backup):
- raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
- if os.path.lexists(hooks_dir):
- shutil.move(hooks_dir, hooks_dir_backup)
- os.mkdir(hooks_dir)
- commithook = os.path.join(hooks_dir, 'commit-msg')
- applyhook = os.path.join(hooks_dir, 'applypatch-msg')
- with open(commithook, 'w') as f:
- # NOTE: the formatting here is significant; if you change it you'll also need to
- # change other places which read it back
- f.write('echo "\n%s: $PATCHFILE" >> $1' % GitApplyTree.patch_line_prefix)
- os.chmod(commithook, 0o755)
- shutil.copy2(commithook, applyhook)
+
+ patch_applied = True
try:
patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+ if self._need_dirty_check():
+ # Check dirtyness of the tree
+ try:
+ output = runcmd(["git", "--work-tree=%s" % reporoot, "status", "--short"])
+ except CmdError:
+ pass
+ else:
+ if output:
+ # The tree is dirty, no need to try to apply patches with git anymore
+ # since they fail, fallback directly to patch
+ output = PatchTree._applypatch(self, patch, force, reverse, run)
+ output += self._commitpatch(patch, patchfilevar)
+ return output
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
@@ -539,24 +647,14 @@ class GitApplyTree(PatchTree):
except CmdError:
# Fall back to patch
output = PatchTree._applypatch(self, patch, force, reverse, run)
- # Add all files
- shellcmd = ["git", "add", "-f", "-A", "."]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Exclude the patches directory
- shellcmd = ["git", "reset", "HEAD", self.patchdir]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Commit the result
- (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
- try:
- shellcmd.insert(0, patchfilevar)
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- finally:
- os.remove(tmpfile)
+ output += self._commitpatch(patch, patchfilevar)
return output
+ except:
+ patch_applied = False
+ raise
finally:
- shutil.rmtree(hooks_dir)
- if os.path.lexists(hooks_dir_backup):
- shutil.move(hooks_dir_backup, hooks_dir)
+ if patch_applied:
+ GitApplyTree.addNote(self.dir, "HEAD", GitApplyTree.original_patch, os.path.basename(patch['file']))
class QuiltTree(PatchSet):
@@ -579,6 +677,8 @@ class QuiltTree(PatchSet):
def Clean(self):
try:
+ # make sure that patches/series file exists before quilt pop to keep quilt-0.67 happy
+ open(os.path.join(self.dir, "patches","series"), 'a').close()
self._runcmd(["pop", "-a", "-f"])
oe.path.remove(os.path.join(self.dir, "patches","series"))
except Exception:
@@ -715,8 +815,9 @@ class NOOPResolver(Resolver):
self.patchset.Push()
except Exception:
import sys
- os.chdir(olddir)
raise
+ finally:
+ os.chdir(olddir)
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
@@ -776,9 +877,9 @@ class UserResolver(Resolver):
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
- os.chdir(olddir)
raise
- os.chdir(olddir)
+ finally:
+ os.chdir(olddir)
def patch_path(url, fetch, workdir, expand=True):
@@ -898,4 +999,3 @@ def should_apply(parm, d):
return False, "applies to later version"
return True, None
-
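The hook-based '%% original patch' markers are replaced above by git notes under refs/notes/devtool: _applypatch() records the original patch file name as a note on the resulting commit, commitIgnored() tags bookkeeping commits, and extractPatches() reads the notes back to name the exported patches and skip ignored commits. A rough round-trip sketch, assuming a hypothetical, already-initialized git work tree repo_dir and a datastore d:

    # Sketch only - repo_dir and d are placeholders for a real checkout and datastore
    from oe.patch import GitApplyTree

    GitApplyTree.addNote(repo_dir, "HEAD", GitApplyTree.original_patch, "0001-fix-build.patch")
    notes = GitApplyTree.getNotes(repo_dir, "HEAD")
    assert notes.get(GitApplyTree.original_patch) == "0001-fix-build.patch"

    # Commits created for bookkeeping (not from a patch) are marked so extractPatches() skips them
    GitApplyTree.commitIgnored("update series file", dir=repo_dir, files=["patches/series"], d=d)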
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index c8d8ad05b9..5d21cdcbdf 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -123,7 +125,8 @@ def copyhardlinktree(src, dst):
if os.path.isdir(src):
if len(glob.glob('%s/.??*' % src)) > 0:
source = './.??* '
- source += './*'
+ if len(glob.glob('%s/**' % src)) > 0:
+ source += './*'
s_dir = src
else:
source = src
@@ -169,6 +172,9 @@ def symlink(source, destination, force=False):
if e.errno != errno.EEXIST or os.readlink(destination) != source:
raise
+def relsymlink(target, name, force=False):
+ symlink(os.path.relpath(target, os.path.dirname(name)), name, force=force)
+
def find(dir, **walkoptions):
""" Given a directory, recurses into that directory,
returning all files as absolute paths. """
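relsymlink() above simply rewrites the target relative to the link's own directory before delegating to symlink(); a quick illustration of the value it computes, using made-up pkgdata paths:

    import os
    # relsymlink("/pkgdata/runtime/busybox", "/pkgdata/runtime-reverse/busybox") would create a
    # link whose target is expressed relative to the link's directory:
    print(os.path.relpath("/pkgdata/runtime/busybox",
                          os.path.dirname("/pkgdata/runtime-reverse/busybox")))
    # -> ../runtime/busybox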
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index fcdbe66c19..c41242c878 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -7,11 +9,10 @@ def prserv_make_conn(d, check = False):
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
- conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ conn = prserv.serv.connect(host_params[0], int(host_params[1]))
if check:
if not conn.ping():
raise Exception('service not available')
- d.setVar("__PRSERV_CONN",conn)
except Exception as exc:
bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
@@ -22,31 +23,29 @@ def prserv_dump_db(d):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#dump db
opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
- return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ d = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ conn.close()
+ return d
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#get the entry values
imported = []
prefix = "PRAUTO$"
@@ -70,6 +69,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
else:
imported.append((version,pkgarch,checksum,value))
+ conn.close()
return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
@@ -78,8 +78,7 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
df = d.getVar('PRSERV_DUMPFILE')
#write data
- lf = bb.utils.lockfile("%s.lock" % df)
- with open(df, "a") as f:
+ with open(df, "a") as f, bb.utils.fileslocked(["%s.lock" % df]) as locks:
if metainfo:
#dump column info
f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
@@ -113,7 +112,6 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
if not nomax:
for i in idx:
f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
- bb.utils.unlockfile(lf)
def prserv_check_avail(d):
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
@@ -125,4 +123,5 @@ def prserv_check_avail(d):
except TypeError:
bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
else:
- prserv_make_conn(d, True)
+ conn = prserv_make_conn(d, True)
+ conn.close()
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index e8a854a302..f8ae3c743f 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -48,6 +50,9 @@ class ELFFile:
return self
def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
if self.data:
self.data.close()
@@ -128,6 +133,9 @@ class ELFFile:
"""
return self.getShort(ELFFile.E_MACHINE)
+ def set_objdump(self, cmd, output):
+ self.objdump_output[cmd] = output
+
def run_objdump(self, cmd, d):
import bb.process
import sys
@@ -171,6 +179,57 @@ def elf_machine_to_string(machine):
except:
return "Unknown (%s)" % repr(machine)
+def write_error(type, error, d):
+ logfile = d.getVar('QA_LOGFILE')
+ if logfile:
+ p = d.getVar('P')
+ with open(logfile, "a+") as f:
+ f.write("%s: %s [%s]\n" % (p, error, type))
+
+def handle_error(error_class, error_msg, d):
+ if error_class in (d.getVar("ERROR_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
+ d.setVar("QA_ERRORS_FOUND", "True")
+ return False
+ elif error_class in (d.getVar("WARN_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
+ else:
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
+ return True
+
+def add_message(messages, section, new_msg):
+ if section not in messages:
+ messages[section] = new_msg
+ else:
+ messages[section] = messages[section] + "\n" + new_msg
+
+def exit_with_message_if_errors(message, d):
+ qa_fatal_errors = bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False)
+ if qa_fatal_errors:
+ bb.fatal(message)
+
+def exit_if_errors(d):
+ exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
+
+def check_upstream_status(fullpath):
+ import re
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"
+
+ with open(fullpath, encoding='utf-8', errors='ignore') as f:
+ file_content = f.read()
+ match_kinda = kinda_status_re.search(file_content)
+ match_strict = strict_status_re.search(file_content)
+
+ if not match_strict:
+ if match_kinda:
+ return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0))
+ else:
+ return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
+
if __name__ == "__main__":
import sys
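The handle_error()/exit_if_errors() pair added above centralises WARN_QA/ERROR_QA handling: handle_error() warns or errors depending on which list the class appears in (recording QA_ERRORS_FOUND and returning False on error), and exit_if_errors() turns any recorded error into a fatal task failure. A sketch of the intended calling pattern, assuming a datastore d, a list of patch paths, and a hypothetical "patch-status" QA class:

    # Sketch only - d is a BitBake datastore, patches a list of patch file paths
    import oe.qa

    for fullpath in patches:
        msg = oe.qa.check_upstream_status(fullpath)
        if msg:
            oe.qa.handle_error("patch-status", msg, d)

    # At the end of the task, fail if any class listed in ERROR_QA was hit above
    oe.qa.exit_if_errors(d)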
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 407d168894..de1fbdd3a8 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -24,9 +24,9 @@ from collections import OrderedDict, defaultdict
from bb.utils import vercmp_string
# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha[0-9]+sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
@@ -47,7 +47,7 @@ def simplify_history(history, d):
continue
has_set = True
elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
- # Reminder: "append" and "prepend" mean += and =+ respectively, NOT _append / _prepend
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend
if has_set:
continue
ret_history.insert(0, event)
@@ -342,7 +342,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None
def override_applicable(hevent):
op = hevent['op']
if '[' in op:
- opoverrides = op.split('[')[1].split(']')[0].split('_')
+ opoverrides = op.split('[')[1].split(']')[0].split(':')
for opoverride in opoverrides:
if not opoverride in overrides:
return False
@@ -368,13 +368,13 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None
recipe_set = True
if not recipe_set:
for event in history:
- if event['op'].startswith('_remove'):
+ if event['op'].startswith(':remove'):
continue
if not override_applicable(event):
continue
newvalue = value.replace(event['detail'], '')
- if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith('_'):
- op = event['op'].replace('[', '_').replace(']', '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'):
+ op = event['op'].replace('[', ':').replace(']', '')
extravals[var + op] = None
value = newvalue
vals[var] = ('+=', value)
@@ -414,15 +414,13 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F
fetch_urls(d)
if all_variants:
- # Get files for other variants e.g. in the case of a SRC_URI_append
+ # Get files for other variants e.g. in the case of a SRC_URI:append
localdata = bb.data.createCopy(d)
variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
if variants:
# Ensure we handle class-target if we're dealing with one of the variants
variants.append('target')
for variant in variants:
- if variant.startswith("devupstream"):
- localdata.setVar('SRCPV', 'git')
localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
fetch_urls(localdata)
@@ -666,19 +664,23 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None, update_original_recipe=False):
"""
Writes a bbappend file for a recipe
Parameters:
rd: data dictionary for the recipe
destlayerdir: base directory of the layer to place the bbappend in
(subdirectory path from there will be determined automatically)
- srcfiles: dict of source files to add to SRC_URI, where the value
- is the full path to the file to be added, and the value is the
- original filename as it would appear in SRC_URI or None if it
- isn't already present. You may pass None for this parameter if
- you simply want to specify your own content via the extralines
- parameter.
+ srcfiles: dict of source files to add to SRC_URI, where the key
+ is the full path to the file to be added, and the value is a
+ dict with following optional keys:
+ path: the original filename as it would appear in SRC_URI
+ or None if it isn't already present.
+ patchdir: the patchdir parameter
+ newname: the name to give to the new added file. None to use
+ the default value: basename(path)
+ You may pass None for this parameter if you simply want to specify
+ your own content via the extralines parameter.
install: dict mapping entries in srcfiles to a tuple of two elements:
install path (*without* ${D} prefix) and permission value (as a
string, e.g. '0644').
@@ -696,18 +698,32 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
redirect_output:
If specified, redirects writing the output file to the
specified directory (for dry-run purposes)
+ params:
+ Parameters to use when adding entries to SRC_URI. If specified,
+ should be a list of dicts with the same length as srcfiles.
+ update_original_recipe:
+ Force to update the original recipe instead of creating/updating
+ a bbappend. destlayerdir must contain the original recipe

"""
if not removevalues:
removevalues = {}
- # Determine how the bbappend should be named
- appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
- if not appendpath:
- bb.error('Unable to determine layer directory containing %s' % recipefile)
- return (None, None)
- if not pathok:
- bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+ recipefile = rd.getVar('FILE')
+ if update_original_recipe:
+ if destlayerdir not in recipefile:
+ bb.error("destlayerdir %s doesn't contain the original recipe (%s), cannot update it" % (destlayerdir, recipefile))
+ return (None, None)
+
+ appendpath = recipefile
+ else:
+ # Determine how the bbappend should be named
+ appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+ if not appendpath:
+ bb.error('Unable to determine layer directory containing %s' % recipefile)
+ return (None, None)
+ if not pathok:
+ bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
if not redirect_output:
@@ -752,30 +768,48 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bbappendlines.append((varname, op, value))
destsubdir = rd.getVar('PN')
- if srcfiles:
- bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+ if not update_original_recipe and srcfiles:
+ bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))
appendoverride = ''
if machine:
bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
- appendoverride = '_%s' % machine
+ appendoverride = ':%s' % machine
copyfiles = {}
if srcfiles:
instfunclines = []
- for newfile, origsrcfile in srcfiles.items():
- srcfile = origsrcfile
+ for i, (newfile, param) in enumerate(srcfiles.items()):
srcurientry = None
- if not srcfile:
- srcfile = os.path.basename(newfile)
+ if not 'path' in param or not param['path']:
+ if 'newname' in param and param['newname']:
+ srcfile = param['newname']
+ else:
+ srcfile = os.path.basename(newfile)
srcurientry = 'file://%s' % srcfile
+ oldentry = None
+ for uri in rd.getVar('SRC_URI').split():
+ if srcurientry in uri:
+ oldentry = uri
+ if params and params[i]:
+ srcurientry = '%s;%s' % (srcurientry, ';'.join('%s=%s' % (k,v) for k,v in params[i].items()))
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
- appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ if oldentry:
+ appendline('SRC_URI:remove%s' % appendoverride, '=', ' ' + oldentry)
+ appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
else:
+ if oldentry:
+ if update_original_recipe:
+ removevalues['SRC_URI'] = oldentry
+ else:
+ appendline('SRC_URI:remove', '=', oldentry)
appendline('SRC_URI', '+=', srcurientry)
- copyfiles[newfile] = srcfile
+ param['path'] = srcfile
+ else:
+ srcfile = param['path']
+ copyfiles[newfile] = param
if install:
institem = install.pop(newfile, None)
if institem:
@@ -786,7 +820,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
instfunclines.append(instdirline)
instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
if instfunclines:
- bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+ bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines))
if redirect_output:
bb.note('Writing append file %s (dry-run)' % appendpath)
@@ -795,6 +829,8 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
# multiple times per operation when we're handling overrides)
if os.path.exists(appendpath) and not os.path.exists(outfile):
shutil.copy2(appendpath, outfile)
+ elif update_original_recipe:
+ outfile = recipefile
else:
bb.note('Writing append file %s' % appendpath)
outfile = appendpath
@@ -804,15 +840,15 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
extvars = {'destsubdir': destsubdir}
def appendfile_varfunc(varname, origvalue, op, newlines):
- if varname == 'FILESEXTRAPATHS_prepend':
+ if varname == 'FILESEXTRAPATHS:prepend':
if origvalue.startswith('${THISDIR}/'):
- popline('FILESEXTRAPATHS_prepend')
+ popline('FILESEXTRAPATHS:prepend')
extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
elif varname == 'PACKAGE_ARCH':
if machine:
popline('PACKAGE_ARCH')
return (machine, None, 4, False)
- elif varname.startswith('do_install_append'):
+ elif varname.startswith('do_install:append'):
func = popline(varname)
if func:
instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
@@ -824,7 +860,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
splitval = split_var_value(origvalue, assignment=False)
changed = False
removevar = varname
- if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]:
removevar = 'SRC_URI'
line = popline(varname)
if line:
@@ -853,11 +889,11 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
newvalue = splitval
if len(newvalue) == 1:
# Ensure it's written out as one line
- if '_append' in varname:
+ if ':append' in varname:
newvalue = ' ' + newvalue[0]
else:
newvalue = newvalue[0]
- if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ if not newvalue and (op in ['+=', '.='] or ':append' in varname):
# There's no point appending nothing
newvalue = None
if varname.endswith('()'):
@@ -898,7 +934,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
outdir = redirect_output
else:
outdir = appenddir
- for newfile, srcfile in copyfiles.items():
+ for newfile, param in copyfiles.items():
+ srcfile = param['path']
+ patchdir = param.get('patchdir', ".")
+
+ if patchdir != ".":
+ newfile = os.path.join(os.path.split(newfile)[0], patchdir, os.path.split(newfile)[1])
filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
if newfile.startswith(tempfile.gettempdir()):
@@ -942,10 +983,9 @@ def replace_dir_vars(path, d):
path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
return path
-def get_recipe_pv_without_srcpv(pv, uri_type):
+def get_recipe_pv_with_pfx_sfx(pv, uri_type):
"""
- Get PV without SRCPV common in SCM's for now only
- support git.
+ Get PV separating prefix and suffix components.
Returns tuple with pv, prefix and suffix.
"""
@@ -953,7 +993,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+)?)(?P<rev>.*)")
m = git_regex.match(pv)
if m:
@@ -1005,7 +1045,7 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ (pv, pfx, sfx) = get_recipe_pv_with_pfx_sfx(rd.getVar('PV'), uri_type)
ru['current_version'] = pv
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
@@ -1029,10 +1069,11 @@ def get_recipe_upstream_version(rd):
else:
ud = bb.fetch2.FetchData(src_uri, rd)
if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ bb.fetch2.get_srcrev(rd)
revision = ud.method.latest_revision(ud, rd, 'default')
upversion = pv
if revision != rd.getVar("SRCREV"):
- upversion = upversion + "-new-commits-available"
+ upversion = upversion + "-new-commits-available"
else:
pupver = ud.method.latest_versionstring(ud, rd)
(upversion, revision) = pupver
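To make the reworked srcfiles/params interface of bbappend_recipe() above concrete, here is a rough call sketch; rd is the recipe datastore, layerdir an existing layer, and the file names and SRC_URI parameters are purely illustrative:

    # Sketch only - not a definitive use of the API
    from oe.recipeutils import bbappend_recipe

    srcfiles = {
        "/tmp/0001-fix-build.patch": {
            "path": None,        # not already present in SRC_URI
            "patchdir": ".",     # optional patchdir parameter for the SRC_URI entry
            "newname": None,     # keep the basename of the added file
        },
    }
    params = [{"striplevel": "2"}]    # one dict per srcfiles entry
    bbappend_recipe(rd, layerdir, srcfiles, params=params, update_original_recipe=False)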
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
index 204b9bd734..448befce33 100644
--- a/meta/lib/oe/reproducible.py
+++ b/meta/lib/oe/reproducible.py
@@ -1,10 +1,63 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
import subprocess
import bb
+# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
+# component's build environment. The format is number of seconds since the
+# system epoch.
+#
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
+#
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value which reads from the SDE_FILE, or if the file
+# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally constructed from the function
+# create_source_date_epoch_stamp which is typically added as a postfuncs to
+# the do_unpack task. If a recipe does NOT have do_unpack, it should be added
+# to a task that runs after the source is available and before the
+# do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
+#
+# There are 5 ways the create_source_date_epoch_stamp function determines what
+# becomes SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from __source_date_epoch.txt file if this file exists.
+# This file was most likely created in the previous build by one of the
+# following methods 2,3,4.
+# Alternatively, it can be provided by a recipe via SRC_URI.
+#
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+# Git does not preserve file timestamps on checkout.
+#
+# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
+# This works for well-kept repositories distributed via tarball.
+#
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
+# This will be the newest file from the distribution tarball, if any.
+#
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
+#
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
+
def get_source_date_epoch_from_known_files(d, sourcedir):
source_date_epoch = None
newest_file = None
@@ -41,7 +94,7 @@ def find_git_folder(d, sourcedir):
for root, dirs, files in os.walk(workdir, topdown=True):
dirs[:] = [d for d in dirs if d not in exclude]
if '.git' in dirs:
- return root
+ return os.path.join(root, ".git")
bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
return None
@@ -62,7 +115,8 @@ def get_source_date_epoch_from_git(d, sourcedir):
return None
bb.debug(1, "git repository: %s" % gitpath)
- p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
return int(p.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
@@ -77,6 +131,9 @@ def get_source_date_epoch_from_youngest_file(d, sourcedir):
files = [f for f in files if not f[0] == '.']
for fname in files:
+ if fname == "singletask.lock":
+ # Ignore externalsrc/devtool lockfile [YOCTO #14921]
+ continue
filename = os.path.join(root, fname)
try:
mtime = int(os.lstat(filename).st_mtime)
@@ -101,8 +158,40 @@ def fixed_source_date_epoch(d):
def get_source_date_epoch(d, sourcedir):
return (
get_source_date_epoch_from_git(d, sourcedir) or
- get_source_date_epoch_from_known_files(d, sourcedir) or
get_source_date_epoch_from_youngest_file(d, sourcedir) or
fixed_source_date_epoch(d) # Last resort
)
+def epochfile_read(epochfile, d):
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
+ return cached
+
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ try:
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ except FileNotFoundError:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
+ return str(source_date_epoch)
+
+def epochfile_write(source_date_epoch, epochfile, d):
+
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ bb.utils.mkdirhier(os.path.dirname(epochfile))
+
+ tmp_file = "%s.new" % epochfile
+ with open(tmp_file, 'w') as f:
+ f.write(str(source_date_epoch))
+ os.rename(tmp_file, epochfile)
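epochfile_write()/epochfile_read() above give the SDE_FILE a plain-text round trip, with an atomic rename on write and a cached value on read. A minimal sketch, assuming a datastore d that defines SDE_FILE and SOURCE_DATE_EPOCH_FALLBACK:

    # Sketch only - d is a BitBake datastore
    import oe.reproducible

    sde_file = d.getVar("SDE_FILE")
    oe.reproducible.epochfile_write(1672531200, sde_file, d)              # store the chosen epoch
    assert oe.reproducible.epochfile_read(sde_file, d) == "1672531200"    # read back as a string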
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 5f81023040..8cd48f9450 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
@@ -104,7 +106,7 @@ class Rootfs(object, metaclass=ABCMeta):
def _cleanup(self):
pass
- def _setup_dbg_rootfs(self, dirs):
+ def _setup_dbg_rootfs(self, package_paths):
gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -114,17 +116,18 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-orig')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')
bb.note(" Creating debug rootfs...")
bb.utils.mkdirhier(self.image_rootfs)
bb.note(" Copying back package database...")
- for dir in dirs:
- if not os.path.isdir(self.image_rootfs + '-orig' + dir):
- continue
- bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
- shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
+ for path in package_paths:
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
+ if os.path.isdir(self.image_rootfs + '-orig' + path):
+ shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
+ elif os.path.isfile(self.image_rootfs + '-orig' + path):
+ shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
@@ -160,25 +163,26 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install extra debug packages...")
self.pm.install(extra_debug_pkgs.split(), True)
+ bb.note(" Removing package database...")
+ for path in package_paths:
+ if os.path.isdir(self.image_rootfs + path):
+ shutil.rmtree(self.image_rootfs + path)
+ elif os.path.isfile(self.image_rootfs + path):
+ os.remove(self.image_rootfs + path)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+ bb.note(" Restoring original rootfs...")
+ bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT')
- if fakerootcmd is not None:
- exec_cmd = [fakerootcmd, cmd]
- else:
- exec_cmd = cmd
-
try:
- subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
@@ -190,9 +194,17 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
- bb.utils.mkdirhier(self.image_rootfs)
+ def make_last(command, commands):
+ commands = commands.split()
+ if command in commands:
+ commands.remove(command)
+ commands.append(command)
+ return "".join(commands)
- bb.utils.mkdirhier(self.deploydir)
+ # We want this to run as late as possible, in particular after
+ # systemd_sysusers_create and set_user_group. Using :append is not enough
+ make_last("tidy_shadowutils_files", post_process_cmds)
+ make_last("rootfs_reproducible", post_process_cmds)
execute_pre_post_process(self.d, pre_process_cmds)
@@ -253,7 +265,7 @@ class Rootfs(object, metaclass=ABCMeta):
# Remove the run-postinsts package if no delayed postinsts are found
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_unitdir}/system/run-postinsts.service")):
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
self.pm.remove(["run-postinsts"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
@@ -302,10 +314,20 @@ class Rootfs(object, metaclass=ABCMeta):
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
'new', '-v', '-X'])
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
+ True, False, self.d)
+ if image_rorfs or not ldconfig_in_features:
+ ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
+ if os.path.exists(ldconfig_cache_dir):
+ bb.note("Removing ldconfig auxiliary cache...")
+ shutil.rmtree(ldconfig_cache_dir)
+
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz", ".ko.zst"))
if found_ko:
return found_ko
return False
@@ -317,17 +339,30 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
- 'kernel-abiversion')
- if not os.path.exists(kernel_abi_ver_file):
- bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+ pkgdatadir = self.d.getVar('PKGDATA_DIR')
+
+ # PKGDATA_DIR can include multiple kernels so we run depmod for each
+ # one of them.
+ for direntry in os.listdir(pkgdatadir):
+ match = re.match('(.*)-depmod', direntry)
+ if not match:
+ continue
+ kernel_package_name = match.group(1)
- kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
- versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+ kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- bb.utils.mkdirhier(versioned_modules_dir)
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
- self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+ versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
+ if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
+ bb.fatal("Kernel modules dependency generation failed")
"""
Create devfs:
@@ -376,6 +411,10 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
def image_list_installed_packages(d, rootfs_dir=None):
+ # There's no rootfs for baremetal images
+ if bb.data.inherits_class('baremetal-image', d):
+ return ""
+
if not rootfs_dir:
rootfs_dir = d.getVar('IMAGE_ROOTFS')
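One detail in the _check_for_kernel_modules() change above: str.endswith() accepts a tuple of suffixes, which is what lets a single test also match compressed modules. A tiny sketch (module names are made up):

    suffixes = (".ko", ".ko.gz", ".ko.xz", ".ko.zst")
    assert "nls_cp437.ko.zst".endswith(suffixes)
    assert not "modules.dep".endswith(suffixes)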
diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py
new file mode 100644
index 0000000000..185553eeeb
--- /dev/null
+++ b/meta/lib/oe/rust.py
@@ -0,0 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# Handle mismatches between `uname -m`-style output and Rust's arch names
+def arch_to_rust_arch(arch):
+ if arch == "ppc64le":
+ return "powerpc64le"
+ if arch in ('riscv32', 'riscv64'):
+ return arch + 'gc'
+ return arch
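A quick sketch of how the new helper maps a few `uname -m` style values (assuming it is imported from oe.rust; the inputs are just examples):

    assert arch_to_rust_arch("ppc64le") == "powerpc64le"
    assert arch_to_rust_arch("riscv64") == "riscv64gc"
    assert arch_to_rust_arch("x86_64") == "x86_64"   # anything else passes through unchanged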
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..fd4b6895d8
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,120 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+import os
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_download_spdxid(d, idx):
+ return "SPDXRef-Download-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
+ return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")
+
+
+def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
+ for pkgarch in search_arches:
+ p = _doc_path_by_namespace(spdx_deploy, pkgarch, doc_namespace)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
+ return (
+ spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
+ )
+
+
+def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
+ for pkgarch in search_arches:
+ p = _doc_path_by_hashfn(spdx_deploy, pkgarch, doc_name, hashfn)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def doc_path(spdx_deploy, doc_name, arch, subdir):
+ return spdx_deploy / arch / subdir / (doc_name + ".spdx.json")
+
+
+def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
+
+ l = _doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace)
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ l = _doc_path_by_hashfn(
+ spdx_deploy, arch, spdx_doc.name, d.getVar("BB_HASHFILENAME")
+ )
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
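The SPDXRef naming helpers above are plain string formatting, so their output is easy to illustrate (assuming they are imported from oe.sbom; the package and image names are examples):

    assert get_package_spdxid("busybox") == "SPDXRef-Package-busybox"
    assert get_packaged_file_spdxid("busybox", 0) == "SPDXRef-PackagedFile-busybox-0"
    assert get_image_spdxid("core-image-minimal") == "SPDXRef-Image-core-image-minimal"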
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index 37b59afd1a..3dc3672210 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -68,7 +70,7 @@ class Sdk(object, metaclass=ABCMeta):
#FIXME: using umbrella exc catching because bb.utils method raises it
except Exception as e:
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.error("unable to place %s in final SDK location" % sourcefile)
+ bb.fatal("unable to place %s in final SDK location" % sourcefile)
def mkdirhier(self, dirpath):
try:
@@ -115,6 +117,10 @@ def sdk_list_installed_packages(d, target, rootfs_dir=None):
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+ if target is False:
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
img_type = d.getVar('IMAGE_PKGTYPE')
import importlib
cls = importlib.import_module('oe.package_manager.' + img_type)
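The final lines of sdk_list_installed_packages() pick the package-manager backend dynamically from IMAGE_PKGTYPE. In rough terms, and assuming the oe modules are importable and an ipk-based image:

    import importlib

    img_type = "ipk"   # normally d.getVar('IMAGE_PKGTYPE')
    backend = importlib.import_module("oe.package_manager." + img_type)
    # the resolved module then provides the backend-specific package listing helpers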
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..7aaf2af5ed
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,357 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a
+# type-safe manner. It is not intended to encode any particular OE-specific
+# behaviors; see sbom.py for that.
+#
+# The SPDX specification document doesn't cover the JSON syntax for a
+# particular construct, which can make it hard to determine what the JSON
+# should look like. It is often much simpler to read the official SPDX JSON
+# schema, which can be found in the https://github.com/spdx/spdx-spec
+# repository under schemas/spdx-schema.json
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes that are used to implement SPDX objects
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A scalar SPDX object property of a SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if not name in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec, however you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has a sane "this field is unknown" value (e.g. "NOASSERTION")
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ ALLOWED_CHECKSUMS = [
+ "SHA1",
+ "SHA224",
+ "SHA256",
+ "SHA384",
+ "SHA512",
+ "MD2",
+ "MD4",
+ "MD5",
+ "MD6",
+ ]
+
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ supplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+ checksums = _ObjectList(SPDXChecksum)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
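To show how the declarative classes fit together, here is a minimal sketch that builds and serializes a document; it assumes the module is importable as oe.spdx and uses made-up package data:

    import io
    import oe.spdx

    doc = oe.spdx.SPDXDocument(name="example", documentNamespace="http://example.com/spdx/example")
    pkg = oe.spdx.SPDXPackage(name="busybox", SPDXID="SPDXRef-Package-busybox", versionInfo="1.36.1")
    doc.packages.append(pkg)
    # SPDXID defaults to "SPDXRef-DOCUMENT" and dataLicense to "CC0-1.0"
    doc.add_relationship(doc, "DESCRIBES", pkg)

    buf = io.BytesIO()
    doc_sha1 = doc.to_json(buf, sort_keys=True)   # returns the sha1 of the serialized JSON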
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 6cd6e11acc..a46e5502ab 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,9 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import bb.siggen
import bb.runqueue
import oe
+import netrc
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
# Return True if we should keep the dependency, False to drop it
@@ -28,6 +31,12 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
mc, _ = bb.runqueue.split_mc(fn)
+ # We can skip the rm_work task signature to avoid running the task
+ # when we remove some tasks from the dependency chain,
+ # e.g. INHERIT:remove = "create-spdx" will trigger the do_rm_work task
+ if task == "do_rm_work":
+ return False
+
# (Almost) always include our own inter-task dependencies (unless it comes
# from a mcdepends). The exception is the special
# do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass.
@@ -59,7 +68,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
- # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
@@ -84,15 +93,6 @@ def sstate_lockedsigs(d):
sigs[pn][task] = [h, siggen_lockedsigs_var]
return sigs
-class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
- name = "OEBasic"
- def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
- pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
-
class SignatureGeneratorOEBasicHashMixIn(object):
supports_multiconfig_datacaches = True
@@ -105,10 +105,11 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.lockedhashfn = {}
self.machine = data.getVar("MACHINE")
self.mismatch_msgs = []
+ self.mismatch_number = 0
+ self.lockedsigs_msgs = ""
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
- self.buildarch = data.getVar('BUILD_ARCH')
self._internal = False
pass
@@ -142,18 +143,12 @@ class SignatureGeneratorOEBasicHashMixIn(object):
super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
- sigfile = os.getcwd() + "/locked-sigs.inc"
- bb.plain("Writing locked sigs to %s" % sigfile)
- self.dump_lockedsigs(sigfile)
+ if 'lockedsigs' in options:
+ sigfile = os.getcwd() + "/locked-sigs.inc"
+ bb.plain("Writing locked sigs to %s" % sigfile)
+ self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def prep_taskhash(self, tid, deps, dataCaches):
- super().prep_taskhash(tid, deps, dataCaches)
- if hasattr(self, "extramethod"):
- (mc, _, _, fn) = bb.runqueue.split_tid_mcfn(tid)
- inherits = " ".join(dataCaches[mc].inherits[fn])
- if inherits.find("/native.bbclass") != -1 or inherits.find("/cross.bbclass") != -1:
- self.extramethod[tid] = ":" + self.buildarch
def get_taskhash(self, tid, deps, dataCaches):
if tid in self.lockedhashes:
@@ -196,6 +191,7 @@ class SignatureGeneratorOEBasicHashMixIn(object):
#bb.warn("Using %s %s %s" % (recipename, task, h))
if h != h_locked and h_locked != unihash:
+ self.mismatch_number += 1
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
@@ -210,10 +206,10 @@ class SignatureGeneratorOEBasicHashMixIn(object):
return self.lockedhashes[tid]
return super().get_stampfile_hash(tid)
- def get_unihash(self, tid):
+ def get_cached_unihash(self, tid):
if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
return self.lockedhashes[tid]
- return super().get_unihash(tid)
+ return super().get_cached_unihash(tid)
def dump_sigtask(self, fn, task, stampbase, runtime):
tid = fn + ":" + task
@@ -224,6 +220,9 @@ class SignatureGeneratorOEBasicHashMixIn(object):
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
for tid in self.runtaskdeps:
+ # Bitbake changed this to a tuple in newer versions
+ if isinstance(tid, tuple):
+ tid = tid[1]
if taskfilter:
if not tid in taskfilter:
continue
@@ -246,15 +245,26 @@ class SignatureGeneratorOEBasicHashMixIn(object):
continue
f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))
+
+ def dump_siglist(self, sigfile, path_prefix_strip=None):
+ def strip_fn(fn):
+ nonlocal path_prefix_strip
+ if not path_prefix_strip:
+ return fn
+
+ fn_exp = fn.split(":")
+ if fn_exp[-1].startswith(path_prefix_strip):
+ fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]
+
+ return ":".join(fn_exp)
- def dump_siglist(self, sigfile):
with open(sigfile, "w") as f:
tasks = []
for taskitem in self.taskhash:
(fn, task) = taskitem.rsplit(":", 1)
pn = self.lockedpnmap[fn]
- tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
for (pn, task, fn, taskhash) in sorted(tasks):
f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
@@ -262,6 +272,15 @@ class SignatureGeneratorOEBasicHashMixIn(object):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
+ info_msgs = None
+
+ if self.lockedsigs:
+ if len(self.lockedsigs) > 10:
+ self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non matching signature)" % (len(self.lockedsigs), self.mismatch_number)
+ else:
+ self.lockedsigs_msgs = "The following recipes have locked tasks:"
+ for pn in self.lockedsigs:
+ self.lockedsigs_msgs += " %s" % (pn)
for tid in sq_data['hash']:
if tid not in found:
@@ -274,7 +293,9 @@ class SignatureGeneratorOEBasicHashMixIn(object):
% (pn, taskname, sq_data['hash'][tid]))
checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
- if checklevel == 'warn':
+ if checklevel == 'info':
+ info_msgs = self.lockedsigs_msgs
+ if checklevel == 'warn' or checklevel == 'info':
warn_msgs += self.mismatch_msgs
elif checklevel == 'error':
error_msgs += self.mismatch_msgs
@@ -285,6 +306,8 @@ class SignatureGeneratorOEBasicHashMixIn(object):
elif checklevel == 'error':
error_msgs += sstate_missing_msgs
+ if info_msgs:
+ bb.note(info_msgs)
if warn_msgs:
bb.warn("\n".join(warn_msgs))
if error_msgs:
@@ -304,9 +327,21 @@ class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.sigge
self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
if not self.method:
bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
+ self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
+ self.username = data.getVar("BB_HASHSERVE_USERNAME")
+ self.password = data.getVar("BB_HASHSERVE_PASSWORD")
+ if not self.username or not self.password:
+ try:
+ n = netrc.netrc()
+ auth = n.authenticators(self.server)
+ if auth is not None:
+ self.username, _, self.password = auth
+ except FileNotFoundError:
+ pass
+ except netrc.NetrcParseError as e:
+ bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
# Insert these classes into siggen's namespace so it can see and select them
-bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
@@ -320,14 +355,14 @@ def find_siginfo(pn, taskname, taskhashlist, d):
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb:')
- taskname = splitit[1]
- pn = os.path.basename(splitit[0]).split('_')[0]
- if key.startswith('virtual:native:'):
- pn = pn + '-native'
+ if key.startswith("mc:"):
+ # mc:<mc>:<pn>:<task>
+ _, _, pn, taskname = key.split(':', 3)
+ else:
+ # <pn>:<task>
+ pn, taskname = key.split(':', 1)
hashfiles = {}
- filedates = {}
def get_hashval(siginfo):
if siginfo.endswith('.siginfo'):
@@ -335,6 +370,9 @@ def find_siginfo(pn, taskname, taskhashlist, d):
else:
return siginfo.rpartition('.')[2]
+ def get_time(fullpath):
+ return os.stat(fullpath).st_mtime
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -350,24 +388,21 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filespec = '%s.%s.sigdata.*' % (stamp, taskname)
foundall = False
import glob
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
for fullpath in glob.glob(filespec):
match = False
if taskhashlist:
for taskhash in taskhashlist:
if fullpath.endswith('.%s' % taskhash):
- hashfiles[taskhash] = fullpath
+ hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
if len(hashfiles) == len(taskhashlist):
foundall = True
break
else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except OSError:
- continue
hashval = get_hashval(fullpath)
- hashfiles[hashval] = fullpath
+ hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
- if not taskhashlist or (len(filedates) < 2 and not foundall):
+ if not taskhashlist or (len(hashfiles) < 2 and not foundall):
# That didn't work, look in sstate-cache
hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
@@ -376,35 +411,32 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('TARGET_VENDOR', '*')
localdata.setVar('TARGET_OS', '*')
localdata.setVar('PN', pn)
+ # gcc-source is a special case, same as with local stamps above
+ if pn.startswith("gcc-source"):
+ localdata.setVar('PN', "gcc")
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
+ localdata.setVar('SSTATE_CURRTASK', taskname[3:])
swspec = localdata.getVar('SSTATE_SWSPEC')
if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
- sstatename = taskname[3:]
- filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
+ filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
matchedfiles = glob.glob(filespec)
for fullpath in matchedfiles:
actual_hashval = get_hashval(fullpath)
if actual_hashval in hashfiles:
continue
- hashfiles[hashval] = fullpath
- if not taskhashlist:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}
- if taskhashlist:
- return hashfiles
- else:
- return filedates
+ return hashfiles
bb.siggen.find_siginfo = find_siginfo
+bb.siggen.find_siginfo_version = 2
def sstate_get_manifest_filename(task, d):
@@ -440,7 +472,7 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
elif "-cross-canadian" in taskdata:
pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
elif "-cross-" in taskdata:
- pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ pkgarchs = ["${BUILD_ARCH}"]
elif "-crosssdk" in taskdata:
pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
else:
@@ -449,11 +481,15 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
pkgarchs.append('allarch')
pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+ searched_manifests = []
+
for pkgarch in pkgarchs:
manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
if os.path.exists(manifest):
return manifest, d2
- bb.error("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ searched_manifests.append(manifest)
+ bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n %s"
+ % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n ".join(searched_manifests)))
return None, d2
def OEOuthashBasic(path, sigfile, task, d):
@@ -467,6 +503,8 @@ def OEOuthashBasic(path, sigfile, task, d):
import stat
import pwd
import grp
+ import re
+ import fnmatch
def update_hash(s):
s = s.encode('utf-8')
@@ -476,20 +514,37 @@ def OEOuthashBasic(path, sigfile, task, d):
h = hashlib.sha256()
prev_dir = os.getcwd()
+ corebase = d.getVar("COREBASE")
+ tmpdir = d.getVar("TMPDIR")
include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
if "package_write_" in task or task == "package_qa":
include_owners = False
include_timestamps = False
+ include_root = True
if task == "package":
- include_timestamps = d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1'
- extra_content = d.getVar('HASHEQUIV_HASH_VERSION')
+ include_timestamps = True
+ include_root = False
+ hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
+ extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")
+
+ filemaps = {}
+ for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
+ entry = m.split(":")
+ if len(entry) != 3 or entry[0] != task:
+ continue
+ filemaps.setdefault(entry[1], [])
+ filemaps[entry[1]].append(entry[2])
try:
os.chdir(path)
+ basepath = os.path.normpath(path)
update_hash("OEOuthashBasic\n")
- if extra_content:
- update_hash(extra_content + "\n")
+ if hash_version:
+ update_hash(hash_version + "\n")
+
+ if extra_sigdata:
+ update_hash(extra_sigdata + "\n")
# It is only currently useful to get equivalent hashes for things that
# can be restored from sstate. Since the sstate object is named using
@@ -534,28 +589,29 @@ def OEOuthashBasic(path, sigfile, task, d):
else:
add_perm(stat.S_IXUSR, 'x')
- add_perm(stat.S_IRGRP, 'r')
- add_perm(stat.S_IWGRP, 'w')
- if stat.S_ISGID & s.st_mode:
- add_perm(stat.S_IXGRP, 's', 'S')
- else:
- add_perm(stat.S_IXGRP, 'x')
+ if include_owners:
+ # Group/other permissions are only relevant in pseudo context
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
- add_perm(stat.S_IROTH, 'r')
- add_perm(stat.S_IWOTH, 'w')
- if stat.S_ISVTX & s.st_mode:
- update_hash('t')
- else:
- add_perm(stat.S_IXOTH, 'x')
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
- if include_owners:
try:
update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
except KeyError as e:
- bb.warn("KeyError in %s" % path)
msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
- "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
+ "any user/group on target. This may be due to host contamination." %
+ (e, os.path.abspath(path), s.st_uid, s.st_gid))
raise Exception(msg).with_traceback(e.__traceback__)
if include_timestamps:
@@ -567,8 +623,13 @@ def OEOuthashBasic(path, sigfile, task, d):
else:
update_hash(" " * 9)
+ filterfile = False
+ for entry in filemaps:
+ if fnmatch.fnmatch(path, entry):
+ filterfile = True
+
update_hash(" ")
- if stat.S_ISREG(s.st_mode):
+ if stat.S_ISREG(s.st_mode) and not filterfile:
update_hash("%10d" % s.st_size)
else:
update_hash(" " * 10)
@@ -577,9 +638,24 @@ def OEOuthashBasic(path, sigfile, task, d):
fh = hashlib.sha256()
if stat.S_ISREG(s.st_mode):
# Hash file contents
- with open(path, 'rb') as d:
- for chunk in iter(lambda: d.read(4096), b""):
+ if filterfile:
+ # Need to ignore paths in crossscripts and postinst-useradd files.
+ with open(path, 'rb') as d:
+ chunk = d.read()
+ chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
+ for entry in filemaps:
+ if not fnmatch.fnmatch(path, entry):
+ continue
+ for r in filemaps[entry]:
+ if r.startswith("regex-"):
+ chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
+ else:
+ chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
fh.update(chunk)
+ else:
+ with open(path, 'rb') as d:
+ for chunk in iter(lambda: d.read(4096), b""):
+ fh.update(chunk)
update_hash(fh.hexdigest())
else:
update_hash(" " * len(fh.hexdigest()))
@@ -592,11 +668,16 @@ def OEOuthashBasic(path, sigfile, task, d):
update_hash("\n")
# Process this directory and all its child files
- process(root)
+ if include_root or root != ".":
+ process(root)
for f in files:
if f == 'fixmepath':
continue
process(os.path.join(root, f))
+
+ for dir in dirs:
+ if os.path.islink(os.path.join(root, dir)):
+ process(os.path.join(root, dir))
finally:
os.chdir(prev_dir)
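The credential lookup added to SignatureGeneratorOEEquivHash falls back to the standard netrc module when BB_HASHSERVE_USERNAME/BB_HASHSERVE_PASSWORD are unset. Roughly, the lookup amounts to the following (the server name is an example):

    import netrc

    username = password = None
    try:
        auth = netrc.netrc().authenticators("hashserv.example.com")
        if auth is not None:
            # authenticators() returns a (login, account, password) tuple
            username, _, password = auth
    except FileNotFoundError:
        pass   # no ~/.netrc present
    except netrc.NetrcParseError as e:
        print("Error parsing %s:%s: %s" % (e.filename, e.lineno, e.msg))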
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index 61c2687ef4..4412bc14c1 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,11 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
-from distutils.version import LooseVersion
logger = logging.getLogger('BitBake.OE.Terminal')
@@ -31,9 +32,10 @@ class Registry(oe.classutils.ClassRegistry):
class Terminal(Popen, metaclass=Registry):
def __init__(self, sh_cmd, title=None, env=None, d=None):
+ from subprocess import STDOUT
fmt_sh_cmd = self.format_command(sh_cmd, title)
try:
- Popen.__init__(self, fmt_sh_cmd, env=env)
+ Popen.__init__(self, fmt_sh_cmd, env=env, stderr=STDOUT)
except OSError as exc:
import errno
if exc.errno == errno.ENOENT:
@@ -86,10 +88,10 @@ class Konsole(XTerminal):
def __init__(self, sh_cmd, title=None, env=None, d=None):
# Check version
vernum = check_terminal_version("konsole")
- if vernum and LooseVersion(vernum) < '2.0.0':
+ if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
- elif vernum and LooseVersion(vernum) < '16.08.1':
+ elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
# Konsole pre 16.08.01 Has nofork
self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
@@ -102,6 +104,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
@@ -163,7 +169,12 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
+ if not check_tmux_version('1.9'):
+ # `tmux new-session -c` was added in 1.9;
+ # older versions fail with that flag
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
+ self.command = self.command.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -253,13 +264,18 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except OSError:
return
+def check_tmux_version(desired):
+ vernum = check_terminal_version("tmux")
+ if vernum and bb.utils.vercmp_string_op(vernum, desired, "<"):
+ return False
+ return vernum
+
def check_tmux_pane_size(tmux):
import subprocess as sub
# On older tmux versions (<1.9), return false. The reason
# is that there is no easy way to get the height of the active panel
# on current window without nested formats (available from version 1.9)
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
+ if not check_tmux_version('1.9'):
return False
try:
p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
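check_tmux_version() returns False when the installed tmux is older than the requested version and the (truthy) version string otherwise, so callers can use it as a feature gate. The comparison relies on bb.utils.vercmp_string_op, e.g. (requires bitbake on the Python path):

    import bb.utils

    assert bb.utils.vercmp_string_op("1.8", "1.9", "<")
    assert not bb.utils.vercmp_string_op("16.08.1", "2.0.0", "<")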
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index bbbabafbf6..b929afb1f3 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
index 8fc77568ff..54aa86feb5 100644
--- a/meta/lib/oe/useradd.py
+++ b/meta/lib/oe/useradd.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
@@ -45,7 +47,6 @@ def build_useradd_parser():
parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
@@ -63,7 +64,6 @@ def build_groupadd_parser():
parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("GROUP", help="Group name of the new group")
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index a84039f585..14a7d07ef0 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,10 +1,13 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import subprocess
import multiprocessing
import traceback
+import errno
def read_file(filename):
try:
@@ -221,12 +224,12 @@ def packages_filter_out_system(d):
PN-dbg PN-doc PN-locale-eb-gb removed.
"""
pn = d.getVar('PN')
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
+ pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
for pkg in d.getVar('PACKAGES').split():
- if pkg not in blacklist and localepkg not in pkg:
+ if pkg not in pkgfilter and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
@@ -248,24 +251,31 @@ def trim_version(version, num_parts=2):
trimmed = ".".join(parts[:num_parts])
return trimmed
-def cpu_count(at_least=1):
+def cpu_count(at_least=1, at_most=64):
cpus = len(os.sched_getaffinity(0))
- return max(cpus, at_least)
+ return max(min(cpus, at_most), at_least)
def execute_pre_post_process(d, cmds):
if cmds is None:
return
- for cmd in cmds.strip().split(';'):
- cmd = cmd.strip()
- if cmd != '':
- bb.note("Executing %s ..." % cmd)
- bb.build.exec_func(cmd, d)
+ cmds = cmds.replace(";", " ")
+
+ for cmd in cmds.split():
+ bb.note("Executing %s ..." % cmd)
+ bb.build.exec_func(cmd, d)
+
+def get_bb_number_threads(d):
+ return int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
-# For each item in items, call the function 'target' with item as the first
+def multiprocess_launch(target, items, d, extraargs=None):
+ max_process = get_bb_number_threads(d)
+ return multiprocess_launch_mp(target, items, max_process, extraargs)
+
+# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
-def multiprocess_launch(target, items, d, extraargs=None):
+def multiprocess_launch_mp(target, items, max_process, extraargs=None):
class ProcessLaunch(multiprocessing.Process):
def __init__(self, *args, **kwargs):
@@ -300,7 +310,6 @@ def multiprocess_launch(target, items, d, extraargs=None):
self.update()
return self._result
- max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
launched = []
errors = []
results = []
@@ -344,7 +353,29 @@ def squashspaces(string):
import re
return re.sub(r"\s+", " ", string).strip()
-def format_pkg_list(pkg_dict, ret_format=None):
+def rprovides_map(pkgdata_dir, pkg_dict):
+ # Map file -> pkg provider
+ rprov_map = {}
+
+ for pkg in pkg_dict:
+ path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.isfile(path_to_pkgfile):
+ continue
+ with open(path_to_pkgfile) as f:
+ for line in f:
+ if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
+ # List all components provided by pkg.
+ # Exclude version strings, i.e. those starting with (
+ provides = [x for x in line.split()[1:] if not x.startswith('(')]
+ for prov in provides:
+ if prov in rprov_map:
+ rprov_map[prov].append(pkg)
+ else:
+ rprov_map[prov] = [pkg]
+
+ return rprov_map
+
+def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
output = []
if ret_format == "arch":
@@ -357,9 +388,15 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
elif ret_format == "deps":
+ rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
for pkg in sorted(pkg_dict):
for dep in pkg_dict[pkg]["deps"]:
- output.append("%s|%s" % (pkg, dep))
+ if dep in rprov_map:
+ # There could be multiple providers within the image
+ for pkg_provider in rprov_map[dep]:
+ output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
+ else:
+ output.append("%s|%s" % (pkg, dep))
else:
for pkg in sorted(pkg_dict):
output.append(pkg)
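For the "deps" format, the effect of the change is that a dependency satisfied via RPROVIDES is expanded to its actual provider. With a hypothetical rprov_map, the emitted edges look like this:

    rprov_map = {"update-alternatives": ["update-alternatives-opkg"]}
    output = []
    for pkg, dep in [("busybox", "libc6"), ("busybox", "update-alternatives")]:
        if dep in rprov_map:
            for pkg_provider in rprov_map[dep]:
                output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
        else:
            output.append("%s|%s" % (pkg, dep))
    # output == ['busybox|libc6',
    #            'busybox|update-alternatives-opkg * update-alternatives [RPROVIDES]']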
@@ -445,81 +482,6 @@ def get_multilib_datastore(variant, d):
localdata.setVar("MLPREFIX", "")
return localdata
-#
-# Python 2.7 doesn't have threaded pools (just multiprocessing)
-# so implement a version here
-#
-
-from queue import Queue
-from threading import Thread
-
-class ThreadedWorker(Thread):
- """Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
- self.tasks = tasks
- self.daemon = True
-
- self.worker_init = worker_init
- self.worker_end = worker_end
-
- def run(self):
- from queue import Empty
-
- if self.worker_init is not None:
- self.worker_init(self)
-
- while True:
- try:
- func, args, kargs = self.tasks.get(block=False)
- except Empty:
- if self.worker_end is not None:
- self.worker_end(self)
- break
-
- try:
- func(self, *args, **kargs)
- except Exception as e:
- print(e)
- finally:
- self.tasks.task_done()
-
-class ThreadedPool:
- """Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
- self.tasks = Queue(num_tasks)
- self.workers = []
-
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
- self.workers.append(worker)
-
- def start(self):
- for worker in self.workers:
- worker.start()
-
- def add_task(self, func, *args, **kargs):
- """Add a task to the queue"""
- self.tasks.put((func, args, kargs))
-
- def wait_completion(self):
- """Wait for completion of all the tasks in the queue"""
- self.tasks.join()
- for worker in self.workers:
- worker.join()
-
-def write_ld_so_conf(d):
- # Some utils like prelink may not have the correct target library paths
- # so write an ld.so.conf to help them
- ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
- if os.path.exists(ldsoconf):
- bb.utils.remove(ldsoconf)
- bb.utils.mkdirhier(os.path.dirname(ldsoconf))
- with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir") + '\n')
- f.write(d.getVar("libdir") + '\n')
-
class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
@@ -567,3 +529,14 @@ def directory_size(root, blocksize=4096):
total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
total += roundup(getsize(root))
return total
+
+# Update the mtime of a file, skipping permission and read-only filesystem errors
+def touch(filename):
+ try:
+ os.utime(filename, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ if e.errno != errno.EROFS:
+ raise e
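A hypothetical call to the new touch() helper (the path is made up and must already exist); PermissionError and read-only-filesystem errors are swallowed, anything else is re-raised:

    touch("sstate-cache/02/sstate-object.tar.zst")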