Diffstat (limited to 'meta/lib/oe')
-rw-r--r--  meta/lib/oe/__init__.py                      |    6
-rw-r--r--  meta/lib/oe/buildcfg.py                      |   40
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py         |    2
-rw-r--r--  meta/lib/oe/cachedpath.py                    |    2
-rw-r--r--  meta/lib/oe/classextend.py                   |    9
-rw-r--r--  meta/lib/oe/classutils.py                    |    2
-rw-r--r--  meta/lib/oe/copy_buildsystem.py              |    4
-rw-r--r--  meta/lib/oe/cve_check.py                     |   98
-rw-r--r--  meta/lib/oe/data.py                          |    2
-rw-r--r--  meta/lib/oe/distro_check.py                  |    2
-rw-r--r--  meta/lib/oe/elf.py                           |    4
-rw-r--r--  meta/lib/oe/go.py                            |   34
-rw-r--r--  meta/lib/oe/gpg_sign.py                      |   29
-rw-r--r--  meta/lib/oe/license.py                       |    2
-rw-r--r--  meta/lib/oe/lsb.py                           |    2
-rw-r--r--  meta/lib/oe/maketype.py                      |    2
-rw-r--r--  meta/lib/oe/manifest.py                      |    2
-rw-r--r--  meta/lib/oe/npm_registry.py                  |    8
-rw-r--r--  meta/lib/oe/overlayfs.py                     |    8
-rw-r--r--  meta/lib/oe/package.py                       | 1742
-rw-r--r--  meta/lib/oe/package_manager/__init__.py      |   10
-rw-r--r--  meta/lib/oe/package_manager/deb/__init__.py  |   10
-rw-r--r--  meta/lib/oe/package_manager/deb/manifest.py  |    2
-rw-r--r--  meta/lib/oe/package_manager/deb/rootfs.py    |    2
-rw-r--r--  meta/lib/oe/package_manager/deb/sdk.py       |    7
-rw-r--r--  meta/lib/oe/package_manager/ipk/__init__.py  |   13
-rw-r--r--  meta/lib/oe/package_manager/ipk/manifest.py  |    4
-rw-r--r--  meta/lib/oe/package_manager/ipk/rootfs.py    |    4
-rw-r--r--  meta/lib/oe/package_manager/ipk/sdk.py       |   11
-rw-r--r--  meta/lib/oe/package_manager/rpm/__init__.py  |   40
-rw-r--r--  meta/lib/oe/package_manager/rpm/manifest.py  |    2
-rw-r--r--  meta/lib/oe/package_manager/rpm/rootfs.py    |    4
-rw-r--r--  meta/lib/oe/package_manager/rpm/sdk.py       |   10
-rw-r--r--  meta/lib/oe/packagedata.py                   |  256
-rw-r--r--  meta/lib/oe/packagegroup.py                  |    2
-rw-r--r--  meta/lib/oe/patch.py                         |  227
-rw-r--r--  meta/lib/oe/path.py                          |    8
-rw-r--r--  meta/lib/oe/prservice.py                     |    6
-rw-r--r--  meta/lib/oe/qa.py                            |   19
-rw-r--r--  meta/lib/oe/recipeutils.py                   |   97
-rw-r--r--  meta/lib/oe/reproducible.py                  |    8
-rw-r--r--  meta/lib/oe/rootfs.py                        |   75
-rw-r--r--  meta/lib/oe/rust.py                          |    8
-rw-r--r--  meta/lib/oe/sbom.py                          |   48
-rw-r--r--  meta/lib/oe/sdk.py                           |    4
-rw-r--r--  meta/lib/oe/spdx.py                          |   15
-rw-r--r--  meta/lib/oe/sstatesig.py                     |  128
-rw-r--r--  meta/lib/oe/terminal.py                      |    6
-rw-r--r--  meta/lib/oe/types.py                         |    2
-rw-r--r--  meta/lib/oe/useradd.py                       |    2
-rw-r--r--  meta/lib/oe/utils.py                         |   36
51 files changed, 2803 insertions, 263 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
index 4e7c09da04..6eb536ad28 100644
--- a/meta/lib/oe/__init__.py
+++ b/meta/lib/oe/__init__.py
@@ -1,6 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
+
+BBIMPORTS = ["data", "path", "utils", "types", "package", "packagedata", \
+ "packagegroup", "sstatesig", "lsb", "cachedpath", "license", \
+ "qa", "reproducible", "rust", "buildcfg", "go"]
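The new BBIMPORTS list enumerates the submodules that BitBake imports automatically when the oe namespace package is registered via its addpylib mechanism, making them available to python tasks without explicit imports. Conceptually the consumer does something like this (a minimal sketch, not the actual BitBake code):

    import importlib
    import oe

    # Import every submodule the namespace package advertises, so that
    # e.g. oe.path and oe.utils become usable from python functions.
    for name in oe.BBIMPORTS:
        importlib.import_module('oe.' + name)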
diff --git a/meta/lib/oe/buildcfg.py b/meta/lib/oe/buildcfg.py
index 90f5e05715..27b059b834 100644
--- a/meta/lib/oe/buildcfg.py
+++ b/meta/lib/oe/buildcfg.py
@@ -1,32 +1,62 @@
+import os
import subprocess
import bb.process
def detect_revision(d):
path = get_scmbasepath(d)
- return get_metadata_git_revision(path, d)
+ return get_metadata_git_revision(path)
def detect_branch(d):
path = get_scmbasepath(d)
- return get_metadata_git_branch(path, d)
+ return get_metadata_git_branch(path)
def get_scmbasepath(d):
return os.path.join(d.getVar('COREBASE'), 'meta')
-def get_metadata_git_branch(path, d):
+def get_metadata_git_branch(path):
try:
rev, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', cwd=path)
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
-def get_metadata_git_revision(path, d):
+def get_metadata_git_revision(path):
try:
rev, _ = bb.process.run('git rev-parse HEAD', cwd=path)
except bb.process.ExecutionError:
rev = '<unknown>'
return rev.strip()
+def get_metadata_git_toplevel(path):
+ try:
+ toplevel, _ = bb.process.run('git rev-parse --show-toplevel', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return toplevel.strip()
+
+def get_metadata_git_remotes(path):
+ try:
+ remotes_list, _ = bb.process.run('git remote', cwd=path)
+ remotes = remotes_list.split()
+ except bb.process.ExecutionError:
+ remotes = []
+ return remotes
+
+def get_metadata_git_remote_url(path, remote):
+ try:
+ uri, _ = bb.process.run('git remote get-url {remote}'.format(remote=remote), cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return uri.strip()
+
+def get_metadata_git_describe(path):
+ try:
+ describe, _ = bb.process.run('git describe --tags', cwd=path)
+ except bb.process.ExecutionError:
+ return ""
+ return describe.strip()
+
def is_layer_modified(path):
try:
subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
@@ -45,5 +75,5 @@ def get_layer_revisions(d):
layers = (d.getVar("BBLAYERS") or "").split()
revisions = []
for i in layers:
- revisions.append((i, os.path.basename(i), get_metadata_git_branch(i, None).strip(), get_metadata_git_revision(i, None), is_layer_modified(i)))
+ revisions.append((i, os.path.basename(i), get_metadata_git_branch(i).strip(), get_metadata_git_revision(i), is_layer_modified(i)))
return revisions
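All four new helpers degrade gracefully: on any git failure they return an empty string (or an empty list for the remotes), so callers can collect build provenance without wrapping each call in a try block. A minimal sketch of how they combine (the checkout path is illustrative):

    import oe.buildcfg as buildcfg

    path = '/path/to/poky/meta'  # illustrative checkout location
    provenance = {
        'toplevel': buildcfg.get_metadata_git_toplevel(path),
        'describe': buildcfg.get_metadata_git_describe(path),
        'remotes': {r: buildcfg.get_metadata_git_remote_url(path, r)
                    for r in buildcfg.get_metadata_git_remotes(path)},
    }
    # Outside a git checkout every value is simply '' or {} rather than an error.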
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index b1856846b6..4edad01580 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -562,7 +562,7 @@ def compare_siglists(a_blob, b_blob, taskdiff=False):
elif not hash2 in hashfiles:
out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
else:
- out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1]['path'], hashfiles[hash2]['path'], recursecb, collapsed=True)
for line in out2:
m = hashlib.sha256()
m.update(line.encode('utf-8'))
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
index 254257a83f..0138b791d4 100644
--- a/meta/lib/oe/cachedpath.py
+++ b/meta/lib/oe/cachedpath.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on standard python library functions but avoid
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index e08d788b75..5161d33d2d 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -30,6 +32,9 @@ class ClassExtender(object):
if name.endswith("-" + self.extname):
name = name.replace("-" + self.extname, "")
if name.startswith("virtual/"):
+ # Assume large numbers of dashes means a triplet is present and we don't need to convert
+ if name.count("-") >= 3 and name.endswith(("-go", "-binutils", "-gcc", "-g++")):
+ return name
subs = name.split("/", 1)[1]
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
@@ -148,9 +153,7 @@ class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
- if dep.endswith(("-gcc", "-g++")):
- return dep + "-crosssdk"
- elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+ if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.extend_name(dep)
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
index 08bb66b365..ec3f6ad720 100644
--- a/meta/lib/oe/classutils.py
+++ b/meta/lib/oe/classutils.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index 79642fd76a..81abfbf9e2 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# This class should provide easy access to the different aspects of the
@@ -20,7 +22,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -cf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
index aa06497727..ed5c714cb8 100644
--- a/meta/lib/oe/cve_check.py
+++ b/meta/lib/oe/cve_check.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
import collections
import re
import itertools
@@ -73,33 +79,33 @@ def get_patched_cves(d):
import re
import oe.patch
- pn = d.getVar("PN")
- cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+ cve_match = re.compile(r"CVE:( CVE-\d{4}-\d+)+")
# Matches the last "CVE-YYYY-ID" in the file name, also if written
# in lowercase. Possible to have multiple CVE IDs in a single
# file name, but only the last one will be detected from the file name.
# However, patch files contents addressing multiple CVE IDs are supported
# (cve_match regular expression)
-
- cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+ cve_file_name_match = re.compile(r".*(CVE-\d{4}-\d+)", re.IGNORECASE)
patched_cves = set()
- bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
- for url in oe.patch.src_patches(d):
+ patches = oe.patch.src_patches(d)
+ bb.debug(2, "Scanning %d patches for CVEs" % len(patches))
+ for url in patches:
patch_file = bb.fetch.decodeurl(url)[2]
- # Remote compressed patches may not be unpacked, so silently ignore them
- if not os.path.isfile(patch_file):
- bb.warn("%s does not exist, cannot extract CVE list" % patch_file)
- continue
-
# Check patch file name for CVE ID
fname_match = cve_file_name_match.search(patch_file)
if fname_match:
cve = fname_match.group(1).upper()
patched_cves.add(cve)
- bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+ bb.debug(2, "Found %s from patch file name %s" % (cve, patch_file))
+
+ # Remote patches won't be present and compressed patches won't be
+ # unpacked, so say we're not scanning them
+ if not os.path.isfile(patch_file):
+ bb.note("%s is remote or compressed, not scanning content" % patch_file)
+ continue
with open(patch_file, "r", encoding="utf-8") as f:
try:
@@ -124,6 +130,13 @@ def get_patched_cves(d):
if not fname_match and not text_match:
bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+ # Search for additional patched CVEs
+ for cve in (d.getVarFlags("CVE_STATUS") or {}):
+ decoded_status, _, _ = decode_cve_status(d, cve)
+ if decoded_status == "Patched":
+ bb.debug(2, "CVE %s is additionally patched" % cve)
+ patched_cves.add(cve)
+
return patched_cves
@@ -143,7 +156,7 @@ def get_cpe_ids(cve_product, version):
else:
vendor = "*"
- cpe_id = f'cpe:2.3:a:{vendor}:{product}:{version}:*:*:*:*:*:*:*'
+ cpe_id = 'cpe:2.3:*:{}:{}:{}:*:*:*:*:*:*:*'.format(vendor, product, version)
cpe_ids.append(cpe_id)
return cpe_ids
@@ -159,7 +172,7 @@ def cve_check_merge_jsons(output, data):
for product in output["package"]:
if product["name"] == data["package"][0]["name"]:
- bb.error("Error adding the same package twice")
+ bb.error("Error adding the same package %s twice" % product["name"])
return
output["package"].append(data["package"][0])
@@ -173,3 +186,60 @@ def update_symlinks(target_path, link_path):
if os.path.exists(os.path.realpath(link_path)):
os.remove(link_path)
os.symlink(os.path.basename(target_path), link_path)
+
+
+def convert_cve_version(version):
+ """
+ This function converts from CVE format to Yocto version format.
+ eg 8.3_p1 -> 8.3p1, 6.2_rc1 -> 6.2-rc1
+
+ Unless it is redefined using CVE_VERSION in the recipe,
+ cve_check uses the version in the name of the recipe (${PV})
+ to check vulnerabilities against a CVE in the database downloaded from NVD.
+
+ When the version has an update, i.e.
+ "p1" in OpenSSH 8.3p1,
+ "-rc1" in linux kernel 6.2-rc1,
+ the database stores the version as version_update (8.3_p1, 6.2_rc1).
+ Therefore, we must transform this version before comparing to the
+ recipe version.
+
+ In this case, the parameter of the function is 8.3_p1.
+ If the version uses the Release Candidate format, "rc",
+ this function replaces the '_' by '-'.
+ If the version uses the Update format, "p",
+ this function removes the '_' completely.
+ """
+ import re
+
+ matches = re.match('^([0-9.]+)_((p|rc)[0-9]+)$', version)
+
+ if not matches:
+ return version
+
+ version = matches.group(1)
+ update = matches.group(2)
+
+ if matches.group(3) == "rc":
+ return version + '-' + update
+
+ return version + update
+
+def decode_cve_status(d, cve):
+ """
+ Convert CVE_STATUS into status, detail and description.
+ """
+ status = d.getVarFlag("CVE_STATUS", cve)
+ if not status:
+ return ("", "", "")
+
+ status_split = status.split(':', 1)
+ detail = status_split[0]
+ description = status_split[1].strip() if (len(status_split) > 1) else ""
+
+ status_mapping = d.getVarFlag("CVE_CHECK_STATUSMAP", detail)
+ if status_mapping is None:
+ bb.warn('Invalid detail "%s" for CVE_STATUS[%s] = "%s", fallback to Unpatched' % (detail, cve, status))
+ status_mapping = "Unpatched"
+
+ return (status_mapping, detail, description)
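decode_cve_status splits a CVE_STATUS value on the first ':' into a detail keyword and a free-form description, then maps the detail to a coarse status through CVE_CHECK_STATUSMAP, falling back to "Unpatched" (with a warning) for unknown details. The expected data shapes, with illustrative values rather than ones taken from a real recipe:

    # In a recipe:
    #   CVE_STATUS[CVE-2020-12345] = "backported-patch: fixed by local patch"
    #
    # Assuming CVE_CHECK_STATUSMAP[backported-patch] = "Patched":
    #   decode_cve_status(d, "CVE-2020-12345")
    #   -> ("Patched", "backported-patch", "fixed by local patch")

    # convert_cve_version, per its docstring:
    #   convert_cve_version("8.3_p1")  -> "8.3p1"
    #   convert_cve_version("6.2_rc1") -> "6.2-rc1"
    #   convert_cve_version("1.2.3")   -> "1.2.3" (no update part, unchanged)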
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
index 602130a904..37121cfad2 100644
--- a/meta/lib/oe/data.py
+++ b/meta/lib/oe/data.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 4b2a9bec01..3494520f40 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
index 46c884a775..eab2349a4f 100644
--- a/meta/lib/oe/elf.py
+++ b/meta/lib/oe/elf.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -19,6 +21,7 @@ def machine_dict(d):
"x86_64": (62, 0, 0, True, 64),
"epiphany": (4643, 0, 0, True, 32),
"lm32": (138, 0, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
"microblaze": (189, 0, 0, False, 32),
@@ -43,6 +46,7 @@ def machine_dict(d):
"ia64": (50, 0, 0, True, 64),
"alpha": (36902, 0, 0, True, 64),
"hppa": (15, 3, 0, False, 32),
+ "loongarch64":(258, 0, 0, True, 64),
"m68k": ( 4, 0, 0, False, 32),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
diff --git a/meta/lib/oe/go.py b/meta/lib/oe/go.py
new file mode 100644
index 0000000000..dfd957d157
--- /dev/null
+++ b/meta/lib/oe/go.py
@@ -0,0 +1,34 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import re
+
+def map_arch(a):
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('mips64el.*', a):
+ return 'mips64le'
+ elif re.match('mips64.*', a):
+ return 'mips64'
+ elif a == 'mips':
+ return 'mips'
+ elif a == 'mipsel':
+ return 'mipsle'
+ elif re.match('p(pc|owerpc)(64le)', a):
+ return 'ppc64le'
+ elif re.match('p(pc|owerpc)(64)', a):
+ return 'ppc64'
+ elif a == 'riscv64':
+ return 'riscv64'
+ elif a == 'loongarch64':
+ return 'loong64'
+ return ''
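map_arch translates an OpenEmbedded target architecture into the GOARCH value the Go toolchain expects, returning an empty string for anything unsupported. A few examples, grounded in the branches above:

    import oe.go

    assert oe.go.map_arch('x86_64') == 'amd64'
    assert oe.go.map_arch('i686') == '386'
    assert oe.go.map_arch('aarch64') == 'arm64'
    assert oe.go.map_arch('mips64el') == 'mips64le'  # tested before plain mips64
    assert oe.go.map_arch('loongarch64') == 'loong64'
    assert oe.go.map_arch('sparc') == ''             # unsupported -> empty string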
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index aa9bb49f2c..ede6186c84 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -1,13 +1,16 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for GPG signing"""
-import os
import bb
-import subprocess
+import os
import shlex
+import subprocess
+import tempfile
class LocalSigner(object):
"""Class for handling local (on the build host) signing"""
@@ -71,8 +74,6 @@ class LocalSigner(object):
cmd += ['--homedir', self.gpg_path]
if armor:
cmd += ['--armor']
- if output_suffix:
- cmd += ['-o', input_file + "." + output_suffix]
if use_sha256:
cmd += ['--digest-algo', "SHA256"]
@@ -81,19 +82,27 @@ class LocalSigner(object):
if self.gpg_version > (2,1,):
cmd += ['--pinentry-mode', 'loopback']
- cmd += [input_file]
-
try:
if passphrase_file:
with open(passphrase_file) as fobj:
passphrase = fobj.readline();
- job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
- (_, stderr) = job.communicate(passphrase.encode("utf-8"))
+ if not output_suffix:
+ output_suffix = 'asc' if armor else 'sig'
+ output_file = input_file + "." + output_suffix
+ with tempfile.TemporaryDirectory(dir=os.path.dirname(output_file)) as tmp_dir:
+ tmp_file = os.path.join(tmp_dir, os.path.basename(output_file))
+ cmd += ['-o', tmp_file]
+
+ cmd += [input_file]
+
+ job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ (_, stderr) = job.communicate(passphrase.encode("utf-8"))
- if job.returncode:
- bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
+ if job.returncode:
+ bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8")))
+ os.rename(tmp_file, output_file)
except IOError as e:
bb.error("IO error (%s): %s" % (e.errno, e.strerror))
raise Exception("Failed to sign '%s'" % input_file)
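Writing the signature to a temporary file in the destination directory and renaming it into place means the final path only ever appears fully written: os.rename is atomic on POSIX when source and destination are on the same filesystem, so an interrupted gpg run can no longer leave a truncated .sig or .asc behind. The same pattern in isolation (a generic sketch, not the signer code itself):

    import os
    import tempfile

    def write_atomically(path, data):
        # Stage in the same directory so the final rename cannot cross filesystems.
        with tempfile.TemporaryDirectory(dir=os.path.dirname(path)) as tmp_dir:
            tmp_file = os.path.join(tmp_dir, os.path.basename(path))
            with open(tmp_file, 'wb') as f:
                f.write(data)
            os.rename(tmp_file, path)  # atomic within one filesystem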
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index 99cfa5f733..d9c8d94da4 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""Code for parsing OpenEmbedded license strings"""
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
index 43e46380d7..3ec03e5042 100644
--- a/meta/lib/oe/lsb.py
+++ b/meta/lib/oe/lsb.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index d36082c535..7a83bdf602 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
"""OpenEmbedded variable typing support
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index 1a058dcd73..61f18adc4a 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/npm_registry.py b/meta/lib/oe/npm_registry.py
index 96c0affb45..d97ced7cda 100644
--- a/meta/lib/oe/npm_registry.py
+++ b/meta/lib/oe/npm_registry.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
import bb
import json
import subprocess
@@ -5,7 +11,7 @@ import subprocess
_ALWAYS_SAFE = frozenset('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789'
- '_.-~')
+ '_.-~()')
MISSING_OK = object()
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
index b5d5e88e80..8b88900f71 100644
--- a/meta/lib/oe/overlayfs.py
+++ b/meta/lib/oe/overlayfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# This file contains common functions for overlayfs and its QA check
@@ -38,7 +40,11 @@ def unitFileList(d):
bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
for mountPoint in overlayMountPoints:
- for path in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ mountPointList = d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint)
+ if not mountPointList:
+            bb.debug(1, "No mount points defined for %s flag, don't add to file list" % mountPoint)
+ continue
+ for path in mountPointList.split():
fileList.append(mountUnitName(path))
fileList.append(helperUnitName(path))
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index 7d387ee81d..1511ba47c4 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -1,11 +1,23 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
+import errno
+import fnmatch
+import itertools
+import os
+import shlex
+import re
+import glob
+import shutil
import stat
import mmap
import subprocess
+import oe.cachedpath
+
def runstrip(arg):
# Function to strip a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
@@ -30,7 +41,7 @@ def runstrip(arg):
stripcmd = [strip]
skip_strip = False
- # kernel module
+ # kernel module
if elftype & 16:
if is_kernel_module_signed(file):
bb.debug(1, "Skip strip on signed module %s" % file)
@@ -103,7 +114,7 @@ def is_static_lib(path):
return start == magic
return False
-def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, max_process, qa_already_stripped=False):
"""
Strip executable code (like executables, shared libraries) _in_place_
- Based on sysroot_strip in staging.bbclass
@@ -111,6 +122,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
:param strip_cmd: Strip command (usually ${STRIP})
:param libdir: ${libdir} - strip .so files in this directory
:param base_libdir: ${base_libdir} - strip .so files in this directory
+ :param max_process: number of stripping processes started in parallel
:param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
This is for proper logging and messages only.
"""
@@ -153,7 +165,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
# ...but is it ELF, and is it already stripped?
checkelf.append(file)
inodecache[file] = s.st_ino
- results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+ results = oe.utils.multiprocess_launch_mp(is_elf, checkelf, max_process)
for (file, elf_file) in results:
#elf_file = is_elf(file)
if elf_file & 1:
@@ -181,7 +193,7 @@ def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripp
elf_file = int(elffiles[file])
sfiles.append((file, elf_file, strip_cmd))
- oe.utils.multiprocess_launch(runstrip, sfiles, d)
+ oe.utils.multiprocess_launch_mp(runstrip, sfiles, max_process)
def file_translate(file):
@@ -290,3 +302,1725 @@ def read_shlib_providers(d):
shlib_provider[s[0]] = {}
shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
return shlib_provider
+
+# We generate a master list of directories to process, we start by
+# seeding this list with reasonable defaults, then load from
+# the fs-perms.txt files
+def fixup_perms(d):
+ import pwd, grp
+
+ cpath = oe.cachedpath.CachedPath()
+ dvar = d.getVar('PKGD')
+
+ # init using a string with the same format as a line as documented in
+ # the fs-perms.txt file
+ # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
+ # <path> link <link target>
+ #
+ # __str__ can be used to print out an entry in the input format
+ #
+ # if fs_perms_entry.path is None:
+ # an error occurred
+ # if fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.link = target of link
+ # if not fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.mode = expected dir mode or None
+ # fs_perms_entry.uid = expected uid or -1
+ # fs_perms_entry.gid = expected gid or -1
+ # fs_perms_entry.walk = 'true' or something else
+ # fs_perms_entry.fmode = expected file mode or None
+ # fs_perms_entry.fuid = expected file uid or -1
+ # fs_perms_entry_fgid = expected file gid or -1
+ class fs_perms_entry():
+ def __init__(self, line):
+ lsplit = line.split()
+ if len(lsplit) == 3 and lsplit[1].lower() == "link":
+ self._setlink(lsplit[0], lsplit[2])
+ elif len(lsplit) == 8:
+ self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
+ else:
+ msg = "Fixup Perms: invalid config line %s" % line
+ oe.qa.handle_error("perm-config", msg, d)
+ self.path = None
+ self.link = None
+
+ def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
+ self.path = os.path.normpath(path)
+ self.link = None
+ self.mode = self._procmode(mode)
+ self.uid = self._procuid(uid)
+ self.gid = self._procgid(gid)
+ self.walk = walk.lower()
+ self.fmode = self._procmode(fmode)
+ self.fuid = self._procuid(fuid)
+ self.fgid = self._procgid(fgid)
+
+ def _setlink(self, path, link):
+ self.path = os.path.normpath(path)
+ self.link = link
+
+ def _procmode(self, mode):
+ if not mode or (mode and mode == "-"):
+ return None
+ else:
+ return int(mode,8)
+
+ # Note uid/gid -1 has special significance in os.lchown
+ def _procuid(self, uid):
+ if uid is None or uid == "-":
+ return -1
+ elif uid.isdigit():
+ return int(uid)
+ else:
+ return pwd.getpwnam(uid).pw_uid
+
+ def _procgid(self, gid):
+ if gid is None or gid == "-":
+ return -1
+ elif gid.isdigit():
+ return int(gid)
+ else:
+ return grp.getgrnam(gid).gr_gid
+
+ # Use for debugging the entries
+ def __str__(self):
+ if self.link:
+ return "%s link %s" % (self.path, self.link)
+ else:
+ mode = "-"
+ if self.mode:
+ mode = "0%o" % self.mode
+ fmode = "-"
+ if self.fmode:
+ fmode = "0%o" % self.fmode
+ uid = self._mapugid(self.uid)
+ gid = self._mapugid(self.gid)
+ fuid = self._mapugid(self.fuid)
+ fgid = self._mapugid(self.fgid)
+ return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
+
+ def _mapugid(self, id):
+ if id is None or id == -1:
+ return "-"
+ else:
+ return "%d" % id
+
+ # Fix the permission, owner and group of path
+ def fix_perms(path, mode, uid, gid, dir):
+ if mode and not os.path.islink(path):
+ #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
+ os.chmod(path, mode)
+ # -1 is a special value that means don't change the uid/gid
+ # if they are BOTH -1, don't bother to lchown
+ if not (uid == -1 and gid == -1):
+ #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
+ os.lchown(path, uid, gid)
+
+ # Return a list of configuration files based on either the default
+ # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES
+ # paths are resolved via BBPATH
+ def get_fs_perms_list(d):
+ str = ""
+ bbpath = d.getVar('BBPATH')
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
+ for conf_file in fs_perms_tables.split():
+ confpath = bb.utils.which(bbpath, conf_file)
+ if confpath:
+ str += " %s" % bb.utils.which(bbpath, conf_file)
+ else:
+ bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
+ return str
+
+ fs_perms_table = {}
+ fs_link_table = {}
+
+ # By default all of the standard directories specified in
+ # bitbake.conf will get 0755 root:root.
+ target_path_vars = [ 'base_prefix',
+ 'prefix',
+ 'exec_prefix',
+ 'base_bindir',
+ 'base_sbindir',
+ 'base_libdir',
+ 'datadir',
+ 'sysconfdir',
+ 'servicedir',
+ 'sharedstatedir',
+ 'localstatedir',
+ 'infodir',
+ 'mandir',
+ 'docdir',
+ 'bindir',
+ 'sbindir',
+ 'libexecdir',
+ 'libdir',
+ 'includedir' ]
+
+ for path in target_path_vars:
+ dir = d.getVar(path) or ""
+ if dir == "":
+ continue
+ fs_perms_table[dir] = fs_perms_entry(d.expand("%s 0755 root root false - - -" % (dir)))
+
+ # Now we actually load from the configuration files
+ for conf in get_fs_perms_list(d).split():
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
+ for line in f:
+ if line.startswith('#'):
+ continue
+ lsplit = line.split()
+ if len(lsplit) == 0:
+ continue
+ if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
+ msg = "Fixup perms: %s invalid line: %s" % (conf, line)
+ oe.qa.handle_error("perm-line", msg, d)
+ continue
+ entry = fs_perms_entry(d.expand(line))
+ if entry and entry.path:
+ if entry.link:
+ fs_link_table[entry.path] = entry
+ if entry.path in fs_perms_table:
+ fs_perms_table.pop(entry.path)
+ else:
+ fs_perms_table[entry.path] = entry
+ if entry.path in fs_link_table:
+ fs_link_table.pop(entry.path)
+
+ # Debug -- list out in-memory table
+ #for dir in fs_perms_table:
+ # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+ #for link in fs_link_table:
+ # bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
+
+ # We process links first, so we can go back and fixup directory ownership
+ # for any newly created directories
+ # Process in sorted order so /run gets created before /run/lock, etc.
+ for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
+ link = entry.link
+ dir = entry.path
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
+ continue
+
+ if link[0] == "/":
+ target = dvar + link
+ ptarget = link
+ else:
+ target = os.path.join(os.path.dirname(origin), link)
+ ptarget = os.path.join(os.path.dirname(dir), link)
+ if os.path.exists(target):
+ msg = "Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget)
+ oe.qa.handle_error("perm-link", msg, d)
+ continue
+
+ # Create path to move directory to, move it, and then setup the symlink
+ bb.utils.mkdirhier(os.path.dirname(target))
+ #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
+ bb.utils.rename(origin, target)
+ #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
+ os.symlink(link, origin)
+
+ for dir in fs_perms_table:
+ origin = dvar + dir
+ if not (cpath.exists(origin) and cpath.isdir(origin)):
+ continue
+
+ fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+
+ if fs_perms_table[dir].walk == 'true':
+ for root, dirs, files in os.walk(origin):
+ for dr in dirs:
+ each_dir = os.path.join(root, dr)
+ fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+ for f in files:
+ each_file = os.path.join(root, f)
+ fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
+
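For reference, the two line shapes accepted by the parser above look like this (illustrative entries, not copied from the shipped fs-perms.txt):

    # <path>        <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
    /usr/share/man  0755   root  root  true   0644    root   root
    # <path> link <target>
    /var/run link /run

With walk set to "true" the directory mode is re-applied to every subdirectory and fmode/fuid/fgid to every file beneath the path; a "-" in any field means leave that attribute unchanged.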
+# Get a list of files from file vars by searching files under current working directory
+# The list contains symlinks, directories and normal files.
+def files_from_filevars(filevars):
+ cpath = oe.cachedpath.CachedPath()
+ files = []
+ for f in filevars:
+ if os.path.isabs(f):
+ f = '.' + f
+ if not f.startswith("./"):
+ f = './' + f
+ globbed = glob.glob(f, recursive=True)
+ if globbed:
+ if [ f ] != globbed:
+ files += globbed
+ continue
+ files.append(f)
+
+ symlink_paths = []
+ for ind, f in enumerate(files):
+ # Handle directory symlinks. Truncate path to the lowest level symlink
+ parent = ''
+ for dirname in f.split('/')[:-1]:
+ parent = os.path.join(parent, dirname)
+ if dirname == '.':
+ continue
+ if cpath.islink(parent):
+ bb.warn("FILES contains file '%s' which resides under a "
+ "directory symlink. Please fix the recipe and use the "
+ "real path for the file." % f[1:])
+ symlink_paths.append(f)
+ files[ind] = parent
+ f = parent
+ break
+
+ if not cpath.islink(f):
+ if cpath.isdir(f):
+ newfiles = [ os.path.join(f,x) for x in os.listdir(f) ]
+ if newfiles:
+ files += newfiles
+
+ return files, symlink_paths
+
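files_from_filevars interprets FILES-style entries relative to the current working directory, expanding globs, descending into listed directories, and separately reporting entries reached through a directory symlink. A minimal sketch of a caller, mirroring how get_conffiles below uses it (pkgd stands in for the package staging root):

    import os
    import oe.package

    os.chdir(pkgd)  # e.g. d.getVar('PKGDEST') + '/' + pkg
    files, symlink_paths = oe.package.files_from_filevars(['/etc/*.conf', '/usr/share/doc'])
    # 'files' holds './'-prefixed paths; 'symlink_paths' holds paths that sit
    # under a directory symlink and should be fixed in the recipe's FILES.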
+# Called in package_<rpm,ipk,deb>.bbclass to get the correct list of configuration files
+def get_conffiles(pkg, d):
+ pkgdest = d.getVar('PKGDEST')
+ root = os.path.join(pkgdest, pkg)
+ cwd = os.getcwd()
+ os.chdir(root)
+
+ conffiles = d.getVar('CONFFILES:%s' % pkg);
+ if conffiles == None:
+ conffiles = d.getVar('CONFFILES')
+ if conffiles == None:
+ conffiles = ""
+ conffiles = conffiles.split()
+ conf_orig_list = files_from_filevars(conffiles)[0]
+
+ # Remove links and directories from conf_orig_list to get conf_list which only contains normal files
+ conf_list = []
+ for f in conf_orig_list:
+ if os.path.isdir(f):
+ continue
+ if os.path.islink(f):
+ continue
+ if not os.path.exists(f):
+ continue
+ conf_list.append(f)
+
+ # Remove the leading './'
+ for i in range(0, len(conf_list)):
+ conf_list[i] = conf_list[i][1:]
+
+ os.chdir(cwd)
+ return sorted(conf_list)
+
+def legitimize_package_name(s):
+ """
+ Make sure package names are legitimate strings
+ """
+
+ def fixutf(m):
+ cp = m.group(1)
+ if cp:
+ return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
+
+ # Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
+ s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+
+ # Remaining package name validity fixes
+ return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
+
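The transformations are easiest to see on a concrete name; following the rules above:

    # lower-casing plus '_' -> '-', '@' -> '+', ',' -> '+', '/' -> '-':
    assert legitimize_package_name('Foo_Bar@1/2') == 'foo-bar+1-2'
    # glibc-locale style <Uxxxx> escapes are decoded first:
    assert legitimize_package_name('locale<U00DF>') == 'localeß'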
+def split_locales(d):
+ cpath = oe.cachedpath.CachedPath()
+ if (d.getVar('PACKAGE_NO_LOCALE') == '1'):
+ bb.debug(1, "package requested not splitting locales")
+ return
+
+ packages = (d.getVar('PACKAGES') or "").split()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('LOCALEBASEPN')
+
+ try:
+ locale_index = packages.index(pn + '-locale')
+ packages.pop(locale_index)
+ except ValueError:
+ locale_index = len(packages)
+
+ localepaths = []
+ locales = set()
+ for localepath in (d.getVar('LOCALE_PATHS') or "").split():
+ localedir = dvar + localepath
+ if not cpath.isdir(localedir):
+ bb.debug(1, 'No locale files in %s' % localepath)
+ continue
+
+ localepaths.append(localepath)
+ with os.scandir(localedir) as it:
+ for entry in it:
+ if entry.is_dir():
+ locales.add(entry.name)
+
+ if len(locales) == 0:
+ bb.debug(1, "No locale files in this package")
+ return
+
+ summary = d.getVar('SUMMARY') or pn
+ description = d.getVar('DESCRIPTION') or ""
+ locale_section = d.getVar('LOCALE_SECTION')
+ mlprefix = d.getVar('MLPREFIX') or ""
+ for l in sorted(locales):
+ ln = legitimize_package_name(l)
+ pkg = pn + '-locale-' + ln
+ packages.insert(locale_index, pkg)
+ locale_index += 1
+ files = []
+ for localepath in localepaths:
+ files.append(os.path.join(localepath, l))
+ d.setVar('FILES:' + pkg, " ".join(files))
+ d.setVar('RRECOMMENDS:' + pkg, '%svirtual-locale-%s' % (mlprefix, ln))
+ d.setVar('RPROVIDES:' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY:' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION:' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ if locale_section:
+ d.setVar('SECTION:' + pkg, locale_section)
+
+ d.setVar('PACKAGES', ' '.join(packages))
+
+ # Disabled by RP 18/06/07
+ # Wildcards aren't supported in debian
+ # They break with ipkg since glibc-locale* will mean that
+ # glibc-localedata-translit* won't install as a dependency
+ # for some other package which breaks meta-toolchain
+ # Probably breaks since virtual-locale- isn't provided anywhere
+ #rdep = (d.getVar('RDEPENDS:%s' % pn) or "").split()
+ #rdep.append('%s-locale*' % pn)
+ #d.setVar('RDEPENDS:%s' % pn, ' '.join(rdep))
+
+def package_debug_vars(d):
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debug_vars = {
+ "append": ".debug",
+ "staticappend": "",
+ "dir": "",
+ "staticdir": "",
+ "libdir": "/usr/lib/debug",
+ "staticlibdir": "/usr/lib/debug-static",
+ "srcdir": "/usr/src/debug",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
+ # Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "",
+ }
+ elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debug_vars = {
+ "append": "",
+ "staticappend": "",
+ "dir": "/.debug",
+ "staticdir": "/.debug-static",
+ "libdir": "",
+ "staticlibdir": "",
+ "srcdir": "/usr/src/debug",
+ }
+
+ return debug_vars
+
+
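The chosen style only changes where the functions below place their output; the debug path is always assembled as dv["libdir"] + dirname(src) + dv["dir"] + "/" + basename(src) + dv["append"]. For a binary at /usr/bin/foo that yields:

    # default ('.debug') style:        /usr/bin/.debug/foo
    # 'debug-file-directory' style:    /usr/lib/debug/usr/bin/foo.debug
    # 'debug-without-src' style:       /usr/bin/.debug/foo (no /usr/src/debug copy)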
+def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
+ debugfiles = {}
+
+ for line in dwarfsrcfiles_output.splitlines():
+ if line.startswith("\t"):
+ debugfiles[os.path.normpath(line.split()[0])] = ""
+
+ return debugfiles.keys()
+
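The parser keeps only tab-indented lines, takes the first whitespace-separated token as a source path, normalises it, and de-duplicates via dict keys. Given dwarfsrcfiles output shaped like the following (an illustrative example, not captured output), it returns the two distinct paths:

    out = ("/usr/bin/foo\n"                            # header: no tab, ignored
           "\t/usr/src/debug/foo/1.0/foo.c\n"
           "\t/usr/src/debug/foo/1.0/foo.c\n"          # duplicate, collapsed
           "\t/usr/src/debug/foo/1.0/util/../bar.c\n") # normpath -> .../1.0/bar.c
    paths = parse_debugsources_from_dwarfsrcfiles_output(out)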
+def source_info(file, d, fatal=True):
+ cmd = ["dwarfsrcfiles", file]
+ try:
+ output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
+ retval = 0
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ retval = exc.returncode
+
+ # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
+ if retval != 0 and retval != 255:
+ msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
+ if fatal:
+ bb.fatal(msg)
+ bb.note(msg)
+
+ debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
+
+ return list(debugsources)
+
+def splitdebuginfo(file, dvar, dv, d):
+ # Function to split a single file into two components, one is the stripped
+ # target system binary, the other contains any debugging information. The
+ # two files are linked to reference each other.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ sources = []
+
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ if oe.package.is_kernel_module_signed(file):
+ bb.debug(1, "Skip strip on signed module %s" % file)
+ return (file, sources)
+
+ # Split the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Split %s -> %s" % (file, debugfile))
+ # Only store off the hard link reference if we successfully split!
+
+ dvar = d.getVar('PKGD')
+ objcopy = d.getVar("OBJCOPY")
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
+
+ # Set the debuglink to have the view of the file path on the target
+ subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def splitstaticdebuginfo(file, dvar, dv, d):
+    # Unlike the function above, there is no way to split a static library into
+    # two components. So to get similar results we will copy the unmodified
+ # static library (containing the debug symbols) into a new directory.
+ # We will then strip (preserving symbols) the static library in the
+ # typical location.
+ #
+ # return a mapping of files:debugsources
+
+ src = file[len(dvar):]
+ dest = dv["staticlibdir"] + os.path.dirname(src) + dv["staticdir"] + "/" + os.path.basename(src) + dv["staticappend"]
+ debugfile = dvar + dest
+ sources = []
+
+ # Copy the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Copy %s -> %s" % (file, debugfile))
+
+ dvar = d.getVar('PKGD')
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if dv["srcdir"]:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ # Copy the unmodified item to the debug directory
+ shutil.copy2(file, debugfile)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def inject_minidebuginfo(file, dvar, dv, d):
+ # Extract just the symbols from debuginfo into minidebuginfo,
+ # compress it with xz and inject it back into the binary in a .gnu_debugdata section.
+ # https://sourceware.org/gdb/onlinedocs/gdb/MiniDebugInfo.html
+
+ readelf = d.getVar('READELF')
+ nm = d.getVar('NM')
+ objcopy = d.getVar('OBJCOPY')
+
+ minidebuginfodir = d.expand('${WORKDIR}/minidebuginfo')
+
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ debugfile = dvar + dest
+ minidebugfile = minidebuginfodir + src + '.minidebug'
+ bb.utils.mkdirhier(os.path.dirname(minidebugfile))
+
+ # If we didn't produce debuginfo for any reason, we can't produce minidebuginfo either
+ # so skip it.
+ if not os.path.exists(debugfile):
+ bb.debug(1, 'ELF file {} has no debuginfo, skipping minidebuginfo injection'.format(file))
+ return
+
+ # minidebuginfo does not make sense to apply to ELF objects other than
+ # executables and shared libraries, skip applying the minidebuginfo
+ # generation for objects like kernel modules.
+ for line in subprocess.check_output([readelf, '-h', debugfile], universal_newlines=True).splitlines():
+ if not line.strip().startswith("Type:"):
+ continue
+ elftype = line.split(":")[1].strip()
+ if not any(elftype.startswith(i) for i in ["EXEC", "DYN"]):
+ bb.debug(1, 'ELF file {} is not executable/shared, skipping minidebuginfo injection'.format(file))
+ return
+ break
+
+ # Find non-allocated PROGBITS, NOTE, and NOBITS sections in the debuginfo.
+ # We will exclude all of these from minidebuginfo to save space.
+ remove_section_names = []
+ for line in subprocess.check_output([readelf, '-W', '-S', debugfile], universal_newlines=True).splitlines():
+ # strip the leading " [ 1]" section index to allow splitting on space
+ if ']' not in line:
+ continue
+ fields = line[line.index(']') + 1:].split()
+ if len(fields) < 7:
+ continue
+ name = fields[0]
+ type = fields[1]
+ flags = fields[6]
+ # .debug_ sections will be removed by objcopy -S so no need to explicitly remove them
+ if name.startswith('.debug_'):
+ continue
+ if 'A' not in flags and type in ['PROGBITS', 'NOTE', 'NOBITS']:
+ remove_section_names.append(name)
+
+ # List dynamic symbols in the binary. We can exclude these from minidebuginfo
+ # because they are always present in the binary.
+ dynsyms = set()
+ for line in subprocess.check_output([nm, '-D', file, '--format=posix', '--defined-only'], universal_newlines=True).splitlines():
+ dynsyms.add(line.split()[0])
+
+ # Find all function symbols from debuginfo which aren't in the dynamic symbols table.
+ # These are the ones we want to keep in minidebuginfo.
+ keep_symbols_file = minidebugfile + '.symlist'
+ found_any_symbols = False
+ with open(keep_symbols_file, 'w') as f:
+ for line in subprocess.check_output([nm, debugfile, '--format=sysv', '--defined-only'], universal_newlines=True).splitlines():
+ fields = line.split('|')
+ if len(fields) < 7:
+ continue
+ name = fields[0].strip()
+ type = fields[3].strip()
+ if type == 'FUNC' and name not in dynsyms:
+ f.write('{}\n'.format(name))
+ found_any_symbols = True
+
+ if not found_any_symbols:
+ bb.debug(1, 'ELF file {} contains no symbols, skipping minidebuginfo injection'.format(file))
+ return
+
+ bb.utils.remove(minidebugfile)
+ bb.utils.remove(minidebugfile + '.xz')
+
+ subprocess.check_call([objcopy, '-S'] +
+ ['--remove-section={}'.format(s) for s in remove_section_names] +
+ ['--keep-symbols={}'.format(keep_symbols_file), debugfile, minidebugfile])
+
+ subprocess.check_call(['xz', '--keep', minidebugfile])
+
+ subprocess.check_call([objcopy, '--add-section', '.gnu_debugdata={}.xz'.format(minidebugfile), file])
+
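Whether the injection took effect can be verified by looking for the .gnu_debugdata section in the stripped binary, using the same readelf the function already relies on (a quick sketch):

    import subprocess

    def has_minidebuginfo(readelf, binary):
        sections = subprocess.check_output([readelf, '-W', '-S', binary],
                                           universal_newlines=True)
        return '.gnu_debugdata' in sections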
+def copydebugsources(debugsrcdir, sources, d):
+ # The debug src information written out to sourcefile is further processed
+ # and copied to the destination here.
+
+ cpath = oe.cachedpath.CachedPath()
+
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
+ dvar = d.getVar('PKGD')
+ strip = d.getVar("STRIP")
+ objcopy = d.getVar("OBJCOPY")
+ workdir = d.getVar("WORKDIR")
+ sdir = d.getVar("S")
+ cflags = d.expand("${CFLAGS}")
+
+ prefixmap = {}
+ for flag in cflags.split():
+ if not flag.startswith("-fdebug-prefix-map"):
+ continue
+ if "recipe-sysroot" in flag:
+ continue
+ flag = flag.split("=")
+ prefixmap[flag[1]] = flag[2]
+
+ nosuchdir = []
+ basepath = dvar
+ for p in debugsrcdir.split("/"):
+ basepath = basepath + "/" + p
+ if not cpath.exists(basepath):
+ nosuchdir.append(basepath)
+ bb.utils.mkdirhier(basepath)
+ cpath.updatecache(basepath)
+
+ for pmap in prefixmap:
+ # Ignore files from the recipe sysroots (target and native)
+ cmd = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '((<internal>|<built-in>)$|/.*recipe-sysroot.*/)' | " % sourcefile
+ # We need to ignore files that are not actually ours
+ # we do this by only paying attention to items from this package
+ cmd += "fgrep -zw '%s' | " % prefixmap[pmap]
+ # Remove prefix in the source paths
+ cmd += "sed 's#%s/##g' | " % (prefixmap[pmap])
+ cmd += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)" % (pmap, dvar, prefixmap[pmap])
+
+ try:
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ # Can "fail" if internal headers/transient sources are attempted
+ pass
+ # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
+ # Work around this by manually finding and copying any symbolic links that made it through.
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
+ (dvar, prefixmap[pmap], dvar, prefixmap[pmap], pmap, dvar, prefixmap[pmap])
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # debugsources.list may be polluted from the host if we used externalsrc,
+ # cpio uses copy-pass and may have just created a directory structure
+    # matching the one from the host, if that's the case move those files to
+ # debugsrcdir to avoid host contamination.
+ # Empty dir structure will be deleted in the next step.
+
+ # Same check as above for externalsrc
+ if workdir not in sdir:
+ if os.path.exists(dvar + debugsrcdir + sdir):
+ cmd = "mv %s%s%s/* %s%s" % (dvar, debugsrcdir, sdir, dvar,debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # The copy by cpio may have resulted in some empty directories! Remove these
+ cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+ # Also remove debugsrcdir if its empty
+ for p in nosuchdir[::-1]:
+ if os.path.exists(p) and not os.listdir(p):
+ os.rmdir(p)
+
+
+def process_split_and_strip_files(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ dvar = d.getVar('PKGD')
+ pn = d.getVar('PN')
+ hostos = d.getVar('HOST_OS')
+
+ oldcwd = os.getcwd()
+ os.chdir(dvar)
+
+ dv = package_debug_vars(d)
+
+ #
+ # First lets figure out all of the files we may have to process ... do this only once!
+ #
+ elffiles = {}
+ symlinks = {}
+ staticlibs = []
+ inodes = {}
+ libdir = os.path.abspath(dvar + os.sep + d.getVar("libdir"))
+ baselibdir = os.path.abspath(dvar + os.sep + d.getVar("base_libdir"))
+ skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
+ d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ checkelf = {}
+ checkelflinks = {}
+ for root, dirs, files in cpath.walk(dvar):
+ for f in files:
+ file = os.path.join(root, f)
+
+ # Skip debug files
+ if dv["append"] and file.endswith(dv["append"]):
+ continue
+ if dv["dir"] and dv["dir"] in os.path.dirname(file[len(dvar):]):
+ continue
+
+ if file in skipfiles:
+ continue
+
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
+
+ try:
+ ltarget = cpath.realpath(file, dvar, False)
+ s = cpath.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+ # Check its an executable
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) \
+ or (s[stat.ST_MODE] & stat.S_IXOTH) \
+ or ((file.startswith(libdir) or file.startswith(baselibdir)) \
+ and (".so" in f or ".node" in f)) \
+ or (f.startswith('vmlinux') or ".ko" in f):
+
+ if cpath.islink(file):
+ checkelflinks[file] = ltarget
+ continue
+ # Use a reference of device ID and inode number to identify files
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ checkelf[file] = (file, file_reference)
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
+ results_map = {}
+ for (ltarget, elf_file) in results:
+ results_map[ltarget] = elf_file
+ for file in checkelflinks:
+ ltarget = checkelflinks[file]
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if results_map[ltarget]:
+ target = os.readlink(file)
+ #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
+ symlinks[file] = target
+
+ results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks
+ results.sort(key=lambda x: x[0])
+
+ for (file, elf_file) in results:
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ if elf_file & 1:
+ if elf_file & 2:
+ if 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+ else:
+ msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+ oe.qa.handle_error("already-stripped", msg, d)
+ continue
+
+ # At this point we have an unstripped elf file. We need to:
+ # a) Make sure any file we strip is not hardlinked to anything else outside this tree
+ # b) Only strip any hardlinked file once (no races)
+ # c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+ # Use a reference of device ID and inode number to identify files
+ file_reference = checkelf[file][1]
+ if file_reference in inodes:
+ os.unlink(file)
+ os.link(inodes[file_reference][0], file)
+ inodes[file_reference].append(file)
+ else:
+ inodes[file_reference] = [file]
+ # break hardlink
+ bb.utils.break_hardlinks(file)
+ elffiles[file] = elf_file
+ # Modified the file so clear the cache
+ cpath.updatecache(file)
+
+ def strip_pkgd_prefix(f):
+ nonlocal dvar
+
+ if f.startswith(dvar):
+ return f[len(dvar):]
+
+ return f
+
+ #
+ # First lets process debug splitting
+ #
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, dv, d))
+
+ if dv["srcdir"] and not hostos.startswith("mingw"):
+ if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, dv, d))
+ else:
+ for file in staticlibs:
+ results.append( (file,source_info(file, d)) )
+
+ d.setVar("PKGDEBUGSOURCES", {strip_pkgd_prefix(f): sorted(s) for f, s in results})
+
+ sources = set()
+ for r in results:
+ sources.update(r[1])
+
+ # Hardlink our debug symbols to the other hardlink copies
+ for ref in inodes:
+ if len(inodes[ref]) == 1:
+ continue
+
+ target = inodes[ref][0][len(dvar):]
+ for file in inodes[ref][1:]:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ fpath = dvar + dest
+ ftarget = dvar + dv["libdir"] + os.path.dirname(target) + dv["dir"] + "/" + os.path.basename(target) + dv["append"]
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ # Only one hardlink of separated debug info file in each directory
+ if not os.access(fpath, os.R_OK):
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
+
+ # Create symlinks for all cases we were able to split symbols
+ for file in symlinks:
+ src = file[len(dvar):]
+ dest = dv["libdir"] + os.path.dirname(src) + dv["dir"] + "/" + os.path.basename(src) + dv["append"]
+ fpath = dvar + dest
+ # Skip it if the target doesn't exist
+ try:
+ s = os.stat(fpath)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ continue
+
+ ltarget = symlinks[file]
+ lpath = os.path.dirname(ltarget)
+ lbase = os.path.basename(ltarget)
+ ftarget = ""
+ if lpath and lpath != ".":
+ ftarget += lpath + dv["dir"] + "/"
+ ftarget += lbase + dv["append"]
+ if lpath.startswith(".."):
+ ftarget = os.path.join("..", ftarget)
+ bb.utils.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Symlink %s -> %s" % (fpath, ftarget))
+ os.symlink(ftarget, fpath)
+
+ # Process the dv["srcdir"] if requested...
+ # This copies and places the referenced sources for later debugging...
+ copydebugsources(dv["srcdir"], sources, d)
+ #
+ # End of debug splitting
+ #
+
+ #
+ # Now lets go back over things and strip them
+ #
+ if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1'):
+ strip = d.getVar("STRIP")
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ #bb.note("Strip %s" % file)
+ sfiles.append((file, elf_file, strip))
+ if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ for f in staticlibs:
+ sfiles.append((f, 16, strip))
+
+ oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
+
+ # Build "minidebuginfo" and reinject it back into the stripped binaries
+ if bb.utils.contains('DISTRO_FEATURES', 'minidebuginfo', True, False, d):
+ oe.utils.multiprocess_launch(inject_minidebuginfo, list(elffiles), d,
+ extraargs=(dvar, dv, d))
+
+ #
+ # End of strip
+ #
+ os.chdir(oldcwd)
+
+
+def populate_packages(d):
+ cpath = oe.cachedpath.CachedPath()
+
+ workdir = d.getVar('WORKDIR')
+ outdir = d.getVar('DEPLOY_DIR')
+ dvar = d.getVar('PKGD')
+ packages = d.getVar('PACKAGES').split()
+ pn = d.getVar('PN')
+
+ bb.utils.mkdirhier(outdir)
+ os.chdir(dvar)
+
+ autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG") or False)
+
+ split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
+
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and add the source file contents to the source package.
+ if split_source_package:
+ src_package_name = ('%s-src' % d.getVar('PN'))
+ if not src_package_name in packages:
+ packages.append(src_package_name)
+ d.setVar('FILES:%s' % src_package_name, '/usr/src/debug')
+
+ # Sanity check PACKAGES for duplicates
+ # Sanity should be moved to sanity.bbclass once we have the infrastructure
+ package_dict = {}
+
+ for i, pkg in enumerate(packages):
+ if pkg in package_dict:
+ msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
+ oe.qa.handle_error("packages-list", msg, d)
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
+ package_dict[pkg] = (10, i)
+ elif autodebug and pkg.endswith("-dbg"):
+ package_dict[pkg] = (30, i)
+ else:
+ package_dict[pkg] = (50, i)
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
+ pkgdest = d.getVar('PKGDEST')
+
+ seen = []
+
+ # os.mkdir masks the permissions with umask so we have to unset it first
+ oldumask = os.umask(0)
+
+ debug = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = "." + os.path.join(dir, f)
+ if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
+ debug.append(path)
+
+ for pkg in packages:
+ root = os.path.join(pkgdest, pkg)
+ bb.utils.mkdirhier(root)
+
+ filesvar = d.getVar('FILES:%s' % pkg) or ""
+ if "//" in filesvar:
+ msg = "FILES variable for package %s contains '//' which is invalid. Attempting to fix this but you should correct the metadata.\n" % pkg
+ oe.qa.handle_error("files-invalid", msg, d)
+            filesvar = filesvar.replace("//", "/")
+
+ origfiles = filesvar.split()
+ files, symlink_paths = oe.package.files_from_filevars(origfiles)
+
+ if autodebug and pkg.endswith("-dbg"):
+ files.extend(debug)
+
+ for file in files:
+ if (not cpath.islink(file)) and (not cpath.exists(file)):
+ continue
+ if file in seen:
+ continue
+ seen.append(file)
+
+ def mkdir(src, dest, p):
+ src = os.path.join(src, p)
+ dest = os.path.join(dest, p)
+ fstat = cpath.stat(src)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
+ os.chown(dest, fstat.st_uid, fstat.st_gid)
+ if p not in seen:
+ seen.append(p)
+ cpath.updatecache(dest)
+
+ def mkdir_recurse(src, dest, paths):
+ if cpath.exists(dest + '/' + paths):
+ return
+ while paths.startswith("./"):
+ paths = paths[2:]
+ p = "."
+ for c in paths.split("/"):
+ p = os.path.join(p, c)
+ if not cpath.exists(os.path.join(dest, p)):
+ mkdir(src, dest, p)
+
+ if cpath.isdir(file) and not cpath.islink(file):
+ mkdir_recurse(dvar, root, file)
+ continue
+
+ mkdir_recurse(dvar, root, os.path.dirname(file))
+ fpath = os.path.join(root,file)
+ if not cpath.islink(file):
+ os.link(file, fpath)
+ continue
+ ret = bb.utils.copyfile(file, fpath)
+ if ret is False or ret == 0:
+ bb.fatal("File population failed")
+
+ # Check if symlink paths exist
+ for file in symlink_paths:
+ if not os.path.exists(os.path.join(root,file)):
+ bb.fatal("File '%s' cannot be packaged into '%s' because its "
+ "parent directory structure does not exist. One of "
+ "its parent directories is a symlink whose target "
+ "directory is not included in the package." %
+ (file, pkg))
+
+ os.umask(oldumask)
+ os.chdir(workdir)
+
+ # Handle excluding packages with incompatible licenses
+ package_list = []
+ for pkg in packages:
+ licenses = d.getVar('_exclude_incompatible-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
+ oe.qa.handle_error("incompatible-license", msg, d)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+
+ unshipped = []
+ for root, dirs, files in cpath.walk(dvar):
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = os.path.join(dir, f)
+ if ('.' + path) not in seen:
+ unshipped.append(path)
+
+    if unshipped:
+ msg = pn + ": Files/directories were installed but not shipped in any package:"
+ if "installed-vs-shipped" in (d.getVar('INSANE_SKIP:' + pn) or "").split():
+ bb.note("Package %s skipping QA tests: installed-vs-shipped" % pn)
+ else:
+ for f in unshipped:
+ msg = msg + "\n " + f
+ msg = msg + "\nPlease set FILES such that these items are packaged. Alternatively if they are unneeded, avoid installing them or delete them within do_install.\n"
+ msg = msg + "%s: %d installed and not shipped files." % (pn, len(unshipped))
+ oe.qa.handle_error("installed-vs-shipped", msg, d)
+
+def process_fixsymlinks(pkgfiles, d):
+ cpath = oe.cachedpath.CachedPath()
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar("PACKAGES", False).split()
+
+ dangling_links = {}
+ pkg_files = {}
+ for pkg in packages:
+ dangling_links[pkg] = []
+ pkg_files[pkg] = []
+ inst_root = os.path.join(pkgdest, pkg)
+ for path in pkgfiles[pkg]:
+ rpath = path[len(inst_root):]
+ pkg_files[pkg].append(rpath)
+ rtarget = cpath.realpath(path, inst_root, True, assume_dir = True)
+ if not cpath.lexists(rtarget):
+ dangling_links[pkg].append(os.path.normpath(rtarget[len(inst_root):]))
+
+ newrdepends = {}
+ for pkg in dangling_links:
+ for l in dangling_links[pkg]:
+ found = False
+ bb.debug(1, "%s contains dangling link %s" % (pkg, l))
+ for p in packages:
+ if l in pkg_files[p]:
+ found = True
+ bb.debug(1, "target found in %s" % p)
+ if p == pkg:
+ break
+ if pkg not in newrdepends:
+ newrdepends[pkg] = []
+ newrdepends[pkg].append(p)
+ break
+            if not found:
+ bb.note("%s contains dangling symlink to %s" % (pkg, l))
+
+ for pkg in newrdepends:
+ rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS:' + pkg) or "")
+ for p in newrdepends[pkg]:
+ if p not in rdepends:
+ rdepends[p] = []
+ d.setVar('RDEPENDS:' + pkg, bb.utils.join_deps(rdepends, commasep=False))
+
+def process_filedeps(pkgfiles, d):
+ """
+    Collect per-file run-time dependency metadata.
+    Output:
+    FILERPROVIDESFLIST:pkg - list of all files with provides
+    FILERPROVIDES:filepath:pkg - per-file provides
+
+    FILERDEPENDSFLIST:pkg - list of all files with dependencies
+    FILERDEPENDS:filepath:pkg - per-file dependencies
+ """
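+    # Illustrative output (assumption, not part of the original change; the
+    # exact values come from the RPMDEPS scanner) for a package "foo" that
+    # ships /usr/bin/foo needing libc.so.6:
+    #   FILERDEPENDSFLIST:foo = "/usr/bin/foo"
+    #   FILERDEPENDS:/usr/bin/foo:foo = "libc.so.6"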
+ if d.getVar('SKIP_FILEDEPS') == '1':
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+ packages = d.getVar('PACKAGES')
+ rpmdeps = d.getVar('RPMDEPS')
+
+ def chunks(files, n):
+ return [files[i:i+n] for i in range(0, len(files), n)]
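+        # e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]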
+
+ pkglist = []
+ for pkg in packages.split():
+ if d.getVar('SKIP_FILEDEPS:' + pkg) == '1':
+ continue
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-') or pkg.endswith('-src'):
+ continue
+ for files in chunks(pkgfiles[pkg], 100):
+ pkglist.append((pkg, files, rpmdeps, pkgdest))
+
+ processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
+
+ provides_files = {}
+ requires_files = {}
+
+ for result in processed:
+ (pkg, provides, requires) = result
+
+ if pkg not in provides_files:
+ provides_files[pkg] = []
+ if pkg not in requires_files:
+ requires_files[pkg] = []
+
+ for file in sorted(provides):
+ provides_files[pkg].append(file)
+ key = "FILERPROVIDES:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(provides[file]))
+
+ for file in sorted(requires):
+ requires_files[pkg].append(file)
+ key = "FILERDEPENDS:" + file + ":" + pkg
+ d.appendVar(key, " " + " ".join(requires[file]))
+
+ for pkg in requires_files:
+ d.setVar("FILERDEPENDSFLIST:" + pkg, " ".join(sorted(requires_files[pkg])))
+ for pkg in provides_files:
+ d.setVar("FILERPROVIDESFLIST:" + pkg, " ".join(sorted(provides_files[pkg])))
+
+def process_shlibs(pkgfiles, d):
+ cpath = oe.cachedpath.CachedPath()
+
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
+ if exclude_shlibs:
+ bb.note("not generating shlibs")
+ return
+
+ lib_re = re.compile(r"^.*\.so")
+ libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
+
+ packages = d.getVar('PACKAGES')
+
+ shlib_pkgs = []
+ exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
+ if exclusion_list:
+ for pkg in packages.split():
+ if pkg not in exclusion_list.split():
+ shlib_pkgs.append(pkg)
+ else:
+ bb.note("not generating shlibs for %s" % pkg)
+ else:
+ shlib_pkgs = packages.split()
+
+ hostos = d.getVar('HOST_OS')
+
+ workdir = d.getVar('WORKDIR')
+
+ ver = d.getVar('PKGV')
+ if not ver:
+ msg = "PKGV not defined"
+ oe.qa.handle_error("pkgv-undefined", msg, d)
+ return
+
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ def linux_so(file, pkg, pkgver, d):
+ needs_ldconfig = False
+ needed = set()
+ sonames = set()
+ renames = []
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+ cmd = d.getVar('OBJDUMP') + " -p " + shlex.quote(file) + " 2>/dev/null"
+ fd = os.popen(cmd)
+ lines = fd.readlines()
+ fd.close()
+ rpath = tuple()
+ for l in lines:
+ m = re.match(r"\s+RPATH\s+([^\s]*)", l)
+ if m:
+ rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
+ rpath = tuple(map(os.path.normpath, rpaths))
+ for l in lines:
+ m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
+ if m:
+ dep = m.group(1)
+                needed.add((dep, file, rpath))
+ m = re.match(r"\s+SONAME\s+([^\s]*)", l)
+ if m:
+ this_soname = m.group(1)
+ prov = (this_soname, ldir, pkgver)
+                if prov not in sonames:
+ # if library is private (only used by package) then do not build shlib for it
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
+ sonames.add(prov)
+ if libdir_re.match(os.path.dirname(file)):
+ needs_ldconfig = True
+ if needs_ldconfig and snap_symlinks and (os.path.basename(file) != this_soname):
+ renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
+ return (needs_ldconfig, needed, sonames, renames)
+
+ def darwin_so(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+ ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
+
+ def get_combinations(base):
+ #
+ # Given a base library name, find all combinations of this split by "." and "-"
+ #
+ combos = []
+ options = base.split(".")
+ for i in range(1, len(options) + 1):
+ combos.append(".".join(options[0:i]))
+ options = base.split("-")
+ for i in range(1, len(options) + 1):
+ combos.append("-".join(options[0:i]))
+ return combos
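+            # e.g. (illustrative) get_combinations("libfoo.1.2") returns
+            # ["libfoo", "libfoo.1", "libfoo.1.2", "libfoo.1.2"]; the sonames
+            # set below filters out the duplicates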
+
+ if (file.endswith('.dylib') or file.endswith('.so')) and not pkg.endswith('-dev') and not pkg.endswith('-dbg') and not pkg.endswith('-src'):
+ # Drop suffix
+ name = os.path.basename(file).rsplit(".",1)[0]
+ # Find all combinations
+ combos = get_combinations(name)
+ for combo in combos:
+                if combo not in sonames:
+ prov = (combo, ldir, pkgver)
+ sonames.add(prov)
+ if file.endswith('.dylib') or file.endswith('.so'):
+ rpath = []
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if l.startswith('path '):
+ rpath.append(l.split()[1])
+
+ p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ out, err = p.communicate()
+ # If returned successfully, process stdout for results
+ if p.returncode == 0:
+ for l in out.split("\n"):
+ l = l.strip()
+ if not l or l.endswith(":"):
+ continue
+ if "is not an object file" in l:
+ continue
+ name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
+                    if name:
+                        needed[pkg].add((name, file, tuple()))
+
+ def mingw_dll(file, needed, sonames, renames, pkgver):
+ if not os.path.exists(file):
+ return
+
+ if file.endswith(".dll"):
+ # assume all dlls are shared objects provided by the package
+ sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
+
+ if (file.endswith(".dll") or file.endswith(".exe")):
+ # use objdump to search for "DLL Name: .*\.dll"
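+            # a matching line in the objdump -p output looks like, e.g.:
+            #   DLL Name: KERNEL32.dll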
+ p = subprocess.Popen([d.expand("${OBJDUMP}"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ # process the output, grabbing all .dll names
+ if p.returncode == 0:
+ for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ dllname = m.group(1)
+ if dllname:
+ needed[pkg].add((dllname, file, tuple()))
+
+    snap_symlinks = d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1"
+
+ needed = {}
+
+ shlib_provider = oe.package.read_shlib_providers(d)
+
+ for pkg in shlib_pkgs:
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+ needs_ldconfig = False
+ bb.debug(2, "calculating shlib provides for %s" % pkg)
+
+ pkgver = d.getVar('PKGV:' + pkg)
+ if not pkgver:
+ pkgver = d.getVar('PV_' + pkg)
+ if not pkgver:
+ pkgver = ver
+
+ needed[pkg] = set()
+ sonames = set()
+ renames = []
+ linuxlist = []
+ for file in pkgfiles[pkg]:
+ if cpath.islink(file):
+ continue
+ if hostos.startswith("darwin"):
+ darwin_so(file, needed, sonames, renames, pkgver)
+ elif hostos.startswith("mingw"):
+ mingw_dll(file, needed, sonames, renames, pkgver)
+ elif os.access(file, os.X_OK) or lib_re.match(file):
+ linuxlist.append(file)
+
+ if linuxlist:
+ results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
+ for r in results:
+ ldconfig = r[0]
+ needed[pkg] |= r[1]
+ sonames |= r[2]
+ renames.extend(r[3])
+ needs_ldconfig = needs_ldconfig or ldconfig
+
+ for (old, new) in renames:
+ bb.note("Renaming %s to %s" % (old, new))
+ bb.utils.rename(old, new)
+ pkgfiles[pkg].remove(old)
+
+ shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
+        if sonames:
+ with open(shlibs_file, 'w') as fd:
+ for s in sorted(sonames):
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
+ bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('ldconfig_postinst_fragment')
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+ bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
+
+ assumed_libs = d.getVar('ASSUME_SHLIBS')
+ if assumed_libs:
+ libdir = d.getVar("libdir")
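+        # Each ASSUME_SHLIBS entry has the form <library>:<provider-package>[_<version>],
+        # e.g. (illustrative): ASSUME_SHLIBS = "libEGL.so.1:libegl libGL.so.1:libgl_1.0"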
+ for e in assumed_libs.split():
+ l, dep_pkg = e.split(":")
+ lib_ver = None
+ dep_pkg = dep_pkg.rsplit("_", 1)
+ if len(dep_pkg) == 2:
+ lib_ver = dep_pkg[1]
+ dep_pkg = dep_pkg[0]
+ if l not in shlib_provider:
+ shlib_provider[l] = {}
+ shlib_provider[l][libdir] = (dep_pkg, lib_ver)
+
+ libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
+
+ for pkg in shlib_pkgs:
+ bb.debug(2, "calculating shlib requirements for %s" % pkg)
+
+ private_libs = d.getVar('PRIVATE_LIBS:' + pkg) or d.getVar('PRIVATE_LIBS') or ""
+ private_libs = private_libs.split()
+
+ deps = list()
+ for n in needed[pkg]:
+            # if n is in private libraries, don't try to search for a provider for it;
+            # this could cause problems if some abc.bb provides a private
+            # /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on the system
+            # library libfoo.so.1, but skipping it is still a better alternative than
+            # providing our own version and then adding a runtime dependency on the
+            # same system library
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
+ bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
+ continue
+            if n[0] in shlib_provider:
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
+
+ bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
+
+ if dep_pkg == pkg:
+ continue
+
+ if ver_needed:
+ dep = "%s (>= %s)" % (dep_pkg, ver_needed)
+ else:
+ dep = dep_pkg
+                    if dep not in deps:
+ deps.append(dep)
+ continue
+ bb.note("Couldn't find shared library provider for %s, used by files: %s" % (n[0], n[1]))
+
+ deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
+ if os.path.exists(deps_file):
+ os.remove(deps_file)
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
+
+def process_pkgconfig(pkgfiles, d):
+ packages = d.getVar('PACKAGES')
+ workdir = d.getVar('WORKDIR')
+ pkgdest = d.getVar('PKGDEST')
+
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR')
+
+ pc_re = re.compile(r'(.*)\.pc$')
+ var_re = re.compile(r'(.*)=(.*)')
+ field_re = re.compile(r'(.*): (.*)')
+
+ pkgconfig_provided = {}
+ pkgconfig_needed = {}
+ for pkg in packages.split():
+ pkgconfig_provided[pkg] = []
+ pkgconfig_needed[pkg] = []
+ for file in sorted(pkgfiles[pkg]):
+ m = pc_re.match(file)
+ if m:
+ pd = bb.data.init()
+ name = m.group(1)
+ pkgconfig_provided[pkg].append(os.path.basename(name))
+ if not os.access(file, os.R_OK):
+ continue
+ with open(file, 'r') as f:
+ lines = f.readlines()
+ for l in lines:
+ m = field_re.match(l)
+ if m:
+ hdr = m.group(1)
+ exp = pd.expand(m.group(2))
+ if hdr == 'Requires':
+ pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ continue
+ m = var_re.match(l)
+ if m:
+ name = m.group(1)
+ val = m.group(2)
+ pd.setVar(name, pd.expand(val))
+
+ for pkg in packages.split():
+ pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
+        if pkgconfig_provided[pkg]:
+ with open(pkgs_file, 'w') as f:
+ for p in sorted(pkgconfig_provided[pkg]):
+ f.write('%s\n' % p)
+
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ if not os.path.exists(dir):
+ continue
+ for file in sorted(os.listdir(dir)):
+ m = re.match(r'^(.*)\.pclist$', file)
+ if m:
+ pkg = m.group(1)
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
+ pkgconfig_provided[pkg] = []
+ for l in lines:
+ pkgconfig_provided[pkg].append(l.rstrip())
+
+ for pkg in packages.split():
+ deps = []
+ for n in pkgconfig_needed[pkg]:
+ found = False
+            for k in pkgconfig_provided:
+                if n in pkgconfig_provided[k]:
+                    if k != pkg and k not in deps:
+ deps.append(k)
+ found = True
+            if not found:
+ bb.note("couldn't find pkgconfig module '%s' in any package" % n)
+ deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
+        if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
+
+def read_libdep_files(d):
+ pkglibdeps = {}
+ packages = d.getVar('PACKAGES').split()
+ for pkg in packages:
+ pkglibdeps[pkg] = {}
+ for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
+ depsfile = d.expand("${PKGDEST}/" + pkg + extension)
+ if os.access(depsfile, os.R_OK):
+ with open(depsfile) as fd:
+ lines = fd.readlines()
+ for l in lines:
+                    l = l.rstrip()
+ deps = bb.utils.explode_dep_versions2(l)
+ for dep in deps:
+                        if dep not in pkglibdeps[pkg]:
+ pkglibdeps[pkg][dep] = deps[dep]
+ return pkglibdeps
+
+def process_depchains(pkgfiles, d):
+ """
+    For a given set of prefix and postfix modifiers, make those packages
+    RRECOMMENDS the correspondingly-modified packages of their RDEPENDS.
+
+    Example: If package A depends upon package B, and A's .bb emits an
+    A-dev package, this would make A-dev Recommends: B-dev.
+
+    If only one package with a given suffix is present, it takes its
+    RRECOMMENDS from the RDEPENDS of *all* other packages. If more than one
+    package with a given suffix is present, each only uses the RDEPENDS of
+    its single parent package.
+ """
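+    # e.g. (illustrative): with DEPCHAIN_POST containing "-dev" and a recipe A
+    # that has DEPENDS = "B", the loop at the end of this function adds
+    # "B-dev" to RRECOMMENDS:A-dev.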
+
+ packages = d.getVar('PACKAGES')
+ postfixes = (d.getVar('DEPCHAIN_POST') or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE') or '').split()
+
+ def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
+
+ #bb.note('depends for %s is %s' % (base, depends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(depends):
+ if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+ def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
+
+ #bb.note('rdepends for %s is %s' % (base, rdepends))
+ rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS:' + pkg) or "")
+
+ for depend in sorted(rdepends):
+ if depend.find('virtual-locale-') != -1:
+ #bb.note("Skipping %s" % depend)
+ continue
+ if depend.endswith('-dev'):
+ depend = depend[:-4]
+ if depend.endswith('-dbg'):
+ depend = depend[:-4]
+ pkgname = getname(depend, suffix)
+ #bb.note("Adding %s for %s" % (pkgname, depend))
+ if pkgname not in rreclist and pkgname != pkg:
+ rreclist[pkgname] = []
+
+ #bb.note('setting: RRECOMMENDS:%s=%s' % (pkg, ' '.join(rreclist)))
+ d.setVar('RRECOMMENDS:%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
+
+    def add_dep(deplist, dep):
+        if dep not in deplist:
+            deplist.append(dep)
+
+ depends = []
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS') or ""):
+ add_dep(depends, dep)
+
+ rdepends = []
+ for pkg in packages.split():
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + pkg) or ""):
+ add_dep(rdepends, dep)
+
+ #bb.note('rdepends is %s' % rdepends)
+
+ def post_getname(name, suffix):
+ return '%s%s' % (name, suffix)
+ def pre_getname(name, suffix):
+ return '%s%s' % (suffix, name)
+
+ pkgs = {}
+ for pkg in packages.split():
+ for postfix in postfixes:
+ if pkg.endswith(postfix):
+                if postfix not in pkgs:
+ pkgs[postfix] = {}
+ pkgs[postfix][pkg] = (pkg[:-len(postfix)], post_getname)
+
+ for prefix in prefixes:
+ if pkg.startswith(prefix):
+                if prefix not in pkgs:
+ pkgs[prefix] = {}
+ pkgs[prefix][pkg] = (pkg[:-len(prefix)], pre_getname)
+
+ if "-dbg" in pkgs:
+ pkglibdeps = read_libdep_files(d)
+ pkglibdeplist = []
+ for pkg in pkglibdeps:
+ for k in pkglibdeps[pkg]:
+ add_dep(pkglibdeplist, k)
+ dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS') == '1') or (bb.data.inherits_class('packagegroup', d)))
+
+ for suffix in pkgs:
+ for pkg in pkgs[suffix]:
+ if d.getVarFlag('RRECOMMENDS:' + pkg, 'nodeprrecs'):
+ continue
+ (base, func) = pkgs[suffix][pkg]
+ if suffix == "-dev":
+ pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
+ elif suffix == "-dbg":
+ if not dbgdefaultdeps:
+ pkg_addrrecs(pkg, base, suffix, func, pkglibdeplist, d)
+ continue
+ if len(pkgs[suffix]) == 1:
+ pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
+ else:
+ rdeps = []
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS:' + base) or ""):
+ add_dep(rdeps, dep)
+ pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
index d3b45705ec..6774cdb794 100644
--- a/meta/lib/oe/package_manager/__init__.py
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -120,7 +122,8 @@ def generate_locale_archive(d, rootfs, target_arch, localedir):
"riscv32": ["--uint32-align=4", "--little-endian"],
"i586": ["--uint32-align=4", "--little-endian"],
"i686": ["--uint32-align=4", "--little-endian"],
- "x86_64": ["--uint32-align=4", "--little-endian"]
+ "x86_64": ["--uint32-align=4", "--little-endian"],
+ "loongarch64": ["--uint32-align=4", "--little-endian"]
}
if target_arch in locale_arch_options:
arch_options = locale_arch_options[target_arch]
@@ -467,7 +470,10 @@ def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencie
# Detect bitbake -b usage
nodeps = d.getVar("BB_LIMITEDDEPS") or False
if nodeps or not filterbydependencies:
- oe.path.symlink(deploydir, subrepo_dir, True)
+ for arch in d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").split() + d.getVar("ALL_MULTILIB_PACKAGE_ARCHS").replace("-", "_").split():
+            target = os.path.join(deploydir, arch)
+ if os.path.exists(target):
+ oe.path.symlink(target, subrepo_dir + "/" + arch, True)
return
start = None
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
index b96ea0bad4..0c23c884c1 100644
--- a/meta/lib/oe/package_manager/deb/__init__.py
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -80,15 +82,15 @@ class DpkgIndexer(Indexer):
return
oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
- if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
signer = None
if signer:
for f in index_sign_files:
signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
output_suffix="gpg",
use_sha256=True)
diff --git a/meta/lib/oe/package_manager/deb/manifest.py b/meta/lib/oe/package_manager/deb/manifest.py
index d8eab24a06..72983bae98 100644
--- a/meta/lib/oe/package_manager/deb/manifest.py
+++ b/meta/lib/oe/package_manager/deb/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/deb/rootfs.py b/meta/lib/oe/package_manager/deb/rootfs.py
index 8fbaca11d6..1e25b64ed9 100644
--- a/meta/lib/oe/package_manager/deb/rootfs.py
+++ b/meta/lib/oe/package_manager/deb/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/deb/sdk.py b/meta/lib/oe/package_manager/deb/sdk.py
index f4b0b6510a..6f3005053e 100644
--- a/meta/lib/oe/package_manager/deb/sdk.py
+++ b/meta/lib/oe/package_manager/deb/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -67,7 +69,12 @@ class PkgSdk(Sdk):
self.target_pm.run_pre_post_installs()
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
index 7cbea0fa80..8cc9953a02 100644
--- a/meta/lib/oe/package_manager/ipk/__init__.py
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -14,6 +16,7 @@ class OpkgIndexer(Indexer):
]
opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ opkg_index_cmd_extra_params = self.d.getVar('OPKG_MAKE_INDEX_EXTRA_PARAMS') or ""
if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
else:
@@ -39,8 +42,8 @@ class OpkgIndexer(Indexer):
if not os.path.exists(pkgs_file):
open(pkgs_file, "w").close()
- index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir, opkg_index_cmd_extra_params))
index_sign_files.add(pkgs_file)
@@ -131,7 +134,7 @@ class OpkgDpkgPM(PackageManager):
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
- data_tar = 'data.tar.xz'
+ data_tar = 'data.tar.zst'
try:
cmd = [ar_cmd, 'x', pkg_path]
@@ -245,7 +248,7 @@ class OpkgPM(OpkgDpkgPM):
"""
if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
+ cfg_file_name = oe.path.join(self.target_rootfs,
self.d.getVar("sysconfdir"),
"opkg",
"local-%s-feed.conf" % arch)
@@ -503,6 +506,6 @@ class OpkgPM(OpkgDpkgPM):
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.zst"))
return tmp_dir
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
index ae451c5c70..3549d7428d 100644
--- a/meta/lib/oe/package_manager/ipk/manifest.py
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -62,7 +64,7 @@ class PkgManifest(Manifest):
if len(pkgs_to_install) == 0:
return
- output = pm.dummy_install(pkgs_to_install).decode('utf-8')
+ output = pm.dummy_install(pkgs_to_install)
with open(self.full_manifest, 'w+') as manifest:
pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py
index 10a831994e..ba93eb62ea 100644
--- a/meta/lib/oe/package_manager/ipk/rootfs.py
+++ b/meta/lib/oe/package_manager/ipk/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -163,7 +165,7 @@ class PkgRootfs(DpkgOpkgRootfs):
"""
def _multilib_sanity_test(self, dirs):
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
+ allow_replace = "|".join((self.d.getVar("MULTILIBRE_ALLOW_REP") or "").split())
if allow_replace is None:
allow_replace = ""
diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py
index e2ca415c8e..3acd55f548 100644
--- a/meta/lib/oe/package_manager/ipk/sdk.py
+++ b/meta/lib/oe/package_manager/ipk/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -61,12 +63,19 @@ class PkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.target_pm.remove_packaging_data()
+ else:
+ self.target_pm.remove_lists()
bb.note("Installing NATIVESDK packages")
self._populate_sysroot(self.host_pm, self.host_manifest)
@@ -78,6 +87,8 @@ class PkgSdk(Sdk):
if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
self.host_pm.remove_packaging_data()
+ else:
+ self.host_pm.remove_lists()
target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
index d97dab3293..f40c880af4 100644
--- a/meta/lib/oe/package_manager/rpm/__init__.py
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -96,11 +98,15 @@ class RpmPM(PackageManager):
archs = ["sdk_provides_dummy_target"] + archs
confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
+ with open(confdir + "arch", 'w') as f:
+ f.write(":".join(archs))
+
distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+ with open(confdir + "releasever", 'w') as f:
+ f.write(distro_codename if distro_codename is not None else '')
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+ with open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w') as f:
+ f.write("")
def _configure_rpm(self):
@@ -110,14 +116,17 @@ class RpmPM(PackageManager):
platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(platformconfdir + "platform", 'w') as f:
+ f.write("%s-pc-linux" % self.primary_arch)
with open(rpmrcconfdir + "rpmrc", 'w') as f:
f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ with open(platformconfdir + "macros", 'w') as f:
+ f.write("%_transaction_color 7\n")
if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ with open(platformconfdir + "macros", 'a') as f:
+ f.write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
@@ -164,13 +173,13 @@ class RpmPM(PackageManager):
repo_uri = uri + "/" + arch
repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
else:
repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+ with open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w') as f:
+ f.write("[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
def _prepare_pkg_transaction(self):
os.environ['D'] = self.target_rootfs
@@ -329,7 +338,8 @@ class RpmPM(PackageManager):
return e.output.decode("utf-8")
def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ with open(self.solution_manifest, 'w') as f:
+ f.write(" ".join(pkgs))
return pkgs
def load_old_install_solution(self):
@@ -363,7 +373,8 @@ class RpmPM(PackageManager):
bb.utils.mkdirhier(target_path)
num = self._script_num_prefix(target_path)
saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
+ with open(saved_script_name, 'w') as f:
+ f.write(output)
os.chmod(saved_script_name, 0o755)
def _handle_intercept_failure(self, registered_pkgs):
@@ -375,11 +386,12 @@ class RpmPM(PackageManager):
self.save_rpmpostinst(pkg)
def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ output = self._invoke_dnf(["repoquery", "--location", pkg])
pkg_name = output.splitlines()[-1]
if not pkg_name.endswith(".rpm"):
bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
+ # Strip file: prefix
+ pkg_path = pkg_name[5:]
cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py
index e6604b301f..6ee7c329f0 100644
--- a/meta/lib/oe/package_manager/rpm/manifest.py
+++ b/meta/lib/oe/package_manager/rpm/manifest.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
index 00d07cd9cc..3ba5396320 100644
--- a/meta/lib/oe/package_manager/rpm/rootfs.py
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -108,7 +110,7 @@ class PkgRootfs(Rootfs):
if self.progress_reporter:
self.progress_reporter.next_stage()
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+ self._setup_dbg_rootfs(['/etc/rpm', '/etc/rpmrc', '/etc/dnf', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
execute_pre_post_process(self.d, rpm_post_process_cmds)
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
index c5f232431f..ea79fe050b 100644
--- a/meta/lib/oe/package_manager/rpm/sdk.py
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -65,7 +67,12 @@ class PkgSdk(Sdk):
self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+ env_bkp = os.environ.copy()
+ os.environ['PATH'] = self.d.expand("${COREBASE}/scripts/nativesdk-intercept") + \
+ os.pathsep + os.environ["PATH"]
+
self.target_pm.run_intercepts(populate_sdk='target')
+ os.environ.update(env_bkp)
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
@@ -110,5 +117,6 @@ class PkgSdk(Sdk):
for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
self.movefile(f, native_sysconf_dir)
for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
+ self.mkdirhier(native_sysconf_dir + "/dnf")
+ self.movefile(f, native_sysconf_dir + "/dnf")
self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index 212f048bc6..2d1d6ddeb7 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -1,9 +1,16 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import codecs
import os
+import json
+import bb.compress.zstd
+import oe.path
+
+from glob import glob
def packaged(pkg, d):
return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
@@ -108,3 +115,252 @@ def recipename(pkg, d):
"""Return the recipe name for the given binary package name."""
return pkgmap(d).get(pkg)
+
+def foreach_runtime_provider_pkgdata(d, rdep, include_rdep=False):
+ pkgdata_dir = d.getVar("PKGDATA_DIR")
+ possibles = set()
+ try:
+ possibles |= set(os.listdir("%s/runtime-rprovides/%s/" % (pkgdata_dir, rdep)))
+ except OSError:
+ pass
+
+ if include_rdep:
+ possibles.add(rdep)
+
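+    # Yields (provider, pkgdata dict) pairs for every package rproviding rdep,
+    # e.g. (illustrative): for p, data in foreach_runtime_provider_pkgdata(d, "libfoo1")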
+ for p in sorted(list(possibles)):
+ rdep_data = read_subpkgdata(p, d)
+ yield p, rdep_data
+
+def get_package_mapping(pkg, basepkg, d, depversions=None):
+ import oe.packagedata
+
+ data = oe.packagedata.read_subpkgdata(pkg, d)
+ key = "PKG:%s" % pkg
+
+ if key in data:
+ if bb.data.inherits_class('allarch', d) and bb.data.inherits_class('packagegroup', d) and pkg != data[key]:
+ bb.error("An allarch packagegroup shouldn't depend on packages which are dynamically renamed (%s to %s)" % (pkg, data[key]))
+ # Have to avoid undoing the write_extra_pkgs(global_variants...)
+ if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
+ and data[key] == basepkg:
+ return pkg
+ if depversions == []:
+ # Avoid returning a mapping if the renamed package rprovides its original name
+ rprovkey = "RPROVIDES:%s" % pkg
+ if rprovkey in data:
+ if pkg in bb.utils.explode_dep_versions2(data[rprovkey]):
+ bb.note("%s rprovides %s, not replacing the latter" % (data[key], pkg))
+ return pkg
+ # Do map to rewritten package name
+ return data[key]
+
+ return pkg
+
+def get_package_additional_metadata(pkg_type, d):
+ base_key = "PACKAGE_ADD_METADATA"
+ for key in ("%s_%s" % (base_key, pkg_type.upper()), base_key):
+ if d.getVar(key, False) is None:
+ continue
+ d.setVarFlag(key, "type", "list")
+ if d.getVarFlag(key, "separator") is None:
+ d.setVarFlag(key, "separator", "\\n")
+ metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
+ return "\n".join(metadata_fields).strip()
+
+def runtime_mapping_rename(varname, pkg, d):
+ #bb.note("%s before: %s" % (varname, d.getVar(varname)))
+
+ new_depends = {}
+ deps = bb.utils.explode_dep_versions2(d.getVar(varname) or "")
+ for depend, depversions in deps.items():
+ new_depend = get_package_mapping(depend, pkg, d, depversions)
+ if depend != new_depend:
+ bb.note("package name mapping done: %s -> %s" % (depend, new_depend))
+ new_depends[new_depend] = deps[depend]
+
+ d.setVar(varname, bb.utils.join_deps(new_depends, commasep=False))
+
+ #bb.note("%s after: %s" % (varname, d.getVar(varname)))
+
+def emit_pkgdata(pkgfiles, d):
+ def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
+ defer_fragment = """
+if [ -n "$D" ]; then
+ $INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
+ exit 0
+fi
+""" % (pkgval, mlprefix)
+
+ postinst = d.getVar('pkg_postinst:%s' % pkg)
+ postinst_ontarget = d.getVar('pkg_postinst_ontarget:%s' % pkg)
+
+ if postinst_ontarget:
+ bb.debug(1, 'adding deferred pkg_postinst_ontarget() to pkg_postinst() for %s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += defer_fragment
+ postinst += postinst_ontarget
+ d.setVar('pkg_postinst:%s' % pkg, postinst)
+
+ def add_set_e_to_scriptlets(pkg):
+ for scriptlet_name in ('pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm'):
+ scriptlet = d.getVar('%s:%s' % (scriptlet_name, pkg))
+ if scriptlet:
+ scriptlet_split = scriptlet.split('\n')
+ if scriptlet_split[0].startswith("#!"):
+ scriptlet = scriptlet_split[0] + "\nset -e\n" + "\n".join(scriptlet_split[1:])
+ else:
+ scriptlet = "set -e\n" + "\n".join(scriptlet_split[0:])
+ d.setVar('%s:%s' % (scriptlet_name, pkg), scriptlet)
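+        # e.g. a scriptlet beginning "#!/bin/sh" gains "set -e" as its second
+        # line; one without a shebang gains "set -e" as its first line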
+
+ def write_if_exists(f, pkg, var):
+        def encode(s):
+            # codecs is already imported at module level
+            c = codecs.getencoder("unicode_escape")
+            return c(s)[0].decode("latin1")
+
+ val = d.getVar('%s:%s' % (var, pkg))
+ if val:
+ f.write('%s:%s: %s\n' % (var, pkg, encode(val)))
+ return val
+        val = d.getVar(var)
+ if val:
+ f.write('%s: %s\n' % (var, encode(val)))
+ return val
+
+ def write_extra_pkgs(variants, pn, packages, pkgdatadir):
+ for variant in variants:
+ with open("%s/%s-%s" % (pkgdatadir, variant, pn), 'w') as fd:
+ fd.write("PACKAGES: %s\n" % ' '.join(
+ map(lambda pkg: '%s-%s' % (variant, pkg), packages.split())))
+
+ def write_extra_runtime_pkgs(variants, packages, pkgdatadir):
+ for variant in variants:
+ for pkg in packages.split():
+ ml_pkg = "%s-%s" % (variant, pkg)
+ subdata_file = "%s/runtime/%s" % (pkgdatadir, ml_pkg)
+ with open(subdata_file, 'w') as fd:
+ fd.write("PKG:%s: %s" % (ml_pkg, pkg))
+
+ packages = d.getVar('PACKAGES')
+ pkgdest = d.getVar('PKGDEST')
+ pkgdatadir = d.getVar('PKGDESTWORK')
+
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
+
+ pkgdebugsource = d.getVar("PKGDEBUGSOURCES") or []
+
+ pn = d.getVar('PN')
+ global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
+ variants = (d.getVar('MULTILIB_VARIANTS') or "").split()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_pkgs(variants, pn, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
+
+ workdir = d.getVar('WORKDIR')
+
+ for pkg in packages.split():
+ pkgval = d.getVar('PKG:%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+ d.setVar('PKG:%s' % pkg, pkg)
+
+ extended_data = {
+ "files_info": {}
+ }
+
+ pkgdestpkg = os.path.join(pkgdest, pkg)
+ files = {}
+ files_extra = {}
+ total_size = 0
+ seen = set()
+ for f in pkgfiles[pkg]:
+ fpath = os.sep + os.path.relpath(f, pkgdestpkg)
+
+ fstat = os.lstat(f)
+ files[fpath] = fstat.st_size
+
+ extended_data["files_info"].setdefault(fpath, {})
+ extended_data["files_info"][fpath]['size'] = fstat.st_size
+
+ if fstat.st_ino not in seen:
+ seen.add(fstat.st_ino)
+ total_size += fstat.st_size
+
+ if fpath in pkgdebugsource:
+ extended_data["files_info"][fpath]['debugsrc'] = pkgdebugsource[fpath]
+ del pkgdebugsource[fpath]
+
+        d.setVar('FILES_INFO:' + pkg, json.dumps(files, sort_keys=True))
+
+ process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
+ add_set_e_to_scriptlets(pkg)
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in sorted((d.getVar('FILERPROVIDESFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERPROVIDES:' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in sorted((d.getVar('FILERDEPENDSFLIST:' + pkg) or "").split()):
+ write_if_exists(sf, pkg, 'FILERDEPENDS:' + dfile)
+
+ sf.write('%s:%s: %d\n' % ('PKGSIZE', pkg, total_size))
+
+ subdata_extended_file = pkgdatadir + "/extended/%s.json.zstd" % pkg
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(subdata_extended_file, "wt", encoding="utf-8", num_threads=num_threads) as f:
+ json.dump(extended_data, f, sort_keys=True, separators=(",", ":"))
+
+ # Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES:%s' % pkg) or d.getVar('RPROVIDES')
+ if rprov:
+ for p in bb.utils.explode_deps(rprov):
+ subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
+ bb.utils.mkdirhier(os.path.dirname(subdata_sym))
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ allow_empty = d.getVar('ALLOW_EMPTY:%s' % pkg)
+ if not allow_empty:
+ allow_empty = d.getVar('ALLOW_EMPTY')
+ root = "%s/%s" % (pkgdest, pkg)
+ os.chdir(root)
+ g = glob('*')
+ if g or allow_empty == "1":
+ # Symlinks needed for reverse lookups (from the final package name)
+ subdata_sym = pkgdatadir + "/runtime-reverse/%s" % pkgval
+ oe.path.relsymlink(subdata_file, subdata_sym, True)
+
+ packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
+ open(packagedfile, 'w').close()
+
+ if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
+ write_extra_runtime_pkgs(variants, packages, pkgdatadir)
+
+ if bb.data.inherits_class('allarch', d) and not variants \
+ and not bb.data.inherits_class('packagegroup', d):
+ write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
+
+def mapping_rename_hook(d):
+ """
+ Rewrite variables to account for package renaming in things
+ like debian.bbclass or manual PKG variable name changes
+ """
+ pkg = d.getVar("PKG")
+ oe.packagedata.runtime_mapping_rename("RDEPENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RRECOMMENDS", pkg, d)
+ oe.packagedata.runtime_mapping_rename("RSUGGESTS", pkg, d)
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
index 8fcaecde82..7b7594751a 100644
--- a/meta/lib/oe/packagegroup.py
+++ b/meta/lib/oe/packagegroup.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 4ec9caed45..60a0cc8291 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -1,10 +1,14 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
+import shlex
+import subprocess
import oe.path
import oe.types
-import subprocess
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -25,8 +29,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
- import pipes
-
if dir:
olddir = os.path.abspath(os.curdir)
if not os.path.exists(dir):
@@ -35,7 +37,7 @@ def runcmd(args, dir = None):
# print("cwd: %s -> %s" % (olddir, dir))
try:
- args = [ pipes.quote(str(arg)) for arg in args ]
+ args = [ shlex.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
@@ -215,7 +217,7 @@ class PatchTree(PatchSet):
with open(self.seriespath, 'w') as f:
for p in patches:
f.write(p)
-
+
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
@@ -292,8 +294,9 @@ class PatchTree(PatchSet):
self.Pop(all=True)
class GitApplyTree(PatchTree):
- patch_line_prefix = '%% original patch'
- ignore_commit_prefix = '%% ignore'
+ notes_ref = "refs/notes/devtool"
+ original_patch = 'original patch'
+ ignore_commit = 'ignore'
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
@@ -450,7 +453,7 @@ class GitApplyTree(PatchTree):
# Prepare git command
cmd = ["git"]
GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
- cmd += ["commit", "-F", tmpfile]
+ cmd += ["commit", "-F", tmpfile, "--no-verify"]
# git doesn't like plain email addresses as authors
if author and '<' in author:
cmd.append('--author="%s"' % author)
@@ -459,44 +462,131 @@ class GitApplyTree(PatchTree):
return (tmpfile, cmd)
@staticmethod
- def extractPatches(tree, startcommit, outdir, paths=None):
+ def addNote(repo, ref, key, value=None):
+ note = key + (": %s" % value if value else "")
+ notes_ref = GitApplyTree.notes_ref
+ runcmd(["git", "config", "notes.rewriteMode", "ignore"], repo)
+ runcmd(["git", "config", "notes.displayRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "config", "notes.rewriteRef", notes_ref, notes_ref], repo)
+ runcmd(["git", "notes", "--ref", notes_ref, "append", "-m", note, ref], repo)
+
+ @staticmethod
+ def removeNote(repo, ref, key):
+ notes = GitApplyTree.getNotes(repo, ref)
+ notes = {k: v for k, v in notes.items() if k != key and not k.startswith(key + ":")}
+ runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "remove", "--ignore-missing", ref], repo)
+ for note, value in notes.items():
+ GitApplyTree.addNote(repo, ref, note, value)
+
+ @staticmethod
+ def getNotes(repo, ref):
+ import re
+
+ note = None
+ try:
+ note = runcmd(["git", "notes", "--ref", GitApplyTree.notes_ref, "show", ref], repo)
+ prefix = ""
+ except CmdError:
+ note = runcmd(['git', 'show', '-s', '--format=%B', ref], repo)
+ prefix = "%% "
+
+ note_re = re.compile(r'^%s(.*?)(?::\s*(.*))?$' % prefix)
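+        # e.g. (illustrative) a note line "original patch: fix-build.patch"
+        # parses to {"original patch": "fix-build.patch"}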
+ notes = dict()
+ for line in note.splitlines():
+ m = note_re.match(line)
+ if m:
+ notes[m.group(1)] = m.group(2)
+
+ return notes
+
+ @staticmethod
+ def commitIgnored(subject, dir=None, files=None, d=None):
+ if files:
+ runcmd(['git', 'add'] + files, dir)
+ cmd = ["git"]
+ GitApplyTree.gitCommandUserOptions(cmd, d=d)
+ cmd += ["commit", "-m", subject, "--no-verify"]
+ runcmd(cmd, dir)
+ GitApplyTree.addNote(dir, "HEAD", GitApplyTree.ignore_commit)
+
+ @staticmethod
+ def extractPatches(tree, startcommits, outdir, paths=None):
import tempfile
import shutil
tempdir = tempfile.mkdtemp(prefix='oepatch')
try:
- shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
- if paths:
- shellcmd.append('--')
- shellcmd.extend(paths)
- out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
- if out:
- for srcfile in out.split():
- for encoding in ['utf-8', 'latin-1']:
- patchlines = []
- outfile = None
- try:
- with open(srcfile, 'r', encoding=encoding) as f:
- for line in f:
- if line.startswith(GitApplyTree.patch_line_prefix):
- outfile = line.split()[-1].strip()
- continue
- if line.startswith(GitApplyTree.ignore_commit_prefix):
- continue
- patchlines.append(line)
- except UnicodeDecodeError:
+ for name, rev in startcommits.items():
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", rev, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.join(tree, name))
+ if out:
+ for srcfile in out.split():
+ # This loop, which is used to remove any line that
+ # starts with "%% original patch", is kept for backwards
+ # compatibility. If/when that compatibility is dropped,
+ # it can be replaced with code to just read the first
+ # line of the patch file to get the SHA-1, and the code
+ # below that writes the modified patch file can be
+ # replaced with a simple file move.
+ for encoding in ['utf-8', 'latin-1']:
+ patchlines = []
+ try:
+ with open(srcfile, 'r', encoding=encoding, newline='') as f:
+ for line in f:
+ if line.startswith("%% " + GitApplyTree.original_patch):
+ continue
+ patchlines.append(line)
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
+
+ sha1 = patchlines[0].split()[1]
+ notes = GitApplyTree.getNotes(os.path.join(tree, name), sha1)
+ if GitApplyTree.ignore_commit in notes:
continue
- break
- else:
- raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
-
- if not outfile:
- outfile = os.path.basename(srcfile)
- with open(os.path.join(outdir, outfile), 'w') as of:
- for line in patchlines:
- of.write(line)
+ outfile = notes.get(GitApplyTree.original_patch, os.path.basename(srcfile))
+
+ bb.utils.mkdirhier(os.path.join(outdir, name))
+ with open(os.path.join(outdir, name, outfile), 'w') as of:
+ for line in patchlines:
+ of.write(line)
finally:
shutil.rmtree(tempdir)
+ def _need_dirty_check(self):
+ fetch = bb.fetch2.Fetch([], self.d)
+ check_dirtyness = False
+ for url in fetch.urls:
+ url_data = fetch.ud[url]
+ parm = url_data.parm
+            # a git URL with a subpath parameter will surely be dirty, since
+            # the git tree we clone from is emptied of all files that are not
+            # in the subpath
+ if url_data.type == 'git' and parm.get('subpath'):
+ check_dirtyness = True
+ return check_dirtyness
+
+ def _commitpatch(self, patch, patchfilevar):
+ output = ""
+ # Add all files
+ shellcmd = ["git", "add", "-f", "-A", "."]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Exclude the patches directory
+ shellcmd = ["git", "reset", "HEAD", self.patchdir]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Commit the result
+ (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
+ try:
+ shellcmd.insert(0, patchfilevar)
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ finally:
+ os.remove(tmpfile)
+ return output
+
def _applypatch(self, patch, force = False, reverse = False, run = True):
import shutil
@@ -511,27 +601,26 @@ class GitApplyTree(PatchTree):
return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Add hooks which add a pointer to the original patch file name in the commit message
reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
if not reporoot:
raise Exception("Cannot get repository root for directory %s" % self.dir)
- hooks_dir = os.path.join(reporoot, '.git', 'hooks')
- hooks_dir_backup = hooks_dir + '.devtool-orig'
- if os.path.lexists(hooks_dir_backup):
- raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
- if os.path.lexists(hooks_dir):
- shutil.move(hooks_dir, hooks_dir_backup)
- os.mkdir(hooks_dir)
- commithook = os.path.join(hooks_dir, 'commit-msg')
- applyhook = os.path.join(hooks_dir, 'applypatch-msg')
- with open(commithook, 'w') as f:
- # NOTE: the formatting here is significant; if you change it you'll also need to
- # change other places which read it back
- f.write('echo "\n%s: $PATCHFILE" >> $1' % GitApplyTree.patch_line_prefix)
- os.chmod(commithook, 0o755)
- shutil.copy2(commithook, applyhook)
+
+ patch_applied = True
try:
patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+ if self._need_dirty_check():
+                # Check dirtiness of the tree
+ try:
+ output = runcmd(["git", "--work-tree=%s" % reporoot, "status", "--short"])
+ except CmdError:
+ pass
+ else:
+ if output:
+ # The tree is dirty, no need to try to apply patches with git anymore
+ # since they fail, fallback directly to patch
+ output = PatchTree._applypatch(self, patch, force, reverse, run)
+ output += self._commitpatch(patch, patchfilevar)
+ return output
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
@@ -558,24 +647,14 @@ class GitApplyTree(PatchTree):
except CmdError:
# Fall back to patch
output = PatchTree._applypatch(self, patch, force, reverse, run)
- # Add all files
- shellcmd = ["git", "add", "-f", "-A", "."]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Exclude the patches directory
- shellcmd = ["git", "reset", "HEAD", self.patchdir]
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- # Commit the result
- (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
- try:
- shellcmd.insert(0, patchfilevar)
- output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- finally:
- os.remove(tmpfile)
+ output += self._commitpatch(patch, patchfilevar)
return output
+ except:
+ patch_applied = False
+ raise
finally:
- shutil.rmtree(hooks_dir)
- if os.path.lexists(hooks_dir_backup):
- shutil.move(hooks_dir_backup, hooks_dir)
+ if patch_applied:
+ GitApplyTree.addNote(self.dir, "HEAD", GitApplyTree.original_patch, os.path.basename(patch['file']))
class QuiltTree(PatchSet):
@@ -736,8 +815,9 @@ class NOOPResolver(Resolver):
self.patchset.Push()
except Exception:
import sys
- os.chdir(olddir)
raise
+ finally:
+ os.chdir(olddir)
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
@@ -797,9 +877,9 @@ class UserResolver(Resolver):
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
- os.chdir(olddir)
raise
- os.chdir(olddir)
+ finally:
+ os.chdir(olddir)
def patch_path(url, fetch, workdir, expand=True):
@@ -919,4 +999,3 @@ def should_apply(parm, d):
return False, "applies to later version"
return True, None
-
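
Instead of installing temporary commit-msg/applypatch-msg hooks, the original patch file name is now recorded out-of-band as a git note on HEAD after a successful apply (the addNote() call above). A minimal sketch of such a helper, assuming this module's runcmd() wrapper and a dedicated notes ref name:

    def addNote(repo, ref, key, value=None):
        # Append "<key>: <value>" (or bare "<key>") as a note on ref, kept in
        # a private notes namespace so plain "git log" output is unaffected.
        note = key + (": %s" % value if value else "")
        notes_ref = "refs/notes/devtool"  # assumed namespace
        runcmd(["git", "notes", "--ref", notes_ref, "append", "-m", note, ref], repo)
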
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index c8d8ad05b9..5d21cdcbdf 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -123,7 +125,8 @@ def copyhardlinktree(src, dst):
if os.path.isdir(src):
if len(glob.glob('%s/.??*' % src)) > 0:
source = './.??* '
- source += './*'
+ if len(glob.glob('%s/**' % src)) > 0:
+ source += './*'
s_dir = src
else:
source = src
@@ -169,6 +172,9 @@ def symlink(source, destination, force=False):
if e.errno != errno.EEXIST or os.readlink(destination) != source:
raise
+def relsymlink(target, name, force=False):
+ symlink(os.path.relpath(target, os.path.dirname(name)), name, force=force)
+
def find(dir, **walkoptions):
""" Given a directory, recurses into that directory,
returning all files as absolute paths. """
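
relsymlink() converts an absolute target into a path relative to the link's own directory before delegating to symlink(), so the link survives the tree being moved or mounted elsewhere. For example (paths hypothetical):

    # Creates /deploy/images/latest -> ../../build/image.ext4 instead of
    # embedding the absolute /build path in the link:
    relsymlink("/build/image.ext4", "/deploy/images/latest")
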
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 339f7aebca..c41242c878 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -76,8 +78,7 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
df = d.getVar('PRSERV_DUMPFILE')
#write data
- lf = bb.utils.lockfile("%s.lock" % df)
- with open(df, "a") as f:
+ with open(df, "a") as f, bb.utils.fileslocked(["%s.lock" % df]) as locks:
if metainfo:
#dump column info
f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']);
@@ -111,7 +112,6 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
if not nomax:
for i in idx:
f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
- bb.utils.unlockfile(lf)
def prserv_check_avail(d):
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
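
The export path now holds the lock via the bb.utils.fileslocked() context manager, so it is released even if writing raises; the previous lockfile()/unlockfile() pair leaked the lock on any exception in between. The general pattern, sketched with a hypothetical dump file:

    import bb.utils

    df = "/tmp/prserv.dump"  # hypothetical path
    with open(df, "a") as f, bb.utils.fileslocked([df + ".lock"]):
        f.write("#PR_core_ver = ...\n")
    # file handle and lock are both released here, even on error
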
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index 89acd3ead0..f8ae3c743f 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -211,6 +213,23 @@ def exit_with_message_if_errors(message, d):
def exit_if_errors(d):
exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
+def check_upstream_status(fullpath):
+ import re
+ kinda_status_re = re.compile(r"^.*upstream.*status.*$", re.IGNORECASE | re.MULTILINE)
+ strict_status_re = re.compile(r"^Upstream-Status: (Pending|Submitted|Denied|Inappropriate|Backport|Inactive-Upstream)( .+)?$", re.MULTILINE)
+ guidelines = "https://docs.yoctoproject.org/contributor-guide/recipe-style-guide.html#patch-upstream-status"
+
+ with open(fullpath, encoding='utf-8', errors='ignore') as f:
+ file_content = f.read()
+ match_kinda = kinda_status_re.search(file_content)
+ match_strict = strict_status_re.search(file_content)
+
+ if not match_strict:
+ if match_kinda:
+ return "Malformed Upstream-Status in patch\n%s\nPlease correct according to %s :\n%s" % (fullpath, guidelines, match_kinda.group(0))
+ else:
+ return "Missing Upstream-Status in patch\n%s\nPlease add according to %s ." % (fullpath, guidelines)
+
if __name__ == "__main__":
import sys
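
check_upstream_status() returns None for a well-formed tag and an explanatory message otherwise. A rough illustration of the two regexes (file names hypothetical):

    # Passes the strict regex, so no message is returned:
    #   Upstream-Status: Backport [https://example.com/commit/abc]
    assert check_upstream_status("good.patch") is None

    # "Upstream status: pending" matches only the loose, case-insensitive
    # regex, so a "Malformed Upstream-Status" message is returned;
    # a patch with no status line at all yields "Missing Upstream-Status".
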
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index b04992c66d..de1fbdd3a8 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -24,9 +24,9 @@ from collections import OrderedDict, defaultdict
from bb.utils import vercmp_string
# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha[0-9]+sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
@@ -421,8 +421,6 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F
# Ensure we handle class-target if we're dealing with one of the variants
variants.append('target')
for variant in variants:
- if variant.startswith("devupstream"):
- localdata.setVar('SRCPV', 'git')
localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
fetch_urls(localdata)
@@ -666,19 +664,23 @@ def get_bbappend_path(d, destlayerdir, wildcardver=False):
return (appendpath, pathok)
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None, params=None, update_original_recipe=False):
"""
Writes a bbappend file for a recipe
Parameters:
rd: data dictionary for the recipe
destlayerdir: base directory of the layer to place the bbappend in
(subdirectory path from there will be determined automatically)
- srcfiles: dict of source files to add to SRC_URI, where the value
- is the full path to the file to be added, and the value is the
- original filename as it would appear in SRC_URI or None if it
- isn't already present. You may pass None for this parameter if
- you simply want to specify your own content via the extralines
- parameter.
+ srcfiles: dict of source files to add to SRC_URI, where the key
+ is the full path to the file to be added, and the value is a
+ dict with the following optional keys:
+ path: the original filename as it would appear in SRC_URI
+ or None if it isn't already present.
+ patchdir: the patchdir parameter
+ newname: the name to give to the newly added file. None to use
+ the default value: basename(path)
+ You may pass None for this parameter if you simply want to specify
+ your own content via the extralines parameter.
install: dict mapping entries in srcfiles to a tuple of two elements:
install path (*without* ${D} prefix) and permission value (as a
string, e.g. '0644').
@@ -699,18 +701,29 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
params:
Parameters to use when adding entries to SRC_URI. If specified,
should be a list of dicts with the same length as srcfiles.
+ update_original_recipe:
+ Force updating the original recipe instead of creating/updating
+ a bbappend. destlayerdir must contain the original recipe
"""
if not removevalues:
removevalues = {}
- # Determine how the bbappend should be named
- appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
- if not appendpath:
- bb.error('Unable to determine layer directory containing %s' % recipefile)
- return (None, None)
- if not pathok:
- bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+ recipefile = rd.getVar('FILE')
+ if update_original_recipe:
+ if destlayerdir not in recipefile:
+ bb.error("destlayerdir %s doesn't contain the original recipe (%s), cannot update it" % (destlayerdir, recipefile))
+ return (None, None)
+
+ appendpath = recipefile
+ else:
+ # Determine how the bbappend should be named
+ appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+ if not appendpath:
+ bb.error('Unable to determine layer directory containing %s' % recipefile)
+ return (None, None)
+ if not pathok:
+ bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
appenddir = os.path.dirname(appendpath)
if not redirect_output:
@@ -755,7 +768,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
bbappendlines.append((varname, op, value))
destsubdir = rd.getVar('PN')
- if srcfiles:
+ if not update_original_recipe and srcfiles:
bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))
appendoverride = ''
@@ -765,22 +778,38 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
copyfiles = {}
if srcfiles:
instfunclines = []
- for i, (newfile, origsrcfile) in enumerate(srcfiles.items()):
- srcfile = origsrcfile
+ for i, (newfile, param) in enumerate(srcfiles.items()):
srcurientry = None
- if not srcfile:
- srcfile = os.path.basename(newfile)
+ if not 'path' in param or not param['path']:
+ if 'newname' in param and param['newname']:
+ srcfile = param['newname']
+ else:
+ srcfile = os.path.basename(newfile)
srcurientry = 'file://%s' % srcfile
+ oldentry = None
+ for uri in rd.getVar('SRC_URI').split():
+ if srcurientry in uri:
+ oldentry = uri
if params and params[i]:
srcurientry = '%s;%s' % (srcurientry, ';'.join('%s=%s' % (k,v) for k,v in params[i].items()))
# Double-check it's not there already
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
+ if oldentry:
+ appendline('SRC_URI:remove%s' % appendoverride, '=', ' ' + oldentry)
appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
else:
+ if oldentry:
+ if update_original_recipe:
+ removevalues['SRC_URI'] = oldentry
+ else:
+ appendline('SRC_URI:remove', '=', oldentry)
appendline('SRC_URI', '+=', srcurientry)
- copyfiles[newfile] = srcfile
+ param['path'] = srcfile
+ else:
+ srcfile = param['path']
+ copyfiles[newfile] = param
if install:
institem = install.pop(newfile, None)
if institem:
@@ -800,6 +829,8 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
# multiple times per operation when we're handling overrides)
if os.path.exists(appendpath) and not os.path.exists(outfile):
shutil.copy2(appendpath, outfile)
+ elif update_original_recipe:
+ outfile = recipefile
else:
bb.note('Writing append file %s' % appendpath)
outfile = appendpath
@@ -903,7 +934,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
outdir = redirect_output
else:
outdir = appenddir
- for newfile, srcfile in copyfiles.items():
+ for newfile, param in copyfiles.items():
+ srcfile = param['path']
+ patchdir = param.get('patchdir', ".")
+
+ if patchdir != ".":
+ newfile = os.path.join(os.path.split(newfile)[0], patchdir, os.path.split(newfile)[1])
filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
if os.path.abspath(newfile) != os.path.abspath(filedest):
if newfile.startswith(tempfile.gettempdir()):
@@ -947,10 +983,9 @@ def replace_dir_vars(path, d):
path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
return path
-def get_recipe_pv_without_srcpv(pv, uri_type):
+def get_recipe_pv_with_pfx_sfx(pv, uri_type):
"""
- Get PV without SRCPV common in SCM's for now only
- support git.
+ Get PV separating prefix and suffix components.
Returns tuple with pv, prefix and suffix.
"""
@@ -958,7 +993,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type):
sfx = ''
if uri_type == 'git':
- git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)")
+ git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+)?)(?P<rev>.*)")
m = git_regex.match(pv)
if m:
@@ -1010,7 +1045,7 @@ def get_recipe_upstream_version(rd):
src_uri = src_uris.split()[0]
uri_type, _, _, _, _, _ = decodeurl(src_uri)
- (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ (pv, pfx, sfx) = get_recipe_pv_with_pfx_sfx(rd.getVar('PV'), uri_type)
ru['current_version'] = pv
manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
@@ -1038,7 +1073,7 @@ def get_recipe_upstream_version(rd):
revision = ud.method.latest_revision(ud, rd, 'default')
upversion = pv
if revision != rd.getVar("SRCREV"):
- upversion = upversion + "-new-commits-available"
+ upversion = upversion + "-new-commits-available"
else:
pupver = ud.method.latest_versionstring(ud, rd)
(upversion, revision) = pupver
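
Making AUTOINC+ optional in the suffix group lets the renamed get_recipe_pv_with_pfx_sfx() split plain "+git" versions too, not just old-style SRCPV ones. Roughly:

    import re
    git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+)?)(?P<rev>.*)")

    m = git_regex.match("1.2.3+git")
    print(m.group('pfx'), m.group('ver'), m.group('sfx'))  # -> '' '1.2.3' '+git'
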
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
index 2e815df190..448befce33 100644
--- a/meta/lib/oe/reproducible.py
+++ b/meta/lib/oe/reproducible.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import os
@@ -113,7 +115,8 @@ def get_source_date_epoch_from_git(d, sourcedir):
return None
bb.debug(1, "git repository: %s" % gitpath)
- p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ p = subprocess.run(['git', '-c', 'log.showSignature=false', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'],
+ check=True, stdout=subprocess.PIPE)
return int(p.stdout.decode('utf-8'))
def get_source_date_epoch_from_youngest_file(d, sourcedir):
@@ -128,6 +131,9 @@ def get_source_date_epoch_from_youngest_file(d, sourcedir):
files = [f for f in files if not f[0] == '.']
for fname in files:
+ if fname == "singletask.lock":
+ # Ignore externalsrc/devtool lockfile [YOCTO #14921]
+ continue
filename = os.path.join(root, fname)
try:
mtime = int(os.lstat(filename).st_mtime)
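
Passing -c log.showSignature=false matters for users who set log.showSignature=true globally: without it the signature text would precede the timestamp and the int() conversion would fail. Standalone, the lookup amounts to:

    import subprocess

    def git_commit_time(gitpath):
        # Timestamp of the newest commit; signature output is suppressed so
        # stdout is guaranteed to be a bare integer.
        p = subprocess.run(
            ['git', '-c', 'log.showSignature=false', '--git-dir', gitpath,
             'log', '-1', '--pretty=%ct'],
            check=True, stdout=subprocess.PIPE)
        return int(p.stdout.decode('utf-8'))
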
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 9e6b411fb6..8cd48f9450 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
from abc import ABCMeta, abstractmethod
@@ -104,7 +106,7 @@ class Rootfs(object, metaclass=ABCMeta):
def _cleanup(self):
pass
- def _setup_dbg_rootfs(self, dirs):
+ def _setup_dbg_rootfs(self, package_paths):
gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
if gen_debugfs != '1':
return
@@ -120,11 +122,12 @@ class Rootfs(object, metaclass=ABCMeta):
bb.utils.mkdirhier(self.image_rootfs)
bb.note(" Copying back package database...")
- for dir in dirs:
- if not os.path.isdir(self.image_rootfs + '-orig' + dir):
- continue
- bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
- shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
+ for path in package_paths:
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(path))
+ if os.path.isdir(self.image_rootfs + '-orig' + path):
+ shutil.copytree(self.image_rootfs + '-orig' + path, self.image_rootfs + path, symlinks=True)
+ elif os.path.isfile(self.image_rootfs + '-orig' + path):
+ shutil.copyfile(self.image_rootfs + '-orig' + path, self.image_rootfs + path)
# Copy files located in /usr/lib/debug or /usr/src/debug
for dir in ["/usr/lib/debug", "/usr/src/debug"]:
@@ -160,6 +163,13 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note(" Install extra debug packages...")
self.pm.install(extra_debug_pkgs.split(), True)
+ bb.note(" Removing package database...")
+ for path in package_paths:
+ if os.path.isdir(self.image_rootfs + path):
+ shutil.rmtree(self.image_rootfs + path)
+ elif os.path.isfile(self.image_rootfs + path):
+ os.remove(self.image_rootfs + path)
+
bb.note(" Rename debug rootfs...")
try:
shutil.rmtree(self.image_rootfs + '-dbg')
@@ -171,14 +181,8 @@ class Rootfs(object, metaclass=ABCMeta):
bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
- fakerootcmd = self.d.getVar('FAKEROOT')
- if fakerootcmd is not None:
- exec_cmd = [fakerootcmd, cmd]
- else:
- exec_cmd = cmd
-
try:
- subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
@@ -190,6 +194,18 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
+ def make_last(command, commands):
+ commands = commands.split()
+ if command in commands:
+ commands.remove(command)
+ commands.append(command)
+ return "".join(commands)
+
+ # We want this to run as late as possible, in particular after
+ # systemd_sysusers_create and set_user_group. Using :append is not enough
+ make_last("tidy_shadowutils_files", post_process_cmds)
+ make_last("rootfs_reproducible", post_process_cmds)
+
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -323,19 +339,30 @@ class Rootfs(object, metaclass=ABCMeta):
bb.note("No Kernel Modules found, not running depmod")
return
- kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
- 'kernel-abiversion')
- if not os.path.exists(kernel_abi_ver_file):
- bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+ pkgdatadir = self.d.getVar('PKGDATA_DIR')
+
+ # PKGDATA_DIR can include multiple kernels so we run depmod for each
+ # one of them.
+ for direntry in os.listdir(pkgdatadir):
+ match = re.match('(.*)-depmod', direntry)
+ if not match:
+ continue
+ kernel_package_name = match.group(1)
- with open(kernel_abi_ver_file) as f:
- kernel_ver = f.read().strip(' \n')
+ kernel_abi_ver_file = oe.path.join(pkgdatadir, direntry, kernel_package_name + '-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
- versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
+ with open(kernel_abi_ver_file) as f:
+ kernel_ver = f.read().strip(' \n')
- bb.utils.mkdirhier(versioned_modules_dir)
+ versioned_modules_dir = os.path.join(self.image_rootfs, modules_dir, kernel_ver)
- self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ bb.note("Running depmodwrapper for %s ..." % versioned_modules_dir)
+ if self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver, kernel_package_name]):
+ bb.fatal("Kernel modules dependency generation failed")
"""
Create devfs:
@@ -384,6 +411,10 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
def image_list_installed_packages(d, rootfs_dir=None):
+ # There's no rootfs for baremetal images
+ if bb.data.inherits_class('baremetal-image', d):
+ return ""
+
if not rootfs_dir:
rootfs_dir = d.getVar('IMAGE_ROOTFS')
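
For reference, make_last() simply moves one command to the end of the whitespace-separated ROOTFS_POSTPROCESS_COMMAND list:

    cmds = "set_user_group tidy_shadowutils_files systemd_sysusers_create"
    print(make_last("tidy_shadowutils_files", cmds))
    # -> "set_user_group systemd_sysusers_create tidy_shadowutils_files"
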
diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py
index ec70b34805..185553eeeb 100644
--- a/meta/lib/oe/rust.py
+++ b/meta/lib/oe/rust.py
@@ -1,5 +1,13 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
# Handle mismatches between `uname -m`-style output and Rust's arch names
def arch_to_rust_arch(arch):
if arch == "ppc64le":
return "powerpc64le"
+ if arch in ('riscv32', 'riscv64'):
+ return arch + 'gc'
return arch
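
Rust's RISC-V target names encode the standard extension set, hence the 'gc' suffix:

    print(arch_to_rust_arch("riscv64"))  # "riscv64gc"
    print(arch_to_rust_arch("ppc64le"))  # "powerpc64le"
    print(arch_to_rust_arch("x86_64"))   # unchanged: "x86_64"
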
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
index 52bf51440e..fd4b6895d8 100644
--- a/meta/lib/oe/sbom.py
+++ b/meta/lib/oe/sbom.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -12,6 +14,10 @@ def get_recipe_spdxid(d):
return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+def get_download_spdxid(d, idx):
+ return "SPDXRef-Download-%s-%d" % (d.getVar("PN"), idx)
+
+
def get_package_spdxid(pkg):
return "SPDXRef-Package-%s" % pkg
@@ -32,18 +38,54 @@ def get_sdk_spdxid(sdk):
return "SPDXRef-SDK-%s" % sdk
-def write_doc(d, spdx_doc, subdir, spdx_deploy=None, indent=None):
+def _doc_path_by_namespace(spdx_deploy, arch, doc_namespace):
+ return spdx_deploy / "by-namespace" / arch / doc_namespace.replace("/", "_")
+
+
+def doc_find_by_namespace(spdx_deploy, search_arches, doc_namespace):
+ for pkgarch in search_arches:
+ p = _doc_path_by_namespace(spdx_deploy, pkgarch, doc_namespace)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def _doc_path_by_hashfn(spdx_deploy, arch, doc_name, hashfn):
+ return (
+ spdx_deploy / "by-hash" / arch / hashfn.split()[1] / (doc_name + ".spdx.json")
+ )
+
+
+def doc_find_by_hashfn(spdx_deploy, search_arches, doc_name, hashfn):
+ for pkgarch in search_arches:
+ p = _doc_path_by_hashfn(spdx_deploy, pkgarch, doc_name, hashfn)
+ if os.path.exists(p):
+ return p
+ return None
+
+
+def doc_path(spdx_deploy, doc_name, arch, subdir):
+ return spdx_deploy / arch / subdir / (doc_name + ".spdx.json")
+
+
+def write_doc(d, spdx_doc, arch, subdir, spdx_deploy=None, indent=None):
from pathlib import Path
if spdx_deploy is None:
spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
- dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
+ dest = doc_path(spdx_deploy, spdx_doc.name, arch, subdir)
dest.parent.mkdir(exist_ok=True, parents=True)
with dest.open("wb") as f:
doc_sha1 = spdx_doc.to_json(f, sort_keys=True, indent=indent)
- l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
+ l = _doc_path_by_namespace(spdx_deploy, arch, spdx_doc.documentNamespace)
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ l = _doc_path_by_hashfn(
+ spdx_deploy, arch, spdx_doc.name, d.getVar("BB_HASHFILENAME")
+ )
l.parent.mkdir(exist_ok=True, parents=True)
l.symlink_to(os.path.relpath(dest, l.parent))
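
write_doc() now maintains three views of each document: the real file under <arch>/<subdir>/, plus relative symlinks under by-namespace/ and by-hash/ so consumers can resolve cross-document references without knowing the original subdir. Roughly, for a hypothetical document:

    import os
    from pathlib import Path

    spdx_deploy = Path("tmp/deploy/spdx")              # hypothetical root
    namespace = "http://spdx.org/spdxdocs/recipe-foo"  # hypothetical
    dest = spdx_deploy / "core2-64" / "recipes" / "recipe-foo.spdx.json"
    link = spdx_deploy / "by-namespace" / "core2-64" / namespace.replace("/", "_")
    link.parent.mkdir(exist_ok=True, parents=True)
    link.symlink_to(os.path.relpath(dest, link.parent))
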
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index 27347667e8..3dc3672210 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -68,7 +70,7 @@ class Sdk(object, metaclass=ABCMeta):
#FIXME: using umbrella exc catching because bb.utils method raises it
except Exception as e:
bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
- bb.error("unable to place %s in final SDK location" % sourcefile)
+ bb.fatal("unable to place %s in final SDK location" % sourcefile)
def mkdirhier(self, dirpath):
try:
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
index 6d56ed90df..7aaf2af5ed 100644
--- a/meta/lib/oe/spdx.py
+++ b/meta/lib/oe/spdx.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -214,6 +216,18 @@ class SPDXPackageVerificationCode(SPDXObject):
class SPDXPackage(SPDXObject):
+ ALLOWED_CHECKSUMS = [
+ "SHA1",
+ "SHA224",
+ "SHA256",
+ "SHA384",
+ "SHA512",
+ "MD2",
+ "MD4",
+ "MD5",
+ "MD6",
+ ]
+
name = _String()
SPDXID = _String()
versionInfo = _String()
@@ -232,6 +246,7 @@ class SPDXPackage(SPDXObject):
hasFiles = _StringList()
packageFileName = _String()
annotations = _ObjectList(SPDXAnnotation)
+ checksums = _ObjectList(SPDXChecksum)
class SPDXFile(SPDXObject):
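
With the new checksums field, package-level hashes can be attached the same way file checksums already are; algorithm names should be drawn from ALLOWED_CHECKSUMS. A hedged sketch, assuming the usual keyword-argument construction of SPDXObject subclasses:

    p = SPDXPackage(name="example", SPDXID="SPDXRef-Package-example")
    p.checksums.append(SPDXChecksum(
        algorithm="SHA256",        # expected to be one of ALLOWED_CHECKSUMS
        checksumValue="a" * 64,    # hex digest of the package artifact
    ))
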
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index de65244932..a46e5502ab 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -1,9 +1,12 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import bb.siggen
import bb.runqueue
import oe
+import netrc
def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
# Return True if we should keep the dependency, False to drop it
@@ -24,18 +27,15 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
return "/allarch.bbclass" in inherits
def isImage(mc, fn):
return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn])
- def isSPDXTask(task):
- return task in ("do_create_spdx", "do_create_runtime_spdx")
depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep)
mc, _ = bb.runqueue.split_mc(fn)
- # Keep all dependencies between SPDX tasks in the signature. SPDX documents
- # are linked together by hashes, which means if a dependent document changes,
- # all downstream documents must be re-written (even if they are "safe"
- # dependencies).
- if isSPDXTask(task) and isSPDXTask(deptaskname):
- return True
+ # We can skip the rm_work task signature to avoid running the task
+ # when we remove some tasks from the dependency chain,
+ # e.g. INHERIT:remove = "create-spdx" will trigger do_rm_work
+ if task == "do_rm_work":
+ return False
# (Almost) always include our own inter-task dependencies (unless it comes
# from a mcdepends). The exception is the special
@@ -93,15 +93,6 @@ def sstate_lockedsigs(d):
sigs[pn][task] = [h, siggen_lockedsigs_var]
return sigs
-class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
- name = "OEBasic"
- def init_rundepcheck(self, data):
- self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
- self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
- pass
- def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None):
- return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches)
-
class SignatureGeneratorOEBasicHashMixIn(object):
supports_multiconfig_datacaches = True
@@ -114,6 +105,8 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.lockedhashfn = {}
self.machine = data.getVar("MACHINE")
self.mismatch_msgs = []
+ self.mismatch_number = 0
+ self.lockedsigs_msgs = ""
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
@@ -150,9 +143,10 @@ class SignatureGeneratorOEBasicHashMixIn(object):
super().set_taskdata(data[3:])
def dump_sigs(self, dataCache, options):
- sigfile = os.getcwd() + "/locked-sigs.inc"
- bb.plain("Writing locked sigs to %s" % sigfile)
- self.dump_lockedsigs(sigfile)
+ if 'lockedsigs' in options:
+ sigfile = os.getcwd() + "/locked-sigs.inc"
+ bb.plain("Writing locked sigs to %s" % sigfile)
+ self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
@@ -197,6 +191,7 @@ class SignatureGeneratorOEBasicHashMixIn(object):
#bb.warn("Using %s %s %s" % (recipename, task, h))
if h != h_locked and h_locked != unihash:
+ self.mismatch_number += 1
self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
% (recipename, task, h, h_locked, var))
@@ -211,10 +206,10 @@ class SignatureGeneratorOEBasicHashMixIn(object):
return self.lockedhashes[tid]
return super().get_stampfile_hash(tid)
- def get_unihash(self, tid):
+ def get_cached_unihash(self, tid):
if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal:
return self.lockedhashes[tid]
- return super().get_unihash(tid)
+ return super().get_cached_unihash(tid)
def dump_sigtask(self, fn, task, stampbase, runtime):
tid = fn + ":" + task
@@ -225,6 +220,9 @@ class SignatureGeneratorOEBasicHashMixIn(object):
def dump_lockedsigs(self, sigfile, taskfilter=None):
types = {}
for tid in self.runtaskdeps:
+ # Bitbake changed this to a tuple in newer versions
+ if isinstance(tid, tuple):
+ tid = tid[1]
if taskfilter:
if not tid in taskfilter:
continue
@@ -274,6 +272,15 @@ class SignatureGeneratorOEBasicHashMixIn(object):
warn_msgs = []
error_msgs = []
sstate_missing_msgs = []
+ info_msgs = None
+
+ if self.lockedsigs:
+ if len(self.lockedsigs) > 10:
+ self.lockedsigs_msgs = "There are %s recipes with locked tasks (%s task(s) have non matching signature)" % (len(self.lockedsigs), self.mismatch_number)
+ else:
+ self.lockedsigs_msgs = "The following recipes have locked tasks:"
+ for pn in self.lockedsigs:
+ self.lockedsigs_msgs += " %s" % (pn)
for tid in sq_data['hash']:
if tid not in found:
@@ -286,7 +293,9 @@ class SignatureGeneratorOEBasicHashMixIn(object):
% (pn, taskname, sq_data['hash'][tid]))
checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
- if checklevel == 'warn':
+ if checklevel == 'info':
+ info_msgs = self.lockedsigs_msgs
+ if checklevel == 'warn' or checklevel == 'info':
warn_msgs += self.mismatch_msgs
elif checklevel == 'error':
error_msgs += self.mismatch_msgs
@@ -297,6 +306,8 @@ class SignatureGeneratorOEBasicHashMixIn(object):
elif checklevel == 'error':
error_msgs += sstate_missing_msgs
+ if info_msgs:
+ bb.note(info_msgs)
if warn_msgs:
bb.warn("\n".join(warn_msgs))
if error_msgs:
@@ -316,9 +327,21 @@ class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.sigge
self.method = data.getVar('SSTATE_HASHEQUIV_METHOD')
if not self.method:
bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set")
+ self.max_parallel = int(data.getVar('BB_HASHSERVE_MAX_PARALLEL') or 1)
+ self.username = data.getVar("BB_HASHSERVE_USERNAME")
+ self.password = data.getVar("BB_HASHSERVE_PASSWORD")
+ if not self.username or not self.password:
+ try:
+ n = netrc.netrc()
+ auth = n.authenticators(self.server)
+ if auth is not None:
+ self.username, _, self.password = auth
+ except FileNotFoundError:
+ pass
+ except netrc.NetrcParseError as e:
+ bb.warn("Error parsing %s:%s: %s" % (e.filename, str(e.lineno), e.msg))
# Insert these classes into siggen's namespace so it can see and select them
-bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
bb.siggen.SignatureGeneratorOEEquivHash = SignatureGeneratorOEEquivHash
@@ -332,14 +355,14 @@ def find_siginfo(pn, taskname, taskhashlist, d):
if not taskname:
# We have to derive pn and taskname
key = pn
- splitit = key.split('.bb:')
- taskname = splitit[1]
- pn = os.path.basename(splitit[0]).split('_')[0]
- if key.startswith('virtual:native:'):
- pn = pn + '-native'
+ if key.startswith("mc:"):
+ # mc:<mc>:<pn>:<task>
+ _, _, pn, taskname = key.split(':', 3)
+ else:
+ # <pn>:<task>
+ pn, taskname = key.split(':', 1)
hashfiles = {}
- filedates = {}
def get_hashval(siginfo):
if siginfo.endswith('.siginfo'):
@@ -347,6 +370,9 @@ def find_siginfo(pn, taskname, taskhashlist, d):
else:
return siginfo.rpartition('.')[2]
+ def get_time(fullpath):
+ return os.stat(fullpath).st_mtime
+
# First search in stamps dir
localdata = d.createCopy()
localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -362,24 +388,21 @@ def find_siginfo(pn, taskname, taskhashlist, d):
filespec = '%s.%s.sigdata.*' % (stamp, taskname)
foundall = False
import glob
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
for fullpath in glob.glob(filespec):
match = False
if taskhashlist:
for taskhash in taskhashlist:
if fullpath.endswith('.%s' % taskhash):
- hashfiles[taskhash] = fullpath
+ hashfiles[taskhash] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
if len(hashfiles) == len(taskhashlist):
foundall = True
break
else:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except OSError:
- continue
hashval = get_hashval(fullpath)
- hashfiles[hashval] = fullpath
+ hashfiles[hashval] = {'path':fullpath, 'sstate':False, 'time':get_time(fullpath)}
- if not taskhashlist or (len(filedates) < 2 and not foundall):
+ if not taskhashlist or (len(hashfiles) < 2 and not foundall):
# That didn't work, look in sstate-cache
hashes = taskhashlist or ['?' * 64]
localdata = bb.data.createCopy(d)
@@ -388,6 +411,9 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('TARGET_VENDOR', '*')
localdata.setVar('TARGET_OS', '*')
localdata.setVar('PN', pn)
+ # gcc-source is a special case, same as with local stamps above
+ if pn.startswith("gcc-source"):
+ localdata.setVar('PN', "gcc")
localdata.setVar('PV', '*')
localdata.setVar('PR', '*')
localdata.setVar('BB_TASKHASH', hashval)
@@ -399,24 +425,18 @@ def find_siginfo(pn, taskname, taskhashlist, d):
localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
filespec = '%s.siginfo' % localdata.getVar('SSTATE_PKG')
+ bb.debug(1, "Calling glob.glob on {}".format(filespec))
matchedfiles = glob.glob(filespec)
for fullpath in matchedfiles:
actual_hashval = get_hashval(fullpath)
if actual_hashval in hashfiles:
continue
- hashfiles[hashval] = fullpath
- if not taskhashlist:
- try:
- filedates[fullpath] = os.stat(fullpath).st_mtime
- except:
- continue
+ hashfiles[actual_hashval] = {'path':fullpath, 'sstate':True, 'time':get_time(fullpath)}
- if taskhashlist:
- return hashfiles
- else:
- return filedates
+ return hashfiles
bb.siggen.find_siginfo = find_siginfo
+bb.siggen.find_siginfo_version = 2
def sstate_get_manifest_filename(task, d):
@@ -461,11 +481,15 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
pkgarchs.append('allarch')
pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+ searched_manifests = []
+
for pkgarch in pkgarchs:
manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
if os.path.exists(manifest):
return manifest, d2
- bb.fatal("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ searched_manifests.append(manifest)
+ bb.fatal("The sstate manifest for task '%s:%s' (multilib variant '%s') could not be found.\nThe pkgarchs considered were: %s.\nBut none of these manifests exists:\n %s"
+ % (taskdata, taskname, variant, d2.expand(", ".join(pkgarchs)),"\n ".join(searched_manifests)))
return None, d2
def OEOuthashBasic(path, sigfile, task, d):
@@ -585,9 +609,9 @@ def OEOuthashBasic(path, sigfile, task, d):
update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
except KeyError as e:
- bb.warn("KeyError in %s" % path)
msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
- "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
+ "any user/group on target. This may be due to host contamination." %
+ (e, os.path.abspath(path), s.st_uid, s.st_gid))
raise Exception(msg).with_traceback(e.__traceback__)
if include_timestamps:
@@ -650,6 +674,10 @@ def OEOuthashBasic(path, sigfile, task, d):
if f == 'fixmepath':
continue
process(os.path.join(root, f))
+
+ for dir in dirs:
+ if os.path.islink(os.path.join(root, dir)):
+ process(os.path.join(root, dir))
finally:
os.chdir(prev_dir)
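
Hash equivalence server credentials can now come from ~/.netrc when BB_HASHSERVE_USERNAME/PASSWORD are unset. The lookup pattern, as a standalone sketch:

    import netrc

    def hashserv_auth(server, username=None, password=None):
        # Explicit settings win; otherwise fall back to ~/.netrc, ignoring a
        # missing file but reporting parse errors.
        if username and password:
            return username, password
        try:
            auth = netrc.netrc().authenticators(server)
            if auth is not None:
                username, _, password = auth
        except FileNotFoundError:
            pass
        except netrc.NetrcParseError as e:
            print("Error parsing %s:%s: %s" % (e.filename, e.lineno, e.msg))
        return username, password
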
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index de8dcebf94..4412bc14c1 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
@@ -102,6 +104,10 @@ class Rxvt(XTerminal):
command = 'rxvt -T "{title}" -e {command}'
priority = 1
+class URxvt(XTerminal):
+ command = 'urxvt -T "{title}" -e {command}'
+ priority = 1
+
class Screen(Terminal):
command = 'screen -D -m -t "{title}" -S devshell {command}'
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
index bbbabafbf6..b929afb1f3 100644
--- a/meta/lib/oe/types.py
+++ b/meta/lib/oe/types.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
index 3caa3f851a..54aa86feb5 100644
--- a/meta/lib/oe/useradd.py
+++ b/meta/lib/oe/useradd.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 1ee947d584..14a7d07ef0 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -1,10 +1,13 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import subprocess
import multiprocessing
import traceback
+import errno
def read_file(filename):
try:
@@ -256,16 +259,23 @@ def execute_pre_post_process(d, cmds):
if cmds is None:
return
- for cmd in cmds.strip().split(';'):
- cmd = cmd.strip()
- if cmd != '':
- bb.note("Executing %s ..." % cmd)
- bb.build.exec_func(cmd, d)
+ cmds = cmds.replace(";", " ")
+
+ for cmd in cmds.split():
+ bb.note("Executing %s ..." % cmd)
+ bb.build.exec_func(cmd, d)
+
+def get_bb_number_threads(d):
+ return int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+
+def multiprocess_launch(target, items, d, extraargs=None):
+ max_process = get_bb_number_threads(d)
+ return multiprocess_launch_mp(target, items, max_process, extraargs)
-# For each item in items, call the function 'target' with item as the first
+# For each item in items, call the function 'target' with item as the first
# argument, extraargs as the other arguments and handle any exceptions in the
# parent thread
-def multiprocess_launch(target, items, d, extraargs=None):
+def multiprocess_launch_mp(target, items, max_process, extraargs=None):
class ProcessLaunch(multiprocessing.Process):
def __init__(self, *args, **kwargs):
@@ -300,7 +310,6 @@ def multiprocess_launch(target, items, d, extraargs=None):
self.update()
return self._result
- max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
launched = []
errors = []
results = []
@@ -520,3 +529,14 @@ def directory_size(root, blocksize=4096):
total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
total += roundup(getsize(root))
return total
+
+# Update the mtime of a file, skipping permission and read-only errors
+def touch(filename):
+ try:
+ os.utime(filename, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ if e.errno != errno.EROFS:
+ raise e
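
touch() deliberately swallows only the failure modes expected when poking shared sstate mirrors and re-raises anything else:

    touch("/srv/sstate-cache/foo.tar.zst")  # hypothetical mirror path
    # - PermissionError: ignored (file owned by another user)
    # - OSError with errno.EROFS: ignored (read-only NFS mount)
    # - any other OSError (e.g. ENOENT): propagated to the caller
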