diff options
Diffstat (limited to 'meta/lib/oe')
49 files changed, 4962 insertions, 3479 deletions
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py index 3ad9513f40..4e7c09da04 100644 --- a/meta/lib/oe/__init__.py +++ b/meta/lib/oe/__init__.py @@ -1,2 +1,6 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + from pkgutil import extend_path __path__ = extend_path(__path__, __name__) diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py index ad7fceb8bb..b1856846b6 100644 --- a/meta/lib/oe/buildhistory_analysis.py +++ b/meta/lib/oe/buildhistory_analysis.py @@ -3,6 +3,8 @@ # Copyright (C) 2012-2013, 2016-2017 Intel Corporation # Author: Paul Eggleton <paul.eggleton@linux.intel.com> # +# SPDX-License-Identifier: GPL-2.0-only +# # Note: requires GitPython 0.3.1+ # # You can use this from the command line by running scripts/buildhistory-diff @@ -127,7 +129,7 @@ class ChangeRecord: removed = list(set(aitems) - set(bitems)) added = list(set(bitems) - set(aitems)) - if not removed and not added: + if not removed and not added and self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']: depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False) depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False) for i, j in zip(depvera.items(), depverb.items()): @@ -179,7 +181,7 @@ class ChangeRecord: diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='') out += '\n '.join(list(diff)[2:]) out += '\n --' - elif self.fieldname in img_monitor_files or '/image-files/' in self.path: + elif self.fieldname in img_monitor_files or '/image-files/' in self.path or self.fieldname == "sysroot": if self.filechanges or (self.oldvalue and self.newvalue): fieldname = self.fieldname if '/image-files/' in self.path: @@ -211,6 +213,7 @@ class FileChange: changetype_perms = 'P' changetype_ownergroup = 'O' changetype_link = 'L' + changetype_move = 'M' def __init__(self, path, changetype, oldvalue = None, newvalue = None): self.path = path @@ -249,10 +252,11 @@ 
class FileChange: return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue) elif self.changetype == self.changetype_link: return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue) + elif self.changetype == self.changetype_move: + return '%s moved to %s' % (self.path, self.oldvalue) else: return '%s changed (unknown)' % self.path - def blob_to_dict(blob): alines = [line for line in blob.data_stream.read().decode('utf-8').splitlines()] adict = {} @@ -279,11 +283,14 @@ def file_list_to_dict(lines): adict[path] = splitv[0:3] return adict +numeric_removal = str.maketrans('0123456789', 'XXXXXXXXXX') -def compare_file_lists(alines, blines): +def compare_file_lists(alines, blines, compare_ownership=True): adict = file_list_to_dict(alines) bdict = file_list_to_dict(blines) filechanges = [] + additions = [] + removals = [] for path, splitv in adict.items(): newsplitv = bdict.pop(path, None) if newsplitv: @@ -292,16 +299,20 @@ def compare_file_lists(alines, blines): newvalue = newsplitv[0][0] if oldvalue != newvalue: filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue)) + # Check permissions oldvalue = splitv[0][1:] newvalue = newsplitv[0][1:] if oldvalue != newvalue: filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue)) - # Check owner/group - oldvalue = '%s/%s' % (splitv[1], splitv[2]) - newvalue = '%s/%s' % (newsplitv[1], newsplitv[2]) - if oldvalue != newvalue: - filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue)) + + if compare_ownership: + # Check owner/group + oldvalue = '%s/%s' % (splitv[1], splitv[2]) + newvalue = '%s/%s' % (newsplitv[1], newsplitv[2]) + if oldvalue != newvalue: + filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue)) + # Check symlink target if newsplitv[0][0] == 'l': if len(splitv) > 3: @@ -312,11 +323,67 @@ def compare_file_lists(alines, blines): 
if oldvalue != newvalue: filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue)) else: - filechanges.append(FileChange(path, FileChange.changetype_remove)) + removals.append(path) # Whatever is left over has been added for path in bdict: - filechanges.append(FileChange(path, FileChange.changetype_add)) + additions.append(path) + + # Rather than print additions and removals, its nicer to print file 'moves' + # where names or paths are similar. + revmap_remove = {} + for removal in removals: + translated = removal.translate(numeric_removal) + if translated not in revmap_remove: + revmap_remove[translated] = [] + revmap_remove[translated].append(removal) + + # + # We want to detect renames of large trees of files like + # /lib/modules/5.4.40-yocto-standard to /lib/modules/5.4.43-yocto-standard + # + renames = {} + for addition in additions.copy(): + if addition not in additions: + continue + translated = addition.translate(numeric_removal) + if translated in revmap_remove: + if len(revmap_remove[translated]) != 1: + continue + removal = revmap_remove[translated][0] + commondir = addition.split("/") + commondir2 = removal.split("/") + idx = None + for i in range(len(commondir)): + if commondir[i] != commondir2[i]: + idx = i + break + commondir = "/".join(commondir[:i+1]) + commondir2 = "/".join(commondir2[:i+1]) + # If the common parent is in one dict and not the other its likely a rename + # so iterate through those files and process as such + if commondir2 not in bdict and commondir not in adict: + if commondir not in renames: + renames[commondir] = commondir2 + for addition2 in additions.copy(): + if addition2.startswith(commondir): + removal2 = addition2.replace(commondir, commondir2) + if removal2 in removals: + additions.remove(addition2) + removals.remove(removal2) + continue + filechanges.append(FileChange(removal, FileChange.changetype_move, addition)) + if addition in additions: + additions.remove(addition) + if removal in 
removals: + removals.remove(removal) + for rename in renames: + filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename)) + + for addition in additions: + filechanges.append(FileChange(addition, FileChange.changetype_add)) + for removal in removals: + filechanges.append(FileChange(removal, FileChange.changetype_remove)) return filechanges @@ -407,7 +474,7 @@ def compare_dict_blobs(path, ablob, bblob, report_all, report_ver): if abs(percentchg) < monitor_numeric_threshold: continue elif (not report_all) and key in list_fields: - if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '': + if key == "FILELIST" and (path.endswith("-dbg") or path.endswith("-src")) and bstr.strip() != '': continue if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']: (depvera, depverb) = compare_pkg_lists(astr, bstr) @@ -569,6 +636,15 @@ def process_changes(repopath, revision1, revision2='HEAD', report_all=False, rep elif filename.startswith('latest.'): chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True) changes.append(chg) + elif filename == 'sysroot': + alines = d.a_blob.data_stream.read().decode('utf-8').splitlines() + blines = d.b_blob.data_stream.read().decode('utf-8').splitlines() + filechanges = compare_file_lists(alines,blines, compare_ownership=False) + if filechanges: + chg = ChangeRecord(path, filename, None, None, True) + chg.filechanges = filechanges + changes.append(chg) + elif path.startswith('images/'): filename = os.path.basename(d.a_blob.path) if filename in img_monitor_files: diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py index 0840cc4c3f..254257a83f 100644 --- a/meta/lib/oe/cachedpath.py +++ b/meta/lib/oe/cachedpath.py @@ -1,4 +1,6 @@ # +# SPDX-License-Identifier: GPL-2.0-only +# # Based on standard python library functions but avoid # repeated stat calls. 
Its assumed the files will not change from under us # so we can cache stat calls. diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py index 662707b859..e08d788b75 100644 --- a/meta/lib/oe/classextend.py +++ b/meta/lib/oe/classextend.py @@ -1,10 +1,24 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import collections +def get_packages(d): + pkgs = d.getVar("PACKAGES_NONML") + extcls = d.getVar("EXTENDERCLASS") + return extcls.rename_packages_internal(pkgs) + +def get_depends(varprefix, d): + extcls = d.getVar("EXTENDERCLASS") + return extcls.map_depends_variable(varprefix + "_NONML") + class ClassExtender(object): def __init__(self, extname, d): self.extname = extname self.d = d self.pkgs_mapping = [] + self.d.setVar("EXTENDERCLASS", self) def extend_name(self, name): if name.startswith("kernel-") or name == "virtual/kernel": @@ -20,6 +34,8 @@ class ClassExtender(object): if not subs.startswith(self.extname): return "virtual/" + self.extname + "-" + subs return name + if name.startswith("/") or (name.startswith("${") and name.endswith("}")): + return name if not name.startswith(self.extname): return self.extname + "-" + name return name @@ -71,7 +87,7 @@ class ClassExtender(object): def map_depends_variable(self, varname, suffix = ""): # We need to preserve EXTENDPKGV so it can be expanded correctly later if suffix: - varname = varname + "_" + suffix + varname = varname + ":" + suffix orig = self.d.getVar("EXTENDPKGV", False) self.d.setVar("EXTENDPKGV", "EXTENDPKGV") deps = self.d.getVar(varname) @@ -83,8 +99,13 @@ class ClassExtender(object): for dep in deps: newdeps[self.map_depends(dep)] = deps[dep] - self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")) + if not varname.endswith("_NONML"): + self.d.renameVar(varname, varname + "_NONML") + self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname) + self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML") + ret = 
bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}") self.d.setVar("EXTENDPKGV", orig) + return ret def map_packagevars(self): for pkg in (self.d.getVar("PACKAGES").split() + [""]): @@ -103,12 +124,25 @@ class ClassExtender(object): continue self.pkgs_mapping.append([pkg, self.extend_name(pkg)]) - self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping])) + self.d.renameVar("PACKAGES", "PACKAGES_NONML") + self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}") + + def rename_packages_internal(self, pkgs): + self.pkgs_mapping = [] + for pkg in (self.d.expand(pkgs) or "").split(): + if pkg.startswith(self.extname): + self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg]) + continue + self.pkgs_mapping.append([pkg, self.extend_name(pkg)]) + + return " ".join([row[1] for row in self.pkgs_mapping]) def rename_package_variables(self, variables): for pkg_mapping in self.pkgs_mapping: + if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"): + continue for subs in variables: - self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1])) + self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1])) class NativesdkClassExtender(ClassExtender): def map_depends(self, dep): diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py index 45cd5249be..08bb66b365 100644 --- a/meta/lib/oe/classutils.py +++ b/meta/lib/oe/classutils.py @@ -1,3 +1,6 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# class ClassRegistryMeta(type): """Give each ClassRegistry their own registry""" diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py index 7cb784cf8c..79642fd76a 100644 --- a/meta/lib/oe/copy_buildsystem.py +++ b/meta/lib/oe/copy_buildsystem.py @@ -1,3 +1,6 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# # This class should provide easy access to the different aspects of the # buildsystem such as layers, bitbake location, etc. 
# @@ -17,7 +20,7 @@ def _smart_copy(src, dest): mode = os.stat(src).st_mode if stat.S_ISDIR(mode): bb.utils.mkdirhier(dest) - cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \ + cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \ | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest) subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) else: @@ -94,8 +97,10 @@ class BuildSystem(object): layerdestpath = destdir if corebase == os.path.dirname(layer): layerdestpath += '/' + os.path.basename(corebase) - else: - layer_relative = os.path.basename(corebase) + '/' + os.path.relpath(layer, corebase) + # If the layer is located somewhere under the same parent directory + # as corebase we keep the layer structure. + elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase): + layer_relative = os.path.relpath(layer, os.path.dirname(corebase)) if os.path.dirname(layer_relative) != layernewname: layerdestpath += '/' + os.path.dirname(layer_relative) @@ -165,10 +170,10 @@ class BuildSystem(object): def generate_locked_sigs(sigfile, d): bb.utils.mkdirhier(os.path.dirname(sigfile)) depd = d.getVar('BB_TASKDEPDATA', False) - tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()] + tasks = ['%s:%s' % (v[2], v[1]) for v in depd.values()] bb.parse.siggen.dump_lockedsigs(sigfile, tasks) -def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output): +def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, onlynative, pruned_output): with open(lockedsigs, 'r') as infile: bb.utils.mkdirhier(os.path.dirname(pruned_output)) with open(pruned_output, 'w') as f: @@ -178,7 +183,11 @@ def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output if line.endswith('\\\n'): splitval = line.strip().split(':') if not splitval[1] in excluded_tasks and not splitval[0] in excluded_targets: - f.write(line) + if onlynative: + if 'nativesdk' in 
splitval[0]: + f.write(line) + else: + f.write(line) else: f.write(line) invalue = False @@ -246,7 +255,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac bb.note('Generating sstate-cache...') nativelsbstring = d.getVar('NATIVELSBSTRING') - bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or '')) + bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or '')) if fixedlsbstring and nativelsbstring != fixedlsbstring: nativedir = output_sstate_cache + '/' + nativelsbstring if os.path.isdir(nativedir): @@ -273,7 +282,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo logparam = '-l %s' % logfile else: logparam = '' - cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam) + cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam) env = dict(d.getVar('BB_ORIGENV', False)) env.pop('BUILDDIR', '') env.pop('BBPATH', '') diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py new file mode 100644 index 0000000000..0302beeb4a --- /dev/null +++ b/meta/lib/oe/cve_check.py @@ -0,0 +1,148 @@ +import collections +import re +import itertools +import functools + +_Version = collections.namedtuple( + "_Version", ["release", "patch_l", "pre_l", "pre_v"] +) + +@functools.total_ordering +class Version(): + + def __init__(self, version, suffix=None): + + suffixes = ["alphabetical", "patch"] + + if str(suffix) == "alphabetical": + version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?""" + elif str(suffix) == 
"patch": + version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?""" + else: + version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?""" + regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE) + + match = regex.search(version) + if not match: + raise Exception("Invalid version: '{0}'".format(version)) + + self._version = _Version( + release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")), + patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "", + pre_l=match.group("pre_l"), + pre_v=match.group("pre_v") + ) + + self._key = _cmpkey( + self._version.release, + self._version.patch_l, + self._version.pre_l, + self._version.pre_v + ) + + def __eq__(self, other): + if not isinstance(other, Version): + return NotImplemented + return self._key == other._key + + def __gt__(self, other): + if not isinstance(other, Version): + return NotImplemented + return self._key > other._key + +def _cmpkey(release, patch_l, pre_l, pre_v): + # remove leading 0 + _release = tuple( + reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) + ) + + _patch = patch_l.upper() + + if pre_l is None and pre_v is None: + _pre = float('inf') + else: + _pre = float(pre_v) if pre_v else float('-inf') + return _release, _patch, _pre + + +def get_patched_cves(d): + """ + Get patches that solve CVEs using the "CVE: " tag. + """ + + import re + import oe.patch + + pn = d.getVar("PN") + cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+") + + # Matches the last "CVE-YYYY-ID" in the file name, also if written + # in lowercase. Possible to have multiple CVE IDs in a single + # file name, but only the last one will be detected from the file name. 
+ # However, patch files contents addressing multiple CVE IDs are supported + # (cve_match regular expression) + + cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)") + + patched_cves = set() + bb.debug(2, "Looking for patches that solves CVEs for %s" % pn) + for url in oe.patch.src_patches(d): + patch_file = bb.fetch.decodeurl(url)[2] + + if not os.path.isfile(patch_file): + bb.error("File Not found: %s" % patch_file) + raise FileNotFoundError + + # Check patch file name for CVE ID + fname_match = cve_file_name_match.search(patch_file) + if fname_match: + cve = fname_match.group(1).upper() + patched_cves.add(cve) + bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file)) + + with open(patch_file, "r", encoding="utf-8") as f: + try: + patch_text = f.read() + except UnicodeDecodeError: + bb.debug(1, "Failed to read patch %s using UTF-8 encoding" + " trying with iso8859-1" % patch_file) + f.close() + with open(patch_file, "r", encoding="iso8859-1") as f: + patch_text = f.read() + + # Search for one or more "CVE: " lines + text_match = False + for match in cve_match.finditer(patch_text): + # Get only the CVEs without the "CVE: " tag + cves = patch_text[match.start()+5:match.end()] + for cve in cves.split(): + bb.debug(2, "Patch %s solves %s" % (patch_file, cve)) + patched_cves.add(cve) + text_match = True + + if not fname_match and not text_match: + bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file) + + return patched_cves + + +def get_cpe_ids(cve_product, version): + """ + Get list of CPE identifiers for the given product and version + """ + + version = version.split("+git")[0] + + cpe_ids = [] + for product in cve_product.split(): + # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not, + # use wildcard for vendor. 
+ if ":" in product: + vendor, product = product.split(":", 1) + else: + vendor = "*" + + cpe_id = f'cpe:2.3:a:{vendor}:{product}:{version}:*:*:*:*:*:*:*' + cpe_ids.append(cpe_id) + + return cpe_ids diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py index b8901e63f5..602130a904 100644 --- a/meta/lib/oe/data.py +++ b/meta/lib/oe/data.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import json import oe.maketype diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py index e775c3a6ec..4b2a9bec01 100644 --- a/meta/lib/oe/distro_check.py +++ b/meta/lib/oe/distro_check.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + def create_socket(url, d): import urllib from bb.utils import export_proxies @@ -22,7 +26,7 @@ def find_latest_numeric_release(url, d): maxstr="" for link in get_links_from_url(url, d): try: - # TODO use LooseVersion + # TODO use bb.utils.vercmp_string_op() release = float(link) except: release = 0 diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py index 4cc9a9a097..46c884a775 100644 --- a/meta/lib/oe/elf.py +++ b/meta/lib/oe/elf.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + def machine_dict(d): # TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit? 
machdata = { @@ -11,13 +15,13 @@ def machine_dict(d): "aarch64" : (183, 0, 0, True, 64), "aarch64_be" :(183, 0, 0, False, 64), "i586" : (3, 0, 0, True, 32), + "i686" : (3, 0, 0, True, 32), "x86_64": (62, 0, 0, True, 64), "epiphany": (4643, 0, 0, True, 32), "lm32": (138, 0, 0, False, 32), "mips": ( 8, 0, 0, False, 32), "mipsel": ( 8, 0, 0, True, 32), "microblaze": (189, 0, 0, False, 32), - "microblazeeb":(189, 0, 0, False, 32), "microblazeel":(189, 0, 0, True, 32), "powerpc": (20, 0, 0, False, 32), "riscv32": (243, 0, 0, True, 32), @@ -30,6 +34,7 @@ def machine_dict(d): "armeb": (40, 97, 0, False, 32), "powerpc": (20, 0, 0, False, 32), "powerpc64": (21, 0, 0, False, 64), + "powerpc64le": (21, 0, 0, True, 64), "i386": ( 3, 0, 0, True, 32), "i486": ( 3, 0, 0, True, 32), "i586": ( 3, 0, 0, True, 32), @@ -54,9 +59,16 @@ def machine_dict(d): "sh4": (42, 0, 0, True, 32), "sparc": ( 2, 0, 0, False, 32), "microblaze": (189, 0, 0, False, 32), - "microblazeeb":(189, 0, 0, False, 32), "microblazeel":(189, 0, 0, True, 32), }, + "linux-android" : { + "aarch64" : (183, 0, 0, True, 64), + "i686": ( 3, 0, 0, True, 32), + "x86_64": (62, 0, 0, True, 64), + }, + "linux-androideabi" : { + "arm" : (40, 97, 0, True, 32), + }, "linux-musl" : { "aarch64" : (183, 0, 0, True, 64), "aarch64_be" :(183, 0, 0, False, 64), @@ -64,6 +76,7 @@ def machine_dict(d): "armeb": ( 40, 97, 0, False, 32), "powerpc": ( 20, 0, 0, False, 32), "powerpc64": ( 21, 0, 0, False, 64), + "powerpc64le": (21, 0, 0, True, 64), "i386": ( 3, 0, 0, True, 32), "i486": ( 3, 0, 0, True, 32), "i586": ( 3, 0, 0, True, 32), @@ -74,7 +87,6 @@ def machine_dict(d): "mips64": ( 8, 0, 0, False, 64), "mips64el": ( 8, 0, 0, True, 64), "microblaze": (189, 0, 0, False, 32), - "microblazeeb":(189, 0, 0, False, 32), "microblazeel":(189, 0, 0, True, 32), "riscv32": (243, 0, 0, True, 32), "riscv64": (243, 0, 0, True, 64), diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py index ccd5aee420..1bce6cb792 100644 --- 
a/meta/lib/oe/gpg_sign.py +++ b/meta/lib/oe/gpg_sign.py @@ -1,8 +1,11 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + """Helper module for GPG signing""" import os import bb -import oe.utils import subprocess import shlex @@ -11,21 +14,27 @@ class LocalSigner(object): def __init__(self, d): self.gpg_bin = d.getVar('GPG_BIN') or \ bb.utils.which(os.getenv('PATH'), 'gpg') + self.gpg_cmd = [self.gpg_bin] + self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent") + # Without this we see "Cannot allocate memory" errors when running processes in parallel + # It needs to be set for any gpg command since any agent launched can stick around in memory + # and this parameter must be set. + if self.gpg_agent_bin: + self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)] self.gpg_path = d.getVar('GPG_PATH') - self.gpg_version = self.get_gpg_version() self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign") - self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent") + self.gpg_version = self.get_gpg_version() + def export_pubkey(self, output_file, keyid, armor=True): """Export GPG public key to a file""" - cmd = '%s --no-permission-warning --batch --yes --export -o %s ' % \ - (self.gpg_bin, output_file) + cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file] if self.gpg_path: - cmd += "--homedir %s " % self.gpg_path + cmd += ["--homedir", self.gpg_path] if armor: - cmd += "--armor " - cmd += keyid - subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT) + cmd += ["--armor"] + cmd += [keyid] + subprocess.check_output(cmd, stderr=subprocess.STDOUT) def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None): """Sign RPM files""" @@ -55,7 +64,7 @@ class LocalSigner(object): if passphrase_file and passphrase: raise Exception("You should use either passphrase_file of passphrase, not both") - cmd = [self.gpg_bin, '--detach-sign', 
'--no-permission-warning', '--batch', + cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch', '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid] if self.gpg_path: @@ -68,9 +77,6 @@ class LocalSigner(object): if self.gpg_version > (2,1,): cmd += ['--pinentry-mode', 'loopback'] - if self.gpg_agent_bin: - cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)] - cmd += [input_file] try: @@ -82,8 +88,7 @@ class LocalSigner(object): (_, stderr) = job.communicate(passphrase.encode("utf-8")) if job.returncode: - raise bb.build.FuncFailed("GPG exited with code %d: %s" % - (job.returncode, stderr.decode("utf-8"))) + bb.fatal("GPG exited with code %d: %s" % (job.returncode, stderr.decode("utf-8"))) except IOError as e: bb.error("IO error (%s): %s" % (e.errno, e.strerror)) @@ -97,21 +102,40 @@ class LocalSigner(object): def get_gpg_version(self): """Return the gpg version as a tuple of ints""" try: - ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8") + cmd = self.gpg_cmd + ["--version", "--no-permission-warning"] + ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8") return tuple([int(i) for i in ver_str.split("-")[0].split('.')]) except subprocess.CalledProcessError as e: - raise bb.build.FuncFailed("Could not get gpg version: %s" % e) + bb.fatal("Could not get gpg version: %s" % e) - def verify(self, sig_file): + def verify(self, sig_file, valid_sigs = ''): """Verify signature""" - cmd = self.gpg_bin + " --verify --no-permission-warning " + cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"] if self.gpg_path: - cmd += "--homedir %s " % self.gpg_path - cmd += sig_file - status = subprocess.call(shlex.split(cmd)) - ret = False if status else True - return ret + cmd += ["--homedir", self.gpg_path] + + cmd += [sig_file] + status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # Valid if any key 
matches if unspecified + if not valid_sigs: + ret = False if status.returncode else True + return ret + + import re + goodsigs = [] + sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$') + for l in status.stdout.decode("utf-8").splitlines(): + s = sigre.match(l) + if s: + goodsigs += [s.group(1)] + + for sig in valid_sigs.split(): + if sig in goodsigs: + return True + if len(goodsigs): + bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs)) + return False def get_signer(d, backend): diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py index 04f5b316a9..99cfa5f733 100644 --- a/meta/lib/oe/license.py +++ b/meta/lib/oe/license.py @@ -1,4 +1,6 @@ -# vi:sts=4:sw=4:et +# +# SPDX-License-Identifier: GPL-2.0-only +# """Code for parsing OpenEmbedded license strings""" import ast @@ -8,17 +10,20 @@ from fnmatch import fnmatchcase as fnmatch def license_ok(license, dont_want_licenses): """ Return False if License exist in dont_want_licenses else True """ for dwl in dont_want_licenses: - # If you want to exclude license named generically 'X', we - # surely want to exclude 'X+' as well. In consequence, we - # will exclude a trailing '+' character from LICENSE in - # case INCOMPATIBLE_LICENSE is not a 'X+' license. 
- lic = license - if not re.search(r'\+$', dwl): - lic = re.sub(r'\+', '', license) - if fnmatch(lic, dwl): + if fnmatch(license, dwl): return False return True +def obsolete_license_list(): + return ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause", + "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2", + "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+", + "GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+", + "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1", + "MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1", + "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman", + "tcl", "vim", "SGIv1"] + class LicenseError(Exception): pass @@ -79,6 +84,9 @@ class FlattenVisitor(LicenseVisitor): def visit_Str(self, node): self.licenses.append(node.s) + def visit_Constant(self, node): + self.licenses.append(node.value) + def visit_BinOp(self, node): if isinstance(node.op, ast.BitOr): left = FlattenVisitor(self.choose_licenses) @@ -101,26 +109,26 @@ def flattened_licenses(licensestr, choose_licenses): raise LicenseSyntaxError(licensestr, exc) return flatten.licenses -def is_included(licensestr, whitelist=None, blacklist=None): - """Given a license string and whitelist and blacklist, determine if the - license string matches the whitelist and does not match the blacklist. +def is_included(licensestr, include_licenses=None, exclude_licenses=None): + """Given a license string, a list of licenses to include and a list of + licenses to exclude, determine if the license string matches the include + list and does not match the exclude list. 
Returns a tuple holding the boolean state and a list of the applicable licenses that were excluded if state is False, or the licenses that were - included if the state is True. - """ + included if the state is True.""" def include_license(license): - return any(fnmatch(license, pattern) for pattern in whitelist) + return any(fnmatch(license, pattern) for pattern in include_licenses) def exclude_license(license): - return any(fnmatch(license, pattern) for pattern in blacklist) + return any(fnmatch(license, pattern) for pattern in exclude_licenses) def choose_licenses(alpha, beta): """Select the option in an OR which is the 'best' (has the most included licenses and no excluded licenses).""" # The factor 1000 below is arbitrary, just expected to be much larger - # that the number of licenses actually specified. That way the weight + # than the number of licenses actually specified. That way the weight # will be negative if the list of licenses contains an excluded license, # but still gives a higher weight to the list with the most included # licenses. @@ -133,11 +141,11 @@ def is_included(licensestr, whitelist=None, blacklist=None): else: return beta - if not whitelist: - whitelist = ['*'] + if not include_licenses: + include_licenses = ['*'] - if not blacklist: - blacklist = [] + if not exclude_licenses: + exclude_licenses = [] licenses = flattened_licenses(licensestr, choose_licenses) excluded = [lic for lic in licenses if exclude_license(lic)] @@ -232,6 +240,9 @@ class ListVisitor(LicenseVisitor): def visit_Str(self, node): self.licenses.add(node.s) + def visit_Constant(self, node): + self.licenses.add(node.value) + def list_licenses(licensestr): """Simply get a list of all licenses mentioned in a license string. 
Binary operators are not applied or taken into account in any way""" @@ -241,3 +252,8 @@ def list_licenses(licensestr): except SyntaxError as exc: raise LicenseSyntaxError(licensestr, exc) return visitor.licenses + +def apply_pkg_license_exception(pkg, bad_licenses, exceptions): + """Return remaining bad licenses after removing any package exceptions""" + + return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions] diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py index 71c0992c5d..43e46380d7 100644 --- a/meta/lib/oe/lsb.py +++ b/meta/lib/oe/lsb.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + def get_os_release(): """Get all key-value pairs from /etc/os-release as a dict""" from collections import OrderedDict @@ -106,12 +110,12 @@ def distro_identifier(adjust_hook=None): if adjust_hook: distro_id, release = adjust_hook(distro_id, release) if not distro_id: - return "Unknown" - # Filter out any non-alphanumerics - distro_id = re.sub(r'\W', '', distro_id) + return "unknown" + # Filter out any non-alphanumerics and convert to lowercase + distro_id = re.sub(r'\W', '', distro_id).lower() if release: - id_str = '{0}-{1}'.format(distro_id.lower(), release) + id_str = '{0}-{1}'.format(distro_id, release) else: id_str = distro_id return id_str.replace(' ','-').replace('/','-') diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py index c36e7b5604..d36082c535 100644 --- a/meta/lib/oe/maketype.py +++ b/meta/lib/oe/maketype.py @@ -1,3 +1,6 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# """OpenEmbedded variable typing support Types are defined in the metadata by name, using the 'type' flag on a @@ -7,12 +10,7 @@ the arguments of the type's factory for details. 
import inspect import oe.types as types -try: - # Python 3.7+ - from collections.abc import Callable -except ImportError: - # Python < 3.7 - from collections import Callable +from collections.abc import Callable available_types = {} diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py index 674303c866..1a058dcd73 100644 --- a/meta/lib/oe/manifest.py +++ b/meta/lib/oe/manifest.py @@ -1,9 +1,12 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + from abc import ABCMeta, abstractmethod import os import re import bb - class Manifest(object, metaclass=ABCMeta): """ This is an abstract class. Do not instantiate this directly. @@ -185,154 +188,11 @@ class Manifest(object, metaclass=ABCMeta): return installed_pkgs -class RpmManifest(Manifest): - """ - Returns a dictionary object with mip and mlp packages. - """ - def _split_multilib(self, pkg_list): - pkgs = dict() - - for pkg in pkg_list.split(): - pkg_type = self.PKG_TYPE_MUST_INSTALL - - ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() - - for ml_variant in ml_variants: - if pkg.startswith(ml_variant + '-'): - pkg_type = self.PKG_TYPE_MULTILIB - - if not pkg_type in pkgs: - pkgs[pkg_type] = pkg - else: - pkgs[pkg_type] += " " + pkg - - return pkgs - - def create_initial(self): - pkgs = dict() - - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - if var in self.vars_to_split: - split_pkgs = self._split_multilib(self.d.getVar(var)) - if split_pkgs is not None: - pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) - else: - pkg_list = self.d.getVar(var) - if pkg_list is not None: - pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) - - for pkg_type in pkgs: - for pkg in pkgs[pkg_type].split(): - manifest.write("%s,%s\n" % (pkg_type, pkg)) - - def create_final(self): - pass - - def create_full(self, pm): - pass - - -class OpkgManifest(Manifest): - """ - Returns a dictionary object 
with mip and mlp packages. - """ - def _split_multilib(self, pkg_list): - pkgs = dict() - - for pkg in pkg_list.split(): - pkg_type = self.PKG_TYPE_MUST_INSTALL - - ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() - - for ml_variant in ml_variants: - if pkg.startswith(ml_variant + '-'): - pkg_type = self.PKG_TYPE_MULTILIB - - if not pkg_type in pkgs: - pkgs[pkg_type] = pkg - else: - pkgs[pkg_type] += " " + pkg - - return pkgs - - def create_initial(self): - pkgs = dict() - - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - if var in self.vars_to_split: - split_pkgs = self._split_multilib(self.d.getVar(var)) - if split_pkgs is not None: - pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) - else: - pkg_list = self.d.getVar(var) - if pkg_list is not None: - pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) - - for pkg_type in sorted(pkgs): - for pkg in sorted(pkgs[pkg_type].split()): - manifest.write("%s,%s\n" % (pkg_type, pkg)) - - def create_final(self): - pass - - def create_full(self, pm): - if not os.path.exists(self.initial_manifest): - self.create_initial() - - initial_manifest = self.parse_initial_manifest() - pkgs_to_install = list() - for pkg_type in initial_manifest: - pkgs_to_install += initial_manifest[pkg_type] - if len(pkgs_to_install) == 0: - return - - output = pm.dummy_install(pkgs_to_install) - - with open(self.full_manifest, 'w+') as manifest: - pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') - for line in set(output.split('\n')): - m = pkg_re.match(line) - if m: - manifest.write(m.group(1) + '\n') - - return - - -class DpkgManifest(Manifest): - def create_initial(self): - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - pkg_list = self.d.getVar(var) - - if pkg_list is None: - continue - - for pkg in 
#
# SPDX-License-Identifier: GPL-2.0-only
#
# This file contains common functions for overlayfs and its QA check

# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
def escapeSystemdUnitName(path):
    """Escape *path* the way systemd escapes unit names.

    Leading/trailing slashes are dropped, remaining '/' separators become
    '-', and characters that would collide with that encoding are replaced
    by their C-style hex escape ("\\xNN").
    """
    escapeMap = {
        '/': '-',
        '-': "\\x2d",
        # Fix: a backslash is 0x5c in ASCII. The previous mapping used
        # "\\x5d", which encodes ']' instead, so systemd could not
        # un-escape the unit name back to the original path.
        '\\': "\\x5c"
    }
    return "".join([escapeMap.get(c, c) for c in path.strip('/')])

def strForBash(s):
    """Double each backslash so the string survives bash interpretation."""
    return s.replace('\\', '\\\\')

def allOverlaysUnitName(d):
    """Name of the umbrella service grouping all overlay mounts for ${PN}."""
    return d.getVar('PN') + '-overlays.service'

def mountUnitName(unit):
    """systemd .mount unit name for the given mount-point path."""
    return escapeSystemdUnitName(unit) + '.mount'

def helperUnitName(unit):
    """Name of the helper service that creates the upper dir for a mount."""
    return escapeSystemdUnitName(unit) + '-create-upper-dir.service'

def unitFileList(d):
    """Return the list of systemd unit files this recipe must ship.

    Validates that every flag listed on OVERLAYFS_WRITABLE_PATHS has a
    matching OVERLAYFS_MOUNT_POINT flag in the machine configuration
    (bb.fatal otherwise), then collects one .mount unit and one upper-dir
    helper service per writable path, plus the umbrella service.
    """
    fileList = []
    overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")

    if not overlayMountPoints:
        bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")

    # check that we have required mount points set first
    requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
    for mountPoint in requiredMountPoints:
        if mountPoint not in overlayMountPoints:
            bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)

    for mountPoint in overlayMountPoints:
        for path in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
            fileList.append(mountUnitName(path))
            fileList.append(helperUnitName(path))

    fileList.append(allOverlaysUnitName(d))

    return fileList
import json - packages = {} - for root, dirs, files in os.walk(pkgdir): - if os.path.basename(root) == 'node_modules': - for dn in dirs: - relpth = os.path.relpath(os.path.join(root, dn), pkgdir) - pkgitems = ['${PN}'] - for pathitem in relpth.split('/'): - if pathitem == 'node_modules': - continue - pkgitems.append(pathitem) - pkgname = '-'.join(pkgitems).replace('_', '-') - pkgname = pkgname.replace('@', '') - pkgfile = os.path.join(root, dn, 'package.json') - data = None - if os.path.exists(pkgfile): - with open(pkgfile, 'r') as f: - data = json.loads(f.read()) - packages[pkgname] = (relpth, data) - # We want the main package for a module sorted *after* its subpackages - # (so that it doesn't otherwise steal the files for the subpackage), so - # this is a cheap way to do that whilst still having an otherwise - # alphabetical sort - return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~')) diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py deleted file mode 100644 index f5cd7454d7..0000000000 --- a/meta/lib/oe/package_manager.py +++ /dev/null @@ -1,1845 +0,0 @@ -from abc import ABCMeta, abstractmethod -import os -import glob -import subprocess -import shutil -import re -import collections -import bb -import tempfile -import oe.utils -import oe.path -import string -from oe.gpg_sign import get_signer -import hashlib -import fnmatch - -# this can be used by all PM backends to create the index files in parallel -def create_index(arg): - index_cmd = arg - - bb.note("Executing '%s' ..." % index_cmd) - result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") - if result: - bb.note(result) - -def opkg_query(cmd_output): - """ - This method parse the output from the package managerand return - a dictionary with the information of the packages. This is used - when the packages are in deb or ipk format. 
- """ - verregex = re.compile(r' \([=<>]* [^ )]*\)') - output = dict() - pkg = "" - arch = "" - ver = "" - filename = "" - dep = [] - pkgarch = "" - for line in cmd_output.splitlines(): - line = line.rstrip() - if ':' in line: - if line.startswith("Package: "): - pkg = line.split(": ")[1] - elif line.startswith("Architecture: "): - arch = line.split(": ")[1] - elif line.startswith("Version: "): - ver = line.split(": ")[1] - elif line.startswith("File: ") or line.startswith("Filename:"): - filename = line.split(": ")[1] - if "/" in filename: - filename = os.path.basename(filename) - elif line.startswith("Depends: "): - depends = verregex.sub('', line.split(": ")[1]) - for depend in depends.split(", "): - dep.append(depend) - elif line.startswith("Recommends: "): - recommends = verregex.sub('', line.split(": ")[1]) - for recommend in recommends.split(", "): - dep.append("%s [REC]" % recommend) - elif line.startswith("PackageArch: "): - pkgarch = line.split(": ")[1] - - # When there is a blank line save the package information - elif not line: - # IPK doesn't include the filename - if not filename: - filename = "%s_%s_%s.ipk" % (pkg, ver, arch) - if pkg: - output[pkg] = {"arch":arch, "ver":ver, - "filename":filename, "deps": dep, "pkgarch":pkgarch } - pkg = "" - arch = "" - ver = "" - filename = "" - dep = [] - pkgarch = "" - - if pkg: - if not filename: - filename = "%s_%s_%s.ipk" % (pkg, ver, arch) - output[pkg] = {"arch":arch, "ver":ver, - "filename":filename, "deps": dep } - - return output - -def failed_postinsts_abort(pkgs, log_path): - bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot, -then please place them into pkg_postinst_ontarget_${PN} (). -Deferring to first boot via 'exit 1' is no longer supported. 
-Details of the failure are in %s.""" %(pkgs, log_path)) - -def generate_locale_archive(d, rootfs, target_arch, localedir): - # Pretty sure we don't need this for locale archive generation but - # keeping it to be safe... - locale_arch_options = { \ - "arc": ["--uint32-align=4", "--little-endian"], - "arceb": ["--uint32-align=4", "--big-endian"], - "arm": ["--uint32-align=4", "--little-endian"], - "armeb": ["--uint32-align=4", "--big-endian"], - "aarch64": ["--uint32-align=4", "--little-endian"], - "aarch64_be": ["--uint32-align=4", "--big-endian"], - "sh4": ["--uint32-align=4", "--big-endian"], - "powerpc": ["--uint32-align=4", "--big-endian"], - "powerpc64": ["--uint32-align=4", "--big-endian"], - "mips": ["--uint32-align=4", "--big-endian"], - "mipsisa32r6": ["--uint32-align=4", "--big-endian"], - "mips64": ["--uint32-align=4", "--big-endian"], - "mipsisa64r6": ["--uint32-align=4", "--big-endian"], - "mipsel": ["--uint32-align=4", "--little-endian"], - "mipsisa32r6el": ["--uint32-align=4", "--little-endian"], - "mips64el": ["--uint32-align=4", "--little-endian"], - "mipsisa64r6el": ["--uint32-align=4", "--little-endian"], - "riscv64": ["--uint32-align=4", "--little-endian"], - "riscv32": ["--uint32-align=4", "--little-endian"], - "i586": ["--uint32-align=4", "--little-endian"], - "i686": ["--uint32-align=4", "--little-endian"], - "x86_64": ["--uint32-align=4", "--little-endian"] - } - if target_arch in locale_arch_options: - arch_options = locale_arch_options[target_arch] - else: - bb.error("locale_arch_options not found for target_arch=" + target_arch) - bb.fatal("unknown arch:" + target_arch + " for locale_arch_options") - - # Need to set this so cross-localedef knows where the archive is - env = dict(os.environ) - env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive") - - for name in os.listdir(localedir): - path = os.path.join(localedir, name) - if os.path.isdir(path): - cmd = ["cross-localedef", "--verbose"] - cmd += arch_options - cmd += 
["--add-to-archive", path] - subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT) - -class Indexer(object, metaclass=ABCMeta): - def __init__(self, d, deploy_dir): - self.d = d - self.deploy_dir = deploy_dir - - @abstractmethod - def write_index(self): - pass - - -class RpmIndexer(Indexer): - def write_index(self): - self.do_write_index(self.deploy_dir) - - def do_write_index(self, deploy_dir): - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) - else: - signer = None - - createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c") - result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir)) - if result: - bb.fatal(result) - - # Sign repomd - if signer: - sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') - is_ascii_sig = (sig_type.upper() != "BIN") - signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'), - self.d.getVar('PACKAGE_FEED_GPG_NAME'), - self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), - armor=is_ascii_sig) - -class RpmSubdirIndexer(RpmIndexer): - def write_index(self): - bb.note("Generating package index for %s" %(self.deploy_dir)) - self.do_write_index(self.deploy_dir) - for entry in os.walk(self.deploy_dir): - if os.path.samefile(self.deploy_dir, entry[0]): - for dir in entry[1]: - if dir != 'repodata': - dir_path = oe.path.join(self.deploy_dir, dir) - bb.note("Generating package index for %s" %(dir_path)) - self.do_write_index(dir_path) - -class OpkgIndexer(Indexer): - def write_index(self): - arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS", - "SDK_PACKAGE_ARCHS", - ] - - opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index") - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) - else: - signer = None - - if not os.path.exists(os.path.join(self.deploy_dir, "Packages")): - open(os.path.join(self.deploy_dir, "Packages"), "w").close() - - index_cmds = 
set() - index_sign_files = set() - for arch_var in arch_vars: - archs = self.d.getVar(arch_var) - if archs is None: - continue - - for arch in archs.split(): - pkgs_dir = os.path.join(self.deploy_dir, arch) - pkgs_file = os.path.join(pkgs_dir, "Packages") - - if not os.path.isdir(pkgs_dir): - continue - - if not os.path.exists(pkgs_file): - open(pkgs_file, "w").close() - - index_cmds.add('%s -r %s -p %s -m %s' % - (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir)) - - index_sign_files.add(pkgs_file) - - if len(index_cmds) == 0: - bb.note("There are no packages in %s!" % self.deploy_dir) - return - - oe.utils.multiprocess_launch(create_index, index_cmds, self.d) - - if signer: - feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') - is_ascii_sig = (feed_sig_type.upper() != "BIN") - for f in index_sign_files: - signer.detach_sign(f, - self.d.getVar('PACKAGE_FEED_GPG_NAME'), - self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), - armor=is_ascii_sig) - - -class DpkgIndexer(Indexer): - def _create_configs(self): - bb.utils.mkdirhier(self.apt_conf_dir) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial")) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d")) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d")) - - with open(os.path.join(self.apt_conf_dir, "preferences"), - "w") as prefs_file: - pass - with open(os.path.join(self.apt_conf_dir, "sources.list"), - "w+") as sources_file: - pass - - with open(self.apt_conf_file, "w") as apt_conf: - with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"), - "apt", "apt.conf.sample")) as apt_conf_sample: - for line in apt_conf_sample.read().split("\n"): - line = re.sub(r"#ROOTFS#", "/dev/null", line) - line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) - apt_conf.write(line + "\n") - - def write_index(self): - self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"), - "apt-ftparchive") - self.apt_conf_file = os.path.join(self.apt_conf_dir, 
"apt.conf") - self._create_configs() - - os.environ['APT_CONFIG'] = self.apt_conf_file - - pkg_archs = self.d.getVar('PACKAGE_ARCHS') - if pkg_archs is not None: - arch_list = pkg_archs.split() - sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS') - if sdk_pkg_archs is not None: - for a in sdk_pkg_archs.split(): - if a not in pkg_archs: - arch_list.append(a) - - all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() - arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list) - - apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive") - gzip = bb.utils.which(os.getenv('PATH'), "gzip") - - index_cmds = [] - deb_dirs_found = False - for arch in arch_list: - arch_dir = os.path.join(self.deploy_dir, arch) - if not os.path.isdir(arch_dir): - continue - - cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive) - - cmd += "%s -fcn Packages > Packages.gz;" % gzip - - with open(os.path.join(arch_dir, "Release"), "w+") as release: - release.write("Label: %s\n" % arch) - - cmd += "PSEUDO_UNLOAD=1 %s release . 
>> Release" % apt_ftparchive - - index_cmds.append(cmd) - - deb_dirs_found = True - - if not deb_dirs_found: - bb.note("There are no packages in %s" % self.deploy_dir) - return - - oe.utils.multiprocess_launch(create_index, index_cmds, self.d) - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - raise NotImplementedError('Package feed signing not implementd for dpkg') - - - -class PkgsList(object, metaclass=ABCMeta): - def __init__(self, d, rootfs_dir): - self.d = d - self.rootfs_dir = rootfs_dir - - @abstractmethod - def list_pkgs(self): - pass - -class RpmPkgsList(PkgsList): - def list_pkgs(self): - return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed() - -class OpkgPkgsList(PkgsList): - def __init__(self, d, rootfs_dir, config_file): - super(OpkgPkgsList, self).__init__(d, rootfs_dir) - - self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") - self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) - self.opkg_args += self.d.getVar("OPKG_ARGS") - - def list_pkgs(self, format=None): - cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args) - - # opkg returns success even when it printed some - # "Collected errors:" report to stderr. Mixing stderr into - # stdout then leads to random failures later on when - # parsing the output. To avoid this we need to collect both - # output streams separately and check for empty stderr. - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - cmd_output, cmd_stderr = p.communicate() - cmd_output = cmd_output.decode("utf-8") - cmd_stderr = cmd_stderr.decode("utf-8") - if p.returncode or cmd_stderr: - bb.fatal("Cannot get the installed packages list. 
Command '%s' " - "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr)) - - return opkg_query(cmd_output) - - -class DpkgPkgsList(PkgsList): - - def list_pkgs(self): - cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"), - "--admindir=%s/var/lib/dpkg" % self.rootfs_dir, - "-W"] - - cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n") - - try: - cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Cannot get the installed packages list. Command '%s' " - "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - return opkg_query(cmd_output) - - -class PackageManager(object, metaclass=ABCMeta): - """ - This is an abstract class. Do not instantiate this directly. - """ - - def __init__(self, d, target_rootfs): - self.d = d - self.target_rootfs = target_rootfs - self.deploy_dir = None - self.deploy_lock = None - self._initialize_intercepts() - - def _initialize_intercepts(self): - bb.note("Initializing intercept dir for %s" % self.target_rootfs) - # As there might be more than one instance of PackageManager operating at the same time - # we need to isolate the intercept_scripts directories from each other, - # hence the ugly hash digest in dir name. 
- self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" % - (hashlib.sha256(self.target_rootfs.encode()).hexdigest())) - - postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split() - if not postinst_intercepts: - postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH") - if not postinst_intercepts_path: - postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts") - postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path) - - bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts)) - bb.utils.remove(self.intercepts_dir, True) - bb.utils.mkdirhier(self.intercepts_dir) - for intercept in postinst_intercepts: - bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept))) - - @abstractmethod - def _handle_intercept_failure(self, failed_script): - pass - - def _postpone_to_first_boot(self, postinst_intercept_hook): - with open(postinst_intercept_hook) as intercept: - registered_pkgs = None - for line in intercept.read().split("\n"): - m = re.match(r"^##PKGS:(.*)", line) - if m is not None: - registered_pkgs = m.group(1).strip() - break - - if registered_pkgs is not None: - bb.note("If an image is being built, the postinstalls for the following packages " - "will be postponed for first boot: %s" % - registered_pkgs) - - # call the backend dependent handler - self._handle_intercept_failure(registered_pkgs) - - - def run_intercepts(self, populate_sdk=None): - intercepts_dir = self.intercepts_dir - - bb.note("Running intercept scripts:") - os.environ['D'] = self.target_rootfs - os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE') - for script in os.listdir(intercepts_dir): - script_full = os.path.join(intercepts_dir, script) - - if script == "postinst_intercept" or not os.access(script_full, os.X_OK): - continue - - # we do not want to run any 
multilib variant of this - if script.startswith("delay_to_first_boot"): - self._postpone_to_first_boot(script_full) - continue - - if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32': - bb.warn("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - continue - - bb.note("> Executing %s intercept ..." % script) - - try: - output = subprocess.check_output(script_full, stderr=subprocess.STDOUT) - if output: bb.note(output.decode("utf-8")) - except subprocess.CalledProcessError as e: - bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8"))) - if populate_sdk == 'host': - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - elif populate_sdk == 'target': - if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): - bb.warn("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - else: - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - else: - if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): - bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - self._postpone_to_first_boot(script_full) - else: - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - - @abstractmethod - def update(self): - """ - Update the package manager package database. 
- """ - pass - - @abstractmethod - def install(self, pkgs, attempt_only=False): - """ - Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is - True, installation failures are ignored. - """ - pass - - @abstractmethod - def remove(self, pkgs, with_dependencies=True): - """ - Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies' - is False, then any dependencies are left in place. - """ - pass - - @abstractmethod - def write_index(self): - """ - This function creates the index files - """ - pass - - @abstractmethod - def remove_packaging_data(self): - pass - - @abstractmethod - def list_installed(self): - pass - - @abstractmethod - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - Deleting the tmpdir is responsability of the caller. - """ - pass - - @abstractmethod - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - """ - Add remote package feeds into repository manager configuration. The parameters - for the feeds are set by feed_uris, feed_base_paths and feed_archs. - See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS - for their description. - """ - pass - - def install_glob(self, globs, sdk=False): - """ - Install all packages that match a glob. - """ - # TODO don't have sdk here but have a property on the superclass - # (and respect in install_complementary) - if sdk: - pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}") - else: - pkgdatadir = self.d.getVar("PKGDATA_DIR") - - try: - bb.note("Installing globbed packages...") - cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs] - pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") - self.install(pkgs.split(), attempt_only=True) - except subprocess.CalledProcessError as e: - # Return code 1 means no packages matched - if e.returncode != 1: - bb.fatal("Could not compute globbed packages list. 
Command " - "'%s' returned %d:\n%s" % - (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - def install_complementary(self, globs=None): - """ - Install complementary packages based upon the list of currently installed - packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install - these packages, if they don't exist then no error will occur. Note: every - backend needs to call this function explicitly after the normal package - installation - """ - if globs is None: - globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY') - split_linguas = set() - - for translation in self.d.getVar('IMAGE_LINGUAS').split(): - split_linguas.add(translation) - split_linguas.add(translation.split('-')[0]) - - split_linguas = sorted(split_linguas) - - for lang in split_linguas: - globs += " *-locale-%s" % lang - - if globs is None: - return - - # we need to write the list of installed packages to a file because the - # oe-pkgdata-util reads it from a file - with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs: - pkgs = self.list_installed() - output = oe.utils.format_pkg_list(pkgs, "arch") - installed_pkgs.write(output) - installed_pkgs.flush() - - cmd = ["oe-pkgdata-util", - "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name, - globs] - exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY') - if exclude: - cmd.extend(['--exclude=' + '|'.join(exclude.split())]) - try: - bb.note("Installing complementary packages ...") - bb.note('Running %s' % cmd) - complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") - self.install(complementary_pkgs.split(), attempt_only=True) - except subprocess.CalledProcessError as e: - bb.fatal("Could not compute complementary packages list. 
Command " - "'%s' returned %d:\n%s" % - (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - target_arch = self.d.getVar('TARGET_ARCH') - localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale") - if os.path.exists(localedir) and os.listdir(localedir): - generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir) - # And now delete the binary locales - self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False) - - def deploy_dir_lock(self): - if self.deploy_dir is None: - raise RuntimeError("deploy_dir is not set!") - - lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") - - self.deploy_lock = bb.utils.lockfile(lock_file_name) - - def deploy_dir_unlock(self): - if self.deploy_lock is None: - return - - bb.utils.unlockfile(self.deploy_lock) - - self.deploy_lock = None - - def construct_uris(self, uris, base_paths): - """ - Construct URIs based on the following pattern: uri/base_path where 'uri' - and 'base_path' correspond to each element of the corresponding array - argument leading to len(uris) x len(base_paths) elements on the returned - array - """ - def _append(arr1, arr2, sep='/'): - res = [] - narr1 = [a.rstrip(sep) for a in arr1] - narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2] - for a1 in narr1: - if arr2: - for a2 in narr2: - res.append("%s%s%s" % (a1, sep, a2)) - else: - res.append(a1) - return res - return _append(uris, base_paths) - -def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies): - """ - Go through our do_package_write_X dependencies and hardlink the packages we depend - upon into the repo directory. This prevents us seeing other packages that may - have been built that we don't depend upon and also packages for architectures we don't - support. 
- """ - import errno - - taskdepdata = d.getVar("BB_TASKDEPDATA", False) - mytaskname = d.getVar("BB_RUNTASK") - pn = d.getVar("PN") - seendirs = set() - multilibs = {} - - bb.utils.remove(subrepo_dir, recurse=True) - bb.utils.mkdirhier(subrepo_dir) - - # Detect bitbake -b usage - nodeps = d.getVar("BB_LIMITEDDEPS") or False - if nodeps or not filterbydependencies: - oe.path.symlink(deploydir, subrepo_dir, True) - return - - start = None - for dep in taskdepdata: - data = taskdepdata[dep] - if data[1] == mytaskname and data[0] == pn: - start = dep - break - if start is None: - bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?") - pkgdeps = set() - start = [start] - seen = set(start) - # Support direct dependencies (do_rootfs -> do_package_write_X) - # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X) - while start: - next = [] - for dep2 in start: - for dep in taskdepdata[dep2][3]: - if taskdepdata[dep][0] != pn: - if "do_" + taskname in dep: - pkgdeps.add(dep) - elif dep not in seen: - next.append(dep) - seen.add(dep) - start = next - - for dep in pkgdeps: - c = taskdepdata[dep][0] - manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs) - if not manifest: - bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2])) - if not os.path.exists(manifest): - continue - with open(manifest, "r") as f: - for l in f: - l = l.strip() - deploydir = os.path.normpath(deploydir) - if bb.data.inherits_class('packagefeed-stability', d): - dest = l.replace(deploydir + "-prediff", "") - else: - dest = l.replace(deploydir, "") - dest = subrepo_dir + dest - if l.endswith("/"): - if dest not in seendirs: - bb.utils.mkdirhier(dest) - seendirs.add(dest) - continue - # Try to hardlink the file, copy if that fails - destdir = os.path.dirname(dest) - if destdir not in seendirs: - bb.utils.mkdirhier(destdir) - seendirs.add(destdir) - try: - os.link(l, dest) - except OSError as err: - if 
err.errno == errno.EXDEV: - bb.utils.copyfile(l, dest) - else: - raise - -class RpmPM(PackageManager): - def __init__(self, - d, - target_rootfs, - target_vendor, - task_name='target', - arch_var=None, - os_var=None, - rpm_repo_workdir="oe-rootfs-repo", - filterbydependencies=True, - needfeed=True): - super(RpmPM, self).__init__(d, target_rootfs) - self.target_vendor = target_vendor - self.task_name = task_name - if arch_var == None: - self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_") - else: - self.archs = self.d.getVar(arch_var).replace("-","_") - if task_name == "host": - self.primary_arch = self.d.getVar('SDK_ARCH') - else: - self.primary_arch = self.d.getVar('MACHINE_ARCH') - - if needfeed: - self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir) - create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies) - - self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name) - if not os.path.exists(self.d.expand('${T}/saved_packaging_data')): - bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data')) - self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf'] - self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % - self.task_name) - if not os.path.exists(self.d.expand('${T}/saved')): - bb.utils.mkdirhier(self.d.expand('${T}/saved')) - - def _configure_dnf(self): - # libsolv handles 'noarch' internally, we don't need to specify it explicitly - archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]] - # This prevents accidental matching against libsolv's built-in policies - if len(archs) <= 1: - archs = archs + ["bogusarch"] - confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/") - bb.utils.mkdirhier(confdir) - open(confdir + "arch", 'w').write(":".join(archs)) - distro_codename = 
self.d.getVar('DISTRO_CODENAME') - open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '') - - open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("") - - - def _configure_rpm(self): - # We need to configure rpm to use our primary package architecture as the installation architecture, - # and to make it compatible with other package architectures that we use. - # Otherwise it will refuse to proceed with packages installation. - platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/") - rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/") - bb.utils.mkdirhier(platformconfdir) - open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch) - with open(rpmrcconfdir + "rpmrc", 'w') as f: - f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch)) - f.write("buildarch_compat: %s: noarch\n" % self.primary_arch) - - open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n") - if self.d.getVar('RPM_PREFER_ELF_ARCH'): - open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH'))) - else: - open(platformconfdir + "macros", 'a').write("%_prefer_color 7") - - if self.d.getVar('RPM_SIGN_PACKAGES') == '1': - signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND')) - pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key') - signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME')) - rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys") - cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path] - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Importing GPG key failed. 
Command '%s' " - "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - def create_configs(self): - self._configure_dnf() - self._configure_rpm() - - def write_index(self): - lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock" - lf = bb.utils.lockfile(lockfilename, False) - RpmIndexer(self.d, self.rpm_repo_dir).write_index() - bb.utils.unlockfile(lf) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - from urllib.parse import urlparse - - if feed_uris == "": - return - - gpg_opts = '' - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - gpg_opts += 'repo_gpgcheck=1\n' - gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME')) - - if self.d.getVar('RPM_SIGN_PACKAGES') != '1': - gpg_opts += 'gpgcheck=0\n' - - bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d")) - remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - for uri in remote_uris: - repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/")) - if feed_archs is not None: - for arch in feed_archs.split(): - repo_uri = uri + "/" + arch - repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/")) - repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/")) - open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write( - "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts)) - else: - repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/")) - repo_uri = uri - open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write( - "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts)) - - def _prepare_pkg_transaction(self): - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - 
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - - def install(self, pkgs, attempt_only = False): - if len(pkgs) == 0: - return - self._prepare_pkg_transaction() - - bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS') - package_exclude = self.d.getVar('PACKAGE_EXCLUDE') - exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else []) - - output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) + - (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) + - (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) + - (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) + - ["install"] + - pkgs) - - failed_scriptlets_pkgnames = collections.OrderedDict() - for line in output.splitlines(): - if line.startswith("Error in POSTIN scriptlet in rpm package"): - failed_scriptlets_pkgnames[line.split()[-1]] = True - - if len(failed_scriptlets_pkgnames) > 0: - failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - - def remove(self, pkgs, with_dependencies = True): - if not pkgs: - return - - self._prepare_pkg_transaction() - - if with_dependencies: - self._invoke_dnf(["remove"] + pkgs) - else: - cmd = bb.utils.which(os.getenv('PATH'), "rpm") - args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs] - - try: - bb.note("Running %s" % ' '.join([cmd] + args + pkgs)) - output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.fatal("Could not invoke rpm. 
Command " - "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8"))) - - def upgrade(self): - self._prepare_pkg_transaction() - self._invoke_dnf(["upgrade"]) - - def autoremove(self): - self._prepare_pkg_transaction() - self._invoke_dnf(["autoremove"]) - - def remove_packaging_data(self): - self._invoke_dnf(["clean", "all"]) - for dir in self.packaging_data_dirs: - bb.utils.remove(oe.path.join(self.target_rootfs, dir), True) - - def backup_packaging_data(self): - # Save the packaging dirs for increment rpm image generation - if os.path.exists(self.saved_packaging_data): - bb.utils.remove(self.saved_packaging_data, True) - for i in self.packaging_data_dirs: - source_dir = oe.path.join(self.target_rootfs, i) - target_dir = oe.path.join(self.saved_packaging_data, i) - if os.path.isdir(source_dir): - shutil.copytree(source_dir, target_dir, symlinks=True) - elif os.path.isfile(source_dir): - shutil.copy2(source_dir, target_dir) - - def recovery_packaging_data(self): - # Move the rpmlib back - if os.path.exists(self.saved_packaging_data): - for i in self.packaging_data_dirs: - target_dir = oe.path.join(self.target_rootfs, i) - if os.path.exists(target_dir): - bb.utils.remove(target_dir, True) - source_dir = oe.path.join(self.saved_packaging_data, i) - if os.path.isdir(source_dir): - shutil.copytree(source_dir, target_dir, symlinks=True) - elif os.path.isfile(source_dir): - shutil.copy2(source_dir, target_dir) - - def list_installed(self): - output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"], - print_output = False) - packages = {} - current_package = None - current_deps = None - current_state = "initial" - for line in output.splitlines(): - if line.startswith("Package:"): - package_info = line.split(" ")[1:] - current_package = package_info[0] - 
package_arch = package_info[1] - package_version = package_info[2] - package_rpm = package_info[3] - packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm} - current_deps = [] - elif line.startswith("Dependencies:"): - current_state = "dependencies" - elif line.startswith("Recommendations"): - current_state = "recommendations" - elif line.startswith("DependenciesEndHere:"): - current_state = "initial" - packages[current_package]["deps"] = current_deps - elif len(line) > 0: - if current_state == "dependencies": - current_deps.append(line) - elif current_state == "recommendations": - current_deps.append("%s [REC]" % line) - - return packages - - def update(self): - self._invoke_dnf(["makecache", "--refresh"]) - - def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ): - os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs - - dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf") - standard_dnf_args = ["-v", "--rpmverbosity=info", "-y", - "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), - "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")), - "--installroot=%s" % (self.target_rootfs), - "--setopt=logdir=%s" % (self.d.getVar('T')) - ] - if hasattr(self, "rpm_repo_dir"): - standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir)) - cmd = [dnf_cmd] + standard_dnf_args + dnf_args - bb.note('Running %s' % ' '.join(cmd)) - try: - output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8") - if print_output: - bb.debug(1, output) - return output - except subprocess.CalledProcessError as e: - if print_output: - (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " - "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - else: - (bb.note, bb.fatal)[fatal]("Could not invoke dnf. 
Command " - "'%s' returned %d:" % (' '.join(cmd), e.returncode)) - return e.output.decode("utf-8") - - def dump_install_solution(self, pkgs): - open(self.solution_manifest, 'w').write(" ".join(pkgs)) - return pkgs - - def load_old_install_solution(self): - if not os.path.exists(self.solution_manifest): - return [] - - return open(self.solution_manifest, 'r').read().split() - - def _script_num_prefix(self, path): - files = os.listdir(path) - numbers = set() - numbers.add(99) - for f in files: - numbers.add(int(f.split("-")[0])) - return max(numbers) + 1 - - def save_rpmpostinst(self, pkg): - bb.note("Saving postinstall script of %s" % (pkg)) - cmd = bb.utils.which(os.getenv('PATH'), "rpm") - args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg] - - try: - output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Could not invoke rpm. Command " - "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8"))) - - # may need to prepend #!/bin/sh to output - - target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/')) - bb.utils.mkdirhier(target_path) - num = self._script_num_prefix(target_path) - saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg)) - open(saved_script_name, 'w').write(output) - os.chmod(saved_script_name, 0o755) - - def _handle_intercept_failure(self, registered_pkgs): - rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') - bb.utils.mkdirhier(rpm_postinsts_dir) - - # Save the package postinstalls in /etc/rpm-postinsts - for pkg in registered_pkgs.split(): - self.save_rpmpostinst(pkg) - - def extract(self, pkg): - output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg]) - pkg_name = output.splitlines()[-1] - if not pkg_name.endswith(".rpm"): - bb.fatal("dnf could not find package %s in repository: %s" 
%(pkg, output)) - pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name) - - cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio") - rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio") - - if not os.path.isfile(pkg_path): - bb.fatal("Unable to extract package for '%s'." - "File %s doesn't exists" % (pkg, pkg_path)) - - tmp_dir = tempfile.mkdtemp() - current_dir = os.getcwd() - os.chdir(tmp_dir) - - try: - cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd) - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8"))) - except OSError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename)) - - bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) - os.chdir(current_dir) - - return tmp_dir - - -class OpkgDpkgPM(PackageManager): - def __init__(self, d, target_rootfs): - """ - This is an abstract class. Do not instantiate this directly. - """ - super(OpkgDpkgPM, self).__init__(d, target_rootfs) - - def package_info(self, pkg, cmd): - """ - Returns a dictionary with the package info. - - This method extracts the common parts for Opkg and Dpkg - """ - - try: - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Unable to list available packages. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - return opkg_query(output) - - def extract(self, pkg, pkg_info): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. 
- - This method extracts the common parts for Opkg and Dpkg - """ - - ar_cmd = bb.utils.which(os.getenv("PATH"), "ar") - tar_cmd = bb.utils.which(os.getenv("PATH"), "tar") - pkg_path = pkg_info[pkg]["filepath"] - - if not os.path.isfile(pkg_path): - bb.fatal("Unable to extract package for '%s'." - "File %s doesn't exists" % (pkg, pkg_path)) - - tmp_dir = tempfile.mkdtemp() - current_dir = os.getcwd() - os.chdir(tmp_dir) - data_tar = 'data.tar.xz' - - try: - cmd = [ar_cmd, 'x', pkg_path] - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - cmd = [tar_cmd, 'xf', data_tar] - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - except OSError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename)) - - bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) - bb.utils.remove(os.path.join(tmp_dir, "debian-binary")) - bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz")) - os.chdir(current_dir) - - return tmp_dir - - def _handle_intercept_failure(self, registered_pkgs): - self.mark_packages("unpacked", registered_pkgs.split()) - -class OpkgPM(OpkgDpkgPM): - def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True): - super(OpkgPM, self).__init__(d, target_rootfs) - - self.config_file = config_file - self.pkg_archs = archs - self.task_name = task_name - - self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir) - self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock") - self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") - self.opkg_args = 
"--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs) - self.opkg_args += self.d.getVar("OPKG_ARGS") - - if prepare_index: - create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies) - - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - if opkg_lib_dir[0] == "/": - opkg_lib_dir = opkg_lib_dir[1:] - - self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg") - - bb.utils.mkdirhier(self.opkg_dir) - - self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name) - if not os.path.exists(self.d.expand('${T}/saved')): - bb.utils.mkdirhier(self.d.expand('${T}/saved')) - - self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1" - if self.from_feeds: - self._create_custom_config() - else: - self._create_config() - - self.indexer = OpkgIndexer(self.d, self.deploy_dir) - - def mark_packages(self, status_tag, packages=None): - """ - This function will change a package's status in /var/lib/opkg/status file. 
- If 'packages' is None then the new_status will be applied to all - packages - """ - status_file = os.path.join(self.opkg_dir, "status") - - with open(status_file, "r") as sf: - with open(status_file + ".tmp", "w+") as tmp_sf: - if packages is None: - tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", - r"Package: \1\n\2Status: \3%s" % status_tag, - sf.read())) - else: - if type(packages).__name__ != "list": - raise TypeError("'packages' should be a list object") - - status = sf.read() - for pkg in packages: - status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, - r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), - status) - - tmp_sf.write(status) - - os.rename(status_file + ".tmp", status_file) - - def _create_custom_config(self): - bb.note("Building from feeds activated!") - - with open(self.config_file, "w+") as config_file: - priority = 1 - for arch in self.pkg_archs.split(): - config_file.write("arch %s %d\n" % (arch, priority)) - priority += 5 - - for line in (self.d.getVar('IPK_FEED_URIS') or "").split(): - feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) - - if feed_match is not None: - feed_name = feed_match.group(1) - feed_uri = feed_match.group(2) - - bb.note("Add %s feed with URL %s" % (feed_name, feed_uri)) - - config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) - - """ - Allow to use package deploy directory contents as quick devel-testing - feed. This creates individual feed configs for each arch subdir of those - specified as compatible for the current machine. - NOTE: Development-helper feature, NOT a full-fledged feed. 
- """ - if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "": - for arch in self.pkg_archs.split(): - cfg_file_name = os.path.join(self.target_rootfs, - self.d.getVar("sysconfdir"), - "opkg", - "local-%s-feed.conf" % arch) - - with open(cfg_file_name, "w+") as cfg_file: - cfg_file.write("src/gz local-%s %s/%s" % - (arch, - self.d.getVar('FEED_DEPLOYDIR_BASE_URI'), - arch)) - - if self.d.getVar('OPKGLIBDIR') != '/var/lib': - # There is no command line option for this anymore, we need to add - # info_dir and status_file to config file, if OPKGLIBDIR doesn't have - # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" - cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) - cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) - cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) - - - def _create_config(self): - with open(self.config_file, "w+") as config_file: - priority = 1 - for arch in self.pkg_archs.split(): - config_file.write("arch %s %d\n" % (arch, priority)) - priority += 5 - - config_file.write("src oe file:%s\n" % self.deploy_dir) - - for arch in self.pkg_archs.split(): - pkgs_dir = os.path.join(self.deploy_dir, arch) - if os.path.isdir(pkgs_dir): - config_file.write("src oe-%s file:%s\n" % - (arch, pkgs_dir)) - - if self.d.getVar('OPKGLIBDIR') != '/var/lib': - # There is no command line option for this anymore, we need to add - # info_dir and status_file to config file, if OPKGLIBDIR doesn't have - # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" - # 
libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" - config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) - config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) - config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - if feed_uris == "": - return - - rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' - % self.target_rootfs) - - feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split() - - with open(rootfs_config, "w+") as config_file: - uri_iterator = 0 - for uri in feed_uris: - if archs: - for arch in archs: - if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))): - continue - bb.note('Adding opkg feed url-%s-%d (%s)' % - (arch, uri_iterator, uri)) - config_file.write("src/gz uri-%s-%d %s/%s\n" % - (arch, uri_iterator, uri, arch)) - else: - bb.note('Adding opkg feed url-%d (%s)' % - (uri_iterator, uri)) - config_file.write("src/gz uri-%d %s\n" % - (uri_iterator, uri)) - - uri_iterator += 1 - - def update(self): - self.deploy_dir_lock() - - cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - self.deploy_dir_unlock() - bb.fatal("Unable to update the package index files. 
Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - self.deploy_dir_unlock() - - def install(self, pkgs, attempt_only=False): - if not pkgs: - return - - cmd = "%s %s" % (self.opkg_cmd, self.opkg_args) - for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split(): - cmd += " --add-exclude %s" % exclude - for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split(): - cmd += " --add-ignore-recommends %s" % bad_recommendation - cmd += " install " - cmd += " ".join(pkgs) - - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - try: - bb.note("Installing the following packages: %s" % ' '.join(pkgs)) - bb.note(cmd) - output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - failed_pkgs = [] - for line in output.split('\n'): - if line.endswith("configuration required on target."): - bb.warn(line) - failed_pkgs.append(line.split(".")[0]) - if failed_pkgs: - failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - except subprocess.CalledProcessError as e: - (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. 
" - "Command '%s' returned %d:\n%s" % - (cmd, e.returncode, e.output.decode("utf-8"))) - - def remove(self, pkgs, with_dependencies=True): - if not pkgs: - return - - if with_dependencies: - cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \ - (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) - else: - cmd = "%s %s --force-depends remove %s" % \ - (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) - - try: - bb.note(cmd) - output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to remove packages. Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - def write_index(self): - self.deploy_dir_lock() - - result = self.indexer.write_index() - - self.deploy_dir_unlock() - - if result is not None: - bb.fatal(result) - - def remove_packaging_data(self): - bb.utils.remove(self.opkg_dir, True) - # create the directory back, it's needed by PM lock - bb.utils.mkdirhier(self.opkg_dir) - - def remove_lists(self): - if not self.from_feeds: - bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True) - - def list_installed(self): - return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs() - - def dummy_install(self, pkgs): - """ - The following function dummy installs pkgs and returns the log of output. 
- """ - if len(pkgs) == 0: - return - - # Create an temp dir as opkg root for dummy installation - temp_rootfs = self.d.expand('${T}/opkg') - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - if opkg_lib_dir[0] == "/": - opkg_lib_dir = opkg_lib_dir[1:] - temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg') - bb.utils.mkdirhier(temp_opkg_dir) - - opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) - opkg_args += self.d.getVar("OPKG_ARGS") - - cmd = "%s %s update" % (self.opkg_cmd, opkg_args) - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to update. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - # Dummy installation - cmd = "%s %s --noaction install %s " % (self.opkg_cmd, - opkg_args, - ' '.join(pkgs)) - try: - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to dummy install packages. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - bb.utils.remove(temp_rootfs, True) - - return output - - def backup_packaging_data(self): - # Save the opkglib for increment ipk image generation - if os.path.exists(self.saved_opkg_dir): - bb.utils.remove(self.saved_opkg_dir, True) - shutil.copytree(self.opkg_dir, - self.saved_opkg_dir, - symlinks=True) - - def recover_packaging_data(self): - # Move the opkglib back - if os.path.exists(self.saved_opkg_dir): - if os.path.exists(self.opkg_dir): - bb.utils.remove(self.opkg_dir, True) - - bb.note('Recover packaging data') - shutil.copytree(self.saved_opkg_dir, - self.opkg_dir, - symlinks=True) - - def package_info(self, pkg): - """ - Returns a dictionary with the package info. 
- """ - cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg) - pkg_info = super(OpkgPM, self).package_info(pkg, cmd) - - pkg_arch = pkg_info[pkg]["arch"] - pkg_filename = pkg_info[pkg]["filename"] - pkg_info[pkg]["filepath"] = \ - os.path.join(self.deploy_dir, pkg_arch, pkg_filename) - - return pkg_info - - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. - """ - pkg_info = self.package_info(pkg) - if not pkg_info: - bb.fatal("Unable to get information for package '%s' while " - "trying to extract the package." % pkg) - - tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info) - bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) - - return tmp_dir - -class DpkgPM(OpkgDpkgPM): - def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True): - super(DpkgPM, self).__init__(d, target_rootfs) - self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir) - - create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies) - - if apt_conf_dir is None: - self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") - else: - self.apt_conf_dir = apt_conf_dir - self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") - self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") - self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache") - - self.apt_args = d.getVar("APT_ARGS") - - self.all_arch_list = archs.split() - all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() - self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list) - - self._create_configs(archs, base_archs) - - self.indexer = DpkgIndexer(self.d, self.deploy_dir) - - def mark_packages(self, status_tag, packages=None): - """ - This function will change a package's 
status in /var/lib/dpkg/status file. - If 'packages' is None then the new_status will be applied to all - packages - """ - status_file = self.target_rootfs + "/var/lib/dpkg/status" - - with open(status_file, "r") as sf: - with open(status_file + ".tmp", "w+") as tmp_sf: - if packages is None: - tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", - r"Package: \1\n\2Status: \3%s" % status_tag, - sf.read())) - else: - if type(packages).__name__ != "list": - raise TypeError("'packages' should be a list object") - - status = sf.read() - for pkg in packages: - status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, - r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), - status) - - tmp_sf.write(status) - - os.rename(status_file + ".tmp", status_file) - - def run_pre_post_installs(self, package_name=None): - """ - Run the pre/post installs for package "package_name". If package_name is - None, then run all pre/post install scriptlets. 
- """ - info_dir = self.target_rootfs + "/var/lib/dpkg/info" - ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"]) - control_scripts = [ - ControlScript(".preinst", "Preinstall", "install"), - ControlScript(".postinst", "Postinstall", "configure")] - status_file = self.target_rootfs + "/var/lib/dpkg/status" - installed_pkgs = [] - - with open(status_file, "r") as status: - for line in status.read().split('\n'): - m = re.match(r"^Package: (.*)", line) - if m is not None: - installed_pkgs.append(m.group(1)) - - if package_name is not None and not package_name in installed_pkgs: - return - - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - for pkg_name in installed_pkgs: - for control_script in control_scripts: - p_full = os.path.join(info_dir, pkg_name + control_script.suffix) - if os.path.exists(p_full): - try: - bb.note("Executing %s for package: %s ..." % - (control_script.name.lower(), pkg_name)) - output = subprocess.check_output([p_full, control_script.argument], - stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.warn("%s for package %s failed with %d:\n%s" % - (control_script.name, pkg_name, e.returncode, - e.output.decode("utf-8"))) - failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - - def update(self): - os.environ['APT_CONFIG'] = self.apt_conf_file - - self.deploy_dir_lock() - - cmd = "%s update" % self.apt_get_cmd - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to update the package index files. 
Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - self.deploy_dir_unlock() - - def install(self, pkgs, attempt_only=False): - if attempt_only and len(pkgs) == 0: - return - - os.environ['APT_CONFIG'] = self.apt_conf_file - - cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \ - (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) - - try: - bb.note("Installing the following packages: %s" % ' '.join(pkgs)) - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. " - "Command '%s' returned %d:\n%s" % - (cmd, e.returncode, e.output.decode("utf-8"))) - - # rename *.dpkg-new files/dirs - for root, dirs, files in os.walk(self.target_rootfs): - for dir in dirs: - new_dir = re.sub(r"\.dpkg-new", "", dir) - if dir != new_dir: - os.rename(os.path.join(root, dir), - os.path.join(root, new_dir)) - - for file in files: - new_file = re.sub(r"\.dpkg-new", "", file) - if file != new_file: - os.rename(os.path.join(root, file), - os.path.join(root, new_file)) - - - def remove(self, pkgs, with_dependencies=True): - if not pkgs: - return - - if with_dependencies: - os.environ['APT_CONFIG'] = self.apt_conf_file - cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs)) - else: - cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ - " -P --force-depends %s" % \ - (bb.utils.which(os.getenv('PATH'), "dpkg"), - self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to remove packages. 
Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - def write_index(self): - self.deploy_dir_lock() - - result = self.indexer.write_index() - - self.deploy_dir_unlock() - - if result is not None: - bb.fatal(result) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - if feed_uris == "": - return - - sources_conf = os.path.join("%s/etc/apt/sources.list" - % self.target_rootfs) - arch_list = [] - - if feed_archs is None: - for arch in self.all_arch_list: - if not os.path.exists(os.path.join(self.deploy_dir, arch)): - continue - arch_list.append(arch) - else: - arch_list = feed_archs.split() - - feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - - with open(sources_conf, "w+") as sources_file: - for uri in feed_uris: - if arch_list: - for arch in arch_list: - bb.note('Adding dpkg channel at (%s)' % uri) - sources_file.write("deb %s/%s ./\n" % - (uri, arch)) - else: - bb.note('Adding dpkg channel at (%s)' % uri) - sources_file.write("deb %s ./\n" % uri) - - def _create_configs(self, archs, base_archs): - base_archs = re.sub(r"_", r"-", base_archs) - - if os.path.exists(self.apt_conf_dir): - bb.utils.remove(self.apt_conf_dir, True) - - bb.utils.mkdirhier(self.apt_conf_dir) - bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/") - bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/") - bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/") - - arch_list = [] - for arch in self.all_arch_list: - if not os.path.exists(os.path.join(self.deploy_dir, arch)): - continue - arch_list.append(arch) - - with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file: - priority = 801 - for arch in arch_list: - prefs_file.write( - "Package: *\n" - "Pin: release l=%s\n" - "Pin-Priority: %d\n\n" % (arch, priority)) - - priority += 5 - - pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or "" - for pkg in pkg_exclude.split(): - prefs_file.write( - "Package: %s\n" - "Pin: release 
*\n" - "Pin-Priority: -1\n\n" % pkg) - - arch_list.reverse() - - with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file: - for arch in arch_list: - sources_file.write("deb file:%s/ ./\n" % - os.path.join(self.deploy_dir, arch)) - - base_arch_list = base_archs.split() - multilib_variants = self.d.getVar("MULTILIB_VARIANTS"); - for variant in multilib_variants.split(): - localdata = bb.data.createCopy(self.d) - variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False) - orig_arch = localdata.getVar("DPKG_ARCH") - localdata.setVar("DEFAULTTUNE", variant_tune) - variant_arch = localdata.getVar("DPKG_ARCH") - if variant_arch not in base_arch_list: - base_arch_list.append(variant_arch) - - with open(self.apt_conf_file, "w+") as apt_conf: - with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample: - for line in apt_conf_sample.read().split("\n"): - match_arch = re.match(r" Architecture \".*\";$", line) - architectures = "" - if match_arch: - for base_arch in base_arch_list: - architectures += "\"%s\";" % base_arch - apt_conf.write(" Architectures {%s};\n" % architectures); - apt_conf.write(" Architecture \"%s\";\n" % base_archs) - else: - line = re.sub(r"#ROOTFS#", self.target_rootfs, line) - line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) - apt_conf.write(line + "\n") - - target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs - bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info")) - - bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates")) - - if not os.path.exists(os.path.join(target_dpkg_dir, "status")): - open(os.path.join(target_dpkg_dir, "status"), "w+").close() - if not os.path.exists(os.path.join(target_dpkg_dir, "available")): - open(os.path.join(target_dpkg_dir, "available"), "w+").close() - - def remove_packaging_data(self): - bb.utils.remove(os.path.join(self.target_rootfs, - self.d.getVar('opkglibdir')), True) - bb.utils.remove(self.target_rootfs + 
"/var/lib/dpkg/", True) - - def fix_broken_dependencies(self): - os.environ['APT_CONFIG'] = self.apt_conf_file - - cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Cannot fix broken dependencies. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - def list_installed(self): - return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs() - - def package_info(self, pkg): - """ - Returns a dictionary with the package info. - """ - cmd = "%s show %s" % (self.apt_cache_cmd, pkg) - pkg_info = super(DpkgPM, self).package_info(pkg, cmd) - - pkg_arch = pkg_info[pkg]["pkgarch"] - pkg_filename = pkg_info[pkg]["filename"] - pkg_info[pkg]["filepath"] = \ - os.path.join(self.deploy_dir, pkg_arch, pkg_filename) - - return pkg_info - - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. - """ - pkg_info = self.package_info(pkg) - if not pkg_info: - bb.fatal("Unable to get information for package '%s' while " - "trying to extract the package." 
% pkg) - - tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info) - bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) - - return tmp_dir - -def generate_index_files(d): - classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split() - - indexer_map = { - "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')), - "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')), - "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB')) - } - - result = None - - for pkg_class in classes: - if not pkg_class in indexer_map: - continue - - if os.path.exists(indexer_map[pkg_class][1]): - result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() - - if result is not None: - bb.fatal(result) diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py new file mode 100644 index 0000000000..80bc1a6bc6 --- /dev/null +++ b/meta/lib/oe/package_manager/__init__.py @@ -0,0 +1,556 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from abc import ABCMeta, abstractmethod +import os +import glob +import subprocess +import shutil +import re +import collections +import bb +import tempfile +import oe.utils +import oe.path +import string +from oe.gpg_sign import get_signer +import hashlib +import fnmatch + +# this can be used by all PM backends to create the index files in parallel +def create_index(arg): + index_cmd = arg + + bb.note("Executing '%s' ..." % index_cmd) + result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") + if result: + bb.note(result) + +def opkg_query(cmd_output): + """ + This method parse the output from the package managerand return + a dictionary with the information of the packages. This is used + when the packages are in deb or ipk format. 
+ """ + verregex = re.compile(r' \([=<>]* [^ )]*\)') + output = dict() + pkg = "" + arch = "" + ver = "" + filename = "" + dep = [] + prov = [] + pkgarch = "" + for line in cmd_output.splitlines()+['']: + line = line.rstrip() + if ':' in line: + if line.startswith("Package: "): + pkg = line.split(": ")[1] + elif line.startswith("Architecture: "): + arch = line.split(": ")[1] + elif line.startswith("Version: "): + ver = line.split(": ")[1] + elif line.startswith("File: ") or line.startswith("Filename:"): + filename = line.split(": ")[1] + if "/" in filename: + filename = os.path.basename(filename) + elif line.startswith("Depends: "): + depends = verregex.sub('', line.split(": ")[1]) + for depend in depends.split(", "): + dep.append(depend) + elif line.startswith("Recommends: "): + recommends = verregex.sub('', line.split(": ")[1]) + for recommend in recommends.split(", "): + dep.append("%s [REC]" % recommend) + elif line.startswith("PackageArch: "): + pkgarch = line.split(": ")[1] + elif line.startswith("Provides: "): + provides = verregex.sub('', line.split(": ")[1]) + for provide in provides.split(", "): + prov.append(provide) + + # When there is a blank line save the package information + elif not line: + # IPK doesn't include the filename + if not filename: + filename = "%s_%s_%s.ipk" % (pkg, ver, arch) + if pkg: + output[pkg] = {"arch":arch, "ver":ver, + "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov} + pkg = "" + arch = "" + ver = "" + filename = "" + dep = [] + prov = [] + pkgarch = "" + + return output + +def failed_postinsts_abort(pkgs, log_path): + bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot, +then please place them into pkg_postinst_ontarget:${PN} (). +Deferring to first boot via 'exit 1' is no longer supported. 
+Details of the failure are in %s.""" %(pkgs, log_path)) + +def generate_locale_archive(d, rootfs, target_arch, localedir): + # Pretty sure we don't need this for locale archive generation but + # keeping it to be safe... + locale_arch_options = { \ + "arc": ["--uint32-align=4", "--little-endian"], + "arceb": ["--uint32-align=4", "--big-endian"], + "arm": ["--uint32-align=4", "--little-endian"], + "armeb": ["--uint32-align=4", "--big-endian"], + "aarch64": ["--uint32-align=4", "--little-endian"], + "aarch64_be": ["--uint32-align=4", "--big-endian"], + "sh4": ["--uint32-align=4", "--big-endian"], + "powerpc": ["--uint32-align=4", "--big-endian"], + "powerpc64": ["--uint32-align=4", "--big-endian"], + "powerpc64le": ["--uint32-align=4", "--little-endian"], + "mips": ["--uint32-align=4", "--big-endian"], + "mipsisa32r6": ["--uint32-align=4", "--big-endian"], + "mips64": ["--uint32-align=4", "--big-endian"], + "mipsisa64r6": ["--uint32-align=4", "--big-endian"], + "mipsel": ["--uint32-align=4", "--little-endian"], + "mipsisa32r6el": ["--uint32-align=4", "--little-endian"], + "mips64el": ["--uint32-align=4", "--little-endian"], + "mipsisa64r6el": ["--uint32-align=4", "--little-endian"], + "riscv64": ["--uint32-align=4", "--little-endian"], + "riscv32": ["--uint32-align=4", "--little-endian"], + "i586": ["--uint32-align=4", "--little-endian"], + "i686": ["--uint32-align=4", "--little-endian"], + "x86_64": ["--uint32-align=4", "--little-endian"] + } + if target_arch in locale_arch_options: + arch_options = locale_arch_options[target_arch] + else: + bb.error("locale_arch_options not found for target_arch=" + target_arch) + bb.fatal("unknown arch:" + target_arch + " for locale_arch_options") + + # Need to set this so cross-localedef knows where the archive is + env = dict(os.environ) + env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive") + + for name in sorted(os.listdir(localedir)): + path = os.path.join(localedir, name) + if os.path.isdir(path): + cmd = 
["cross-localedef", "--verbose"] + cmd += arch_options + cmd += ["--add-to-archive", path] + subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT) + +class Indexer(object, metaclass=ABCMeta): + def __init__(self, d, deploy_dir): + self.d = d + self.deploy_dir = deploy_dir + + @abstractmethod + def write_index(self): + pass + +class PkgsList(object, metaclass=ABCMeta): + def __init__(self, d, rootfs_dir): + self.d = d + self.rootfs_dir = rootfs_dir + + @abstractmethod + def list_pkgs(self): + pass + +class PackageManager(object, metaclass=ABCMeta): + """ + This is an abstract class. Do not instantiate this directly. + """ + + def __init__(self, d, target_rootfs): + self.d = d + self.target_rootfs = target_rootfs + self.deploy_dir = None + self.deploy_lock = None + self._initialize_intercepts() + + def _initialize_intercepts(self): + bb.note("Initializing intercept dir for %s" % self.target_rootfs) + # As there might be more than one instance of PackageManager operating at the same time + # we need to isolate the intercept_scripts directories from each other, + # hence the ugly hash digest in dir name. 
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" % + (hashlib.sha256(self.target_rootfs.encode()).hexdigest())) + + postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split() + if not postinst_intercepts: + postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH") + if not postinst_intercepts_path: + postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts") + postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path) + + bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts)) + bb.utils.remove(self.intercepts_dir, True) + bb.utils.mkdirhier(self.intercepts_dir) + for intercept in postinst_intercepts: + shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept))) + + @abstractmethod + def _handle_intercept_failure(self, failed_script): + pass + + def _postpone_to_first_boot(self, postinst_intercept_hook): + with open(postinst_intercept_hook) as intercept: + registered_pkgs = None + for line in intercept.read().split("\n"): + m = re.match(r"^##PKGS:(.*)", line) + if m is not None: + registered_pkgs = m.group(1).strip() + break + + if registered_pkgs is not None: + bb.note("If an image is being built, the postinstalls for the following packages " + "will be postponed for first boot: %s" % + registered_pkgs) + + # call the backend dependent handler + self._handle_intercept_failure(registered_pkgs) + + + def run_intercepts(self, populate_sdk=None): + intercepts_dir = self.intercepts_dir + + bb.note("Running intercept scripts:") + os.environ['D'] = self.target_rootfs + os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE') + for script in os.listdir(intercepts_dir): + script_full = os.path.join(intercepts_dir, script) + + if script == "postinst_intercept" or not os.access(script_full, os.X_OK): + continue + + # we do not want to run any multilib 
variant of this + if script.startswith("delay_to_first_boot"): + self._postpone_to_first_boot(script_full) + continue + + if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32': + bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + continue + + bb.note("> Executing %s intercept ..." % script) + + try: + output = subprocess.check_output(script_full, stderr=subprocess.STDOUT) + if output: bb.note(output.decode("utf-8")) + except subprocess.CalledProcessError as e: + bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8"))) + if populate_sdk == 'host': + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + elif populate_sdk == 'target': + if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): + bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + else: + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + else: + if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): + bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + self._postpone_to_first_boot(script_full) + else: + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + + @abstractmethod + def update(self): + """ + Update the package manager package database. 
+ """ + pass + + @abstractmethod + def install(self, pkgs, attempt_only=False): + """ + Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is + True, installation failures are ignored. + """ + pass + + @abstractmethod + def remove(self, pkgs, with_dependencies=True): + """ + Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies' + is False, then any dependencies are left in place. + """ + pass + + @abstractmethod + def write_index(self): + """ + This function creates the index files + """ + pass + + @abstractmethod + def remove_packaging_data(self): + pass + + @abstractmethod + def list_installed(self): + pass + + @abstractmethod + def extract(self, pkg): + """ + Returns the path to a tmpdir where resides the contents of a package. + Deleting the tmpdir is responsability of the caller. + """ + pass + + @abstractmethod + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + """ + Add remote package feeds into repository manager configuration. The parameters + for the feeds are set by feed_uris, feed_base_paths and feed_archs. + See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS + for their description. + """ + pass + + def install_glob(self, globs, sdk=False): + """ + Install all packages that match a glob. 
+ """ + # TODO don't have sdk here but have a property on the superclass + # (and respect in install_complementary) + if sdk: + pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK") + else: + pkgdatadir = self.d.getVar("PKGDATA_DIR") + + try: + bb.note("Installing globbed packages...") + cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs] + bb.note('Running %s' % cmd) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + if stderr: bb.note(stderr.decode("utf-8")) + pkgs = stdout.decode("utf-8") + self.install(pkgs.split(), attempt_only=True) + except subprocess.CalledProcessError as e: + # Return code 1 means no packages matched + if e.returncode != 1: + bb.fatal("Could not compute globbed packages list. Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + def install_complementary(self, globs=None): + """ + Install complementary packages based upon the list of currently installed + packages e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to + call this function explicitly after the normal package installation. 
+ """ + if globs is None: + globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY') + split_linguas = set() + + for translation in self.d.getVar('IMAGE_LINGUAS').split(): + split_linguas.add(translation) + split_linguas.add(translation.split('-')[0]) + + split_linguas = sorted(split_linguas) + + for lang in split_linguas: + globs += " *-locale-%s" % lang + for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split(): + globs += (" " + complementary_linguas) % lang + + if globs is None: + return + + # we need to write the list of installed packages to a file because the + # oe-pkgdata-util reads it from a file + with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs: + pkgs = self.list_installed() + + provided_pkgs = set() + for pkg in pkgs.values(): + provided_pkgs |= set(pkg.get('provs', [])) + + output = oe.utils.format_pkg_list(pkgs, "arch") + installed_pkgs.write(output) + installed_pkgs.flush() + + cmd = ["oe-pkgdata-util", + "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name, + globs] + exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY') + if exclude: + cmd.extend(['--exclude=' + '|'.join(exclude.split())]) + try: + bb.note('Running %s' % cmd) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + if stderr: bb.note(stderr.decode("utf-8")) + complementary_pkgs = stdout.decode("utf-8") + complementary_pkgs = set(complementary_pkgs.split()) + skip_pkgs = sorted(complementary_pkgs & provided_pkgs) + install_pkgs = sorted(complementary_pkgs - provided_pkgs) + bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % ( + ' '.join(install_pkgs), + ' '.join(skip_pkgs))) + self.install(install_pkgs) + except subprocess.CalledProcessError as e: + bb.fatal("Could not compute complementary packages list. 
Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1': + target_arch = self.d.getVar('TARGET_ARCH') + localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale") + if os.path.exists(localedir) and os.listdir(localedir): + generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir) + # And now delete the binary locales + self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False) + + def deploy_dir_lock(self): + if self.deploy_dir is None: + raise RuntimeError("deploy_dir is not set!") + + lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") + + self.deploy_lock = bb.utils.lockfile(lock_file_name) + + def deploy_dir_unlock(self): + if self.deploy_lock is None: + return + + bb.utils.unlockfile(self.deploy_lock) + + self.deploy_lock = None + + def construct_uris(self, uris, base_paths): + """ + Construct URIs based on the following pattern: uri/base_path where 'uri' + and 'base_path' correspond to each element of the corresponding array + argument leading to len(uris) x len(base_paths) elements on the returned + array + """ + def _append(arr1, arr2, sep='/'): + res = [] + narr1 = [a.rstrip(sep) for a in arr1] + narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2] + for a1 in narr1: + if arr2: + for a2 in narr2: + res.append("%s%s%s" % (a1, sep, a2)) + else: + res.append(a1) + return res + return _append(uris, base_paths) + +def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies): + """ + Go through our do_package_write_X dependencies and hardlink the packages we depend + upon into the repo directory. This prevents us seeing other packages that may + have been built that we don't depend upon and also packages for architectures we don't + support. 
+ """ + import errno + + taskdepdata = d.getVar("BB_TASKDEPDATA", False) + mytaskname = d.getVar("BB_RUNTASK") + pn = d.getVar("PN") + seendirs = set() + multilibs = {} + + bb.utils.remove(subrepo_dir, recurse=True) + bb.utils.mkdirhier(subrepo_dir) + + # Detect bitbake -b usage + nodeps = d.getVar("BB_LIMITEDDEPS") or False + if nodeps or not filterbydependencies: + oe.path.symlink(deploydir, subrepo_dir, True) + return + + start = None + for dep in taskdepdata: + data = taskdepdata[dep] + if data[1] == mytaskname and data[0] == pn: + start = dep + break + if start is None: + bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?") + pkgdeps = set() + start = [start] + seen = set(start) + # Support direct dependencies (do_rootfs -> do_package_write_X) + # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X) + while start: + next = [] + for dep2 in start: + for dep in taskdepdata[dep2][3]: + if taskdepdata[dep][0] != pn: + if "do_" + taskname in dep: + pkgdeps.add(dep) + elif dep not in seen: + next.append(dep) + seen.add(dep) + start = next + + for dep in pkgdeps: + c = taskdepdata[dep][0] + manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs) + if not manifest: + bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2])) + if not os.path.exists(manifest): + continue + with open(manifest, "r") as f: + for l in f: + l = l.strip() + deploydir = os.path.normpath(deploydir) + if bb.data.inherits_class('packagefeed-stability', d): + dest = l.replace(deploydir + "-prediff", "") + else: + dest = l.replace(deploydir, "") + dest = subrepo_dir + dest + if l.endswith("/"): + if dest not in seendirs: + bb.utils.mkdirhier(dest) + seendirs.add(dest) + continue + # Try to hardlink the file, copy if that fails + destdir = os.path.dirname(dest) + if destdir not in seendirs: + bb.utils.mkdirhier(destdir) + seendirs.add(destdir) + try: + os.link(l, dest) + except OSError as err: + if 
err.errno == errno.EXDEV: + bb.utils.copyfile(l, dest) + else: + raise + + +def generate_index_files(d): + from oe.package_manager.rpm import RpmSubdirIndexer + from oe.package_manager.ipk import OpkgIndexer + from oe.package_manager.deb import DpkgIndexer + + classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split() + + indexer_map = { + "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')), + "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')), + "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB')) + } + + result = None + + for pkg_class in classes: + if not pkg_class in indexer_map: + continue + + if os.path.exists(indexer_map[pkg_class][1]): + result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() + + if result is not None: + bb.fatal(result) diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py new file mode 100644 index 0000000000..9f112ae25b --- /dev/null +++ b/meta/lib/oe/package_manager/deb/__init__.py @@ -0,0 +1,503 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import subprocess +from oe.package_manager import * + +class DpkgIndexer(Indexer): + def _create_configs(self): + bb.utils.mkdirhier(self.apt_conf_dir) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial")) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d")) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d")) + + with open(os.path.join(self.apt_conf_dir, "preferences"), + "w") as prefs_file: + pass + with open(os.path.join(self.apt_conf_dir, "sources.list"), + "w+") as sources_file: + pass + + with open(self.apt_conf_file, "w") as apt_conf: + with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"), + "apt", "apt.conf.sample")) as apt_conf_sample: + for line in apt_conf_sample.read().split("\n"): + line = re.sub(r"#ROOTFS#", "/dev/null", line) + line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) + apt_conf.write(line + "\n") + + def 
write_index(self): + self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"), + "apt-ftparchive") + self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") + self._create_configs() + + os.environ['APT_CONFIG'] = self.apt_conf_file + + pkg_archs = self.d.getVar('PACKAGE_ARCHS') + if pkg_archs is not None: + arch_list = pkg_archs.split() + sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS') + if sdk_pkg_archs is not None: + for a in sdk_pkg_archs.split(): + if a not in pkg_archs: + arch_list.append(a) + + all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() + arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list) + + apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive") + gzip = bb.utils.which(os.getenv('PATH'), "gzip") + + index_cmds = [] + deb_dirs_found = False + for arch in arch_list: + arch_dir = os.path.join(self.deploy_dir, arch) + if not os.path.isdir(arch_dir): + continue + + cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive) + + cmd += "%s -fcn Packages > Packages.gz;" % gzip + + with open(os.path.join(arch_dir, "Release"), "w+") as release: + release.write("Label: %s\n" % arch) + + cmd += "PSEUDO_UNLOAD=1 %s release . 
>> Release" % apt_ftparchive + + index_cmds.append(cmd) + + deb_dirs_found = True + + if not deb_dirs_found: + bb.note("There are no packages in %s" % self.deploy_dir) + return + + oe.utils.multiprocess_launch(create_index, index_cmds, self.d) + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + raise NotImplementedError('Package feed signing not implementd for dpkg') + +class PMPkgsList(PkgsList): + + def list_pkgs(self): + cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"), + "--admindir=%s/var/lib/dpkg" % self.rootfs_dir, + "-W"] + + cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n") + + try: + cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + return opkg_query(cmd_output) + +class OpkgDpkgPM(PackageManager): + def __init__(self, d, target_rootfs): + """ + This is an abstract class. Do not instantiate this directly. + """ + super(OpkgDpkgPM, self).__init__(d, target_rootfs) + + def package_info(self, pkg, cmd): + """ + Returns a dictionary with the package info. + + This method extracts the common parts for Opkg and Dpkg + """ + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Unable to list available packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + return opkg_query(output) + + def extract(self, pkg, pkg_info): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. 
+ + This method extracts the common parts for Opkg and Dpkg + """ + + ar_cmd = bb.utils.which(os.getenv("PATH"), "ar") + tar_cmd = bb.utils.which(os.getenv("PATH"), "tar") + pkg_path = pkg_info[pkg]["filepath"] + + if not os.path.isfile(pkg_path): + bb.fatal("Unable to extract package for '%s'." + "File %s doesn't exists" % (pkg, pkg_path)) + + tmp_dir = tempfile.mkdtemp() + current_dir = os.getcwd() + os.chdir(tmp_dir) + data_tar = 'data.tar.xz' + + try: + cmd = [ar_cmd, 'x', pkg_path] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + cmd = [tar_cmd, 'xf', data_tar] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + except OSError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename)) + + bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) + bb.utils.remove(os.path.join(tmp_dir, "debian-binary")) + bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz")) + os.chdir(current_dir) + + return tmp_dir + + def _handle_intercept_failure(self, registered_pkgs): + self.mark_packages("unpacked", registered_pkgs.split()) + +class DpkgPM(OpkgDpkgPM): + def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True): + super(DpkgPM, self).__init__(d, target_rootfs) + self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir) + + create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies) + + if apt_conf_dir is None: + self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") + else: + self.apt_conf_dir = apt_conf_dir + 
def mark_packages(self, status_tag, packages=None):
    """
    This function will change a package's status in /var/lib/dpkg/status file.
    If 'packages' is None then the new_status will be applied to all
    packages
    """
    status_file = self.target_rootfs + "/var/lib/dpkg/status"

    with open(status_file, "r") as sf:
        status = sf.read()

    if packages is None:
        # Rewrite every stanza in a single pass.
        status = re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                        r"Package: \1\n\2Status: \3%s" % status_tag,
                        status)
    else:
        if type(packages).__name__ != "list":
            raise TypeError("'packages' should be a list object")
        for pkg in packages:
            status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
                            r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
                            status)

    # Write to a temp file and rename into place.
    with open(status_file + ".tmp", "w+") as tmp_sf:
        tmp_sf.write(status)

    bb.utils.rename(status_file + ".tmp", status_file)
+ """ + info_dir = self.target_rootfs + "/var/lib/dpkg/info" + ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"]) + control_scripts = [ + ControlScript(".preinst", "Preinstall", "install"), + ControlScript(".postinst", "Postinstall", "configure")] + status_file = self.target_rootfs + "/var/lib/dpkg/status" + installed_pkgs = [] + + with open(status_file, "r") as status: + for line in status.read().split('\n'): + m = re.match(r"^Package: (.*)", line) + if m is not None: + installed_pkgs.append(m.group(1)) + + if package_name is not None and not package_name in installed_pkgs: + return + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + for pkg_name in installed_pkgs: + for control_script in control_scripts: + p_full = os.path.join(info_dir, pkg_name + control_script.suffix) + if os.path.exists(p_full): + try: + bb.note("Executing %s for package: %s ..." % + (control_script.name.lower(), pkg_name)) + output = subprocess.check_output([p_full, control_script.argument], + stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.warn("%s for package %s failed with %d:\n%s" % + (control_script.name, pkg_name, e.returncode, + e.output.decode("utf-8"))) + failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + + def update(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + self.deploy_dir_lock() + + cmd = "%s update" % self.apt_get_cmd + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update the package index files. 
def install(self, pkgs, attempt_only=False):
    """Install *pkgs* into the target rootfs via apt-get; with
    attempt_only, failures are downgraded to warnings."""
    if attempt_only and len(pkgs) == 0:
        return

    os.environ['APT_CONFIG'] = self.apt_conf_file

    cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s" % \
        (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))

    try:
        bb.note("Installing the following packages: %s" % ' '.join(pkgs))
        bb.note(subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8"))
    except subprocess.CalledProcessError as e:
        message = ("Unable to install packages. "
                   "Command '%s' returned %d:\n%s" %
                   (cmd, e.returncode, e.output.decode("utf-8")))
        if attempt_only:
            bb.warn(message)
        else:
            bb.fatal(message)

    # apt leaves *.dpkg-new files/dirs behind; strip that suffix in place.
    for root, dirs, files in os.walk(self.target_rootfs):
        for entry in dirs + files:
            renamed = entry.replace(".dpkg-new", "")
            if renamed != entry:
                bb.utils.rename(os.path.join(root, entry),
                                os.path.join(root, renamed))
def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
    """Write /etc/apt/sources.list entries in the target rootfs pointing
    at the given package feed URIs (per-arch when archs are known)."""
    if feed_uris == "":
        return

    sources_conf = os.path.join("%s/etc/apt/sources.list"
                                % self.target_rootfs)
    # Nothing to do if the rootfs has no /etc/apt at all.
    if not os.path.exists(os.path.dirname(sources_conf)):
        return

    if feed_archs is None:
        # Only archs that actually have packages deployed.
        arch_list = [arch for arch in self.all_arch_list
                     if os.path.exists(os.path.join(self.deploy_dir, arch))]
    else:
        arch_list = feed_archs.split()

    feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())

    with open(sources_conf, "w+") as sources_file:
        for uri in feed_uris:
            if not arch_list:
                bb.note('Adding dpkg channel at (%s)' % uri)
                sources_file.write("deb [trusted=yes] %s ./\n" % uri)
                continue
            for arch in arch_list:
                bb.note('Adding dpkg channel at (%s)' % uri)
                sources_file.write("deb [trusted=yes] %s/%s ./\n" %
                                   (uri, arch))
def _create_configs(self, archs, base_archs):
    """Generate the apt configuration (preferences, sources.list,
    apt.conf) and the skeleton dpkg database for the target rootfs.

    archs: all candidate package archs; base_archs: the base DPKG archs
    (underscores are normalized to dashes for apt).
    """
    base_archs = re.sub(r"_", r"-", base_archs)

    # Start from a clean apt configuration directory.
    if os.path.exists(self.apt_conf_dir):
        bb.utils.remove(self.apt_conf_dir, True)

    bb.utils.mkdirhier(self.apt_conf_dir)
    bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
    bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
    bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")

    # Only archs that actually have deployed packages.
    arch_list = [arch for arch in self.all_arch_list
                 if os.path.exists(os.path.join(self.deploy_dir, arch))]

    with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
        # Later archs in the list get a higher pin priority.
        priority = 801
        for arch in arch_list:
            prefs_file.write(
                "Package: *\n"
                "Pin: release l=%s\n"
                "Pin-Priority: %d\n\n" % (arch, priority))
            priority += 5

        # PACKAGE_EXCLUDE entries are pinned out entirely.
        pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
        for pkg in pkg_exclude.split():
            prefs_file.write(
                "Package: %s\n"
                "Pin: release *\n"
                "Pin-Priority: -1\n\n" % pkg)

    arch_list.reverse()

    with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
        for arch in arch_list:
            sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
                               os.path.join(self.deploy_dir, arch))

    # Extend the base arch list with the DPKG_ARCH of each multilib variant.
    base_arch_list = base_archs.split()
    multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
    for variant in multilib_variants.split():
        localdata = bb.data.createCopy(self.d)
        variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
        # Fix: the original also read DPKG_ARCH into an unused `orig_arch`
        # local before overriding DEFAULTTUNE; dropped as dead code.
        localdata.setVar("DEFAULTTUNE", variant_tune)
        variant_arch = localdata.getVar("DPKG_ARCH")
        if variant_arch not in base_arch_list:
            base_arch_list.append(variant_arch)

    # Instantiate apt.conf from the staged sample, substituting the
    # rootfs/conf paths and expanding the Architecture(s) lines.
    with open(self.apt_conf_file, "w+") as apt_conf:
        with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
            for line in apt_conf_sample.read().split("\n"):
                match_arch = re.match(r" Architecture \".*\";$", line)
                if match_arch:
                    architectures = ""
                    for base_arch in base_arch_list:
                        architectures += "\"%s\";" % base_arch
                    apt_conf.write(" Architectures {%s};\n" % architectures)
                    apt_conf.write(" Architecture \"%s\";\n" % base_archs)
                else:
                    line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
                    line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
                    apt_conf.write(line + "\n")

    # Minimal dpkg database skeleton inside the rootfs.
    target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
    bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
    bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))

    if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
        open(os.path.join(target_dpkg_dir, "status"), "w+").close()
    if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
        open(os.path.join(target_dpkg_dir, "available"), "w+").close()
def package_info(self, pkg):
    """
    Returns a dictionary with the package info.
    """
    # Query apt-cache via the common Opkg/Dpkg helper, then add the
    # absolute path of the .deb inside our deploy dir.
    info = super(DpkgPM, self).package_info(pkg, "%s show %s" % (self.apt_cache_cmd, pkg))
    entry = info[pkg]
    entry["filepath"] = os.path.join(self.deploy_dir,
                                     entry["pkgarch"],
                                     entry["filename"])
    return info
class PkgManifest(Manifest):
    """Image/SDK package manifest writer for the deb backend."""

    def create_initial(self):
        # Emit one "<type>,<package>" line for every package requested by
        # the variables mapped for this manifest type.
        with open(self.initial_manifest, "w+") as manifest:
            manifest.write(self.initial_manifest_file_header)

            var_map = self.var_maps[self.manifest_type]
            for var, pkg_type in var_map.items():
                pkg_list = self.d.getVar(var)
                if pkg_list is None:
                    continue
                for pkg in pkg_list.split():
                    manifest.write("%s,%s\n" % (pkg_type, pkg))

    def create_final(self):
        # Not needed for the deb backend.
        pass

    def create_full(self, pm):
        # Not needed for the deb backend.
        pass
= False + pkg_depends = "" + + with open(status_file) as status: + data = status.read() + status.close() + for line in data.split('\n'): + m_pkg = re.match(r"^Package: (.*)", line) + m_status = re.match(r"^Status:.*unpacked", line) + m_depends = re.match(r"^Depends: (.*)", line) + + #Only one of m_pkg, m_status or m_depends is not None at time + #If m_pkg is not None, we started a new package + if m_pkg is not None: + #Get Package name + pkg_name = m_pkg.group(1) + #Make sure we reset other variables + pkg_status_match = False + pkg_depends = "" + elif m_status is not None: + #New status matched + pkg_status_match = True + elif m_depends is not None: + #New depends macthed + pkg_depends = m_depends.group(1) + else: + pass + + #Now check if we can process package depends and postinst + if "" != pkg_name and pkg_status_match: + pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) + else: + #Not enough information + pass + + # remove package dependencies not in postinsts + pkg_names = list(pkgs.keys()) + for pkg_name in pkg_names: + deps = pkgs[pkg_name][:] + + for d in deps: + if d not in pkg_names: + pkgs[pkg_name].remove(d) + + return pkgs + + def _get_delayed_postinsts_common(self, status_file): + def _dep_resolve(graph, node, resolved, seen): + seen.append(node) + + for edge in graph[node]: + if edge not in resolved: + if edge in seen: + raise RuntimeError("Packages %s and %s have " \ + "a circular dependency in postinsts scripts." 
def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
    """Copy delayed postinst scripts into the image, numbered in
    dependency order, unless the image keeps its package manager."""
    # With package-management in IMAGE_FEATURES the PM runs them itself.
    if bb.utils.contains("IMAGE_FEATURES", "package-management",
                         True, False, self.d):
        return

    for num, p in enumerate(self._get_delayed_postinsts()):
        bb.utils.mkdirhier(dst_postinst_dir)
        src = os.path.join(src_postinst_dir, p + ".postinst")
        if os.path.exists(src):
            shutil.copy(src,
                        os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
def _create(self):
    """Build the dpkg-based root filesystem: index the feed, install the
    manifest packages, then run pre/post-install processing."""
    pkgs_to_install = self.manifest.parse_initial_manifest()
    deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
    deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')

    alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
    bb.utils.mkdirhier(alt_dir)

    # update PM index files
    self.pm.write_index()

    execute_pre_post_process(self.d, deb_pre_process_cmds)

    if self.progress_reporter:
        self.progress_reporter.next_stage()
        # Don't support incremental, so skip that
        self.progress_reporter.next_stage()

    self.pm.update()

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    for pkg_type in self.install_order:
        if pkg_type in pkgs_to_install:
            # Idiom fix: pass the attempt_only flag directly instead of
            # indexing a [False, True] pair with a boolean.
            self.pm.install(pkgs_to_install[pkg_type],
                            pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)
            self.pm.fix_broken_dependencies()

    if self.progress_reporter:
        # Don't support attemptonly, so skip that
        self.progress_reporter.next_stage()
        self.progress_reporter.next_stage()

    self.pm.install_complementary()

    if self.progress_reporter:
        self.progress_reporter.next_stage()

    self._setup_dbg_rootfs(['/var/lib/dpkg'])

    self.pm.fix_broken_dependencies()

    self.pm.mark_packages("installed")

    self.pm.run_pre_post_installs()

    execute_pre_post_process(self.d, deb_post_process_cmds)

    if self.progress_reporter:
        self.progress_reporter.next_stage()
def __init__(self, d, manifest_dir=None):
    """Set up target and host dpkg package managers and manifests for
    SDK population."""
    super(PkgSdk, self).__init__(d, manifest_dir)

    aptconf_base = self.d.getVar("APTCONF_TARGET")
    self.target_conf_dir = os.path.join(aptconf_base, "apt")
    self.host_conf_dir = os.path.join(aptconf_base, "apt-sdk")

    self.target_manifest = PkgManifest(d, self.manifest_dir,
                                       Manifest.MANIFEST_TYPE_SDK_TARGET)
    self.host_manifest = PkgManifest(d, self.manifest_dir,
                                     Manifest.MANIFEST_TYPE_SDK_HOST)

    # The extensible SDK keeps its repo separate from the standard one.
    if "sdk_ext" in d.getVar("BB_RUNTASK"):
        deb_repo_workdir = "oe-sdk-ext-repo"
    else:
        deb_repo_workdir = "oe-sdk-repo"

    self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                            self.d.getVar("PACKAGE_ARCHS"),
                            self.d.getVar("DPKG_ARCH"),
                            self.target_conf_dir,
                            deb_repo_workdir=deb_repo_workdir)

    self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                          self.d.getVar("SDK_PACKAGE_ARCHS"),
                          self.d.getVar("DEB_SDK_ARCH"),
                          self.host_conf_dir,
                          deb_repo_workdir=deb_repo_workdir)
def _populate_sysroot(self, pm, manifest):
    """Install the packages listed in *manifest* into the sysroot managed
    by package manager *pm*, honouring the configured install order."""
    pkgs_to_install = manifest.parse_initial_manifest()

    pm.write_index()
    pm.update()

    for pkg_type in self.install_order:
        if pkg_type in pkgs_to_install:
            # Idiom fix: pass the attempt_only flag directly instead of
            # indexing a [False, True] pair with a boolean.
            pm.install(pkgs_to_install[pkg_type],
                       pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)
def write_index(self):
    """Generate (and optionally GPG-sign) the opkg Packages indexes for
    every configured package architecture directory."""
    arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
                 "SDK_PACKAGE_ARCHS",
                 ]

    opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
    signer = None
    if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
        signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))

    # Make sure a top-level Packages file exists.
    top_index = os.path.join(self.deploy_dir, "Packages")
    if not os.path.exists(top_index):
        open(top_index, "w").close()

    index_cmds = set()
    index_sign_files = set()
    for arch_var in arch_vars:
        archs = self.d.getVar(arch_var)
        if archs is None:
            continue

        for arch in archs.split():
            pkgs_dir = os.path.join(self.deploy_dir, arch)
            pkgs_file = os.path.join(pkgs_dir, "Packages")

            if not os.path.isdir(pkgs_dir):
                continue
            if not os.path.exists(pkgs_file):
                open(pkgs_file, "w").close()

            index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
                           (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
            index_sign_files.add(pkgs_file)

    if not index_cmds:
        bb.note("There are no packages in %s!" % self.deploy_dir)
        return

    oe.utils.multiprocess_launch(create_index, index_cmds, self.d)

    if signer:
        feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
        is_ascii_sig = (feed_sig_type.upper() != "BIN")
        for f in index_sign_files:
            signer.detach_sign(f,
                               self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                               self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                               armor=is_ascii_sig)
class PMPkgsList(PkgsList):
    """Installed-package lister backed by `opkg status`."""

    def __init__(self, d, rootfs_dir):
        super(PMPkgsList, self).__init__(d, rootfs_dir)
        config_file = d.getVar("IPKGCONF_TARGET")

        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
        self.opkg_args += self.d.getVar("OPKG_ARGS")

    def list_pkgs(self, format=None):
        cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)

        # opkg returns success even when it printed some
        # "Collected errors:" report to stderr. Mixing stderr into
        # stdout then leads to random failures later on when
        # parsing the output. To avoid this we need to collect both
        # output streams separately and check for empty stderr.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, shell=True)
        out, err = proc.communicate()
        out = out.decode("utf-8")
        err = err.decode("utf-8")
        if proc.returncode or err:
            bb.fatal("Cannot get the installed packages list. Command '%s' "
                     "returned %d and stderr:\n%s" % (cmd, proc.returncode, err))

        return opkg_query(out)
def extract(self, pkg, pkg_info):
    """
    Returns the path to a tmpdir where resides the contents of a package.

    Deleting the tmpdir is responsibility of the caller.

    This method extracts the common parts for Opkg and Dpkg
    """
    ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
    tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
    # abspath so the archive is still found when running with cwd=tmp_dir.
    pkg_path = os.path.abspath(pkg_info[pkg]["filepath"])

    if not os.path.isfile(pkg_path):
        bb.fatal("Unable to extract package for '%s'."
                 "File %s doesn't exists" % (pkg, pkg_path))

    tmp_dir = tempfile.mkdtemp()
    # data.tar.xz is the member name inside the ar archive.
    data_tar = 'data.tar.xz'

    try:
        # Fix: run the tools with cwd=tmp_dir instead of os.chdir(); the
        # original mutated the process-wide working directory and never
        # restored it on the error paths.
        cmd = [ar_cmd, 'x', pkg_path]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=tmp_dir)
        cmd = [tar_cmd, 'xf', data_tar]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=tmp_dir)
    except subprocess.CalledProcessError as e:
        bb.utils.remove(tmp_dir, recurse=True)
        bb.fatal("Unable to extract %s package. Command '%s' "
                 "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
    except OSError as e:
        bb.utils.remove(tmp_dir, recurse=True)
        bb.fatal("Unable to extract %s package. Command '%s' "
                 "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))

    bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
    bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
    bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))

    return tmp_dir
def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
    """Package manager driving opkg against *target_rootfs*."""
    super(OpkgPM, self).__init__(d, target_rootfs)

    self.config_file = config_file
    self.pkg_archs = archs
    self.task_name = task_name

    # Local ipk repository the rootfs will be installed from.
    self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
    self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
    self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
    self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
    self.opkg_args += self.d.getVar("OPKG_ARGS")

    if prepare_index:
        create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"),
                            "package_write_ipk", filterbydependencies)

    self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
    bb.utils.mkdirhier(self.opkg_dir)

    self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
    saved_root = self.d.expand('${T}/saved')
    if not os.path.exists(saved_root):
        bb.utils.mkdirhier(saved_root)

    # When building from feeds, the opkg config points at remote URIs
    # instead of the local deploy directory.
    self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
    if self.from_feeds:
        self._create_custom_config()
    else:
        self._create_config()

    self.indexer = OpkgIndexer(self.d, self.deploy_dir)
def _create_custom_config(self):
    """Write an opkg config for building from remote feeds (IPK_FEED_URIS)
    rather than from the local deploy directory."""
    bb.note("Building from feeds activated!")

    with open(self.config_file, "w+") as config_file:
        priority = 1
        for arch in self.pkg_archs.split():
            config_file.write("arch %s %d\n" % (arch, priority))
            priority += 5

        for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
            feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)

            if feed_match is not None:
                feed_name = feed_match.group(1)
                feed_uri = feed_match.group(2)

                bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))

                config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))

        # Allow to use package deploy directory contents as quick devel-testing
        # feed. This creates individual feed configs for each arch subdir of those
        # specified as compatible for the current machine.
        # NOTE: Development-helper feature, NOT a full-fledged feed.
        if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
            for arch in self.pkg_archs.split():
                cfg_file_name = os.path.join(self.target_rootfs,
                                             self.d.getVar("sysconfdir"),
                                             "opkg",
                                             "local-%s-feed.conf" % arch)

                with open(cfg_file_name, "w+") as cfg_file:
                    cfg_file.write("src/gz local-%s %s/%s" %
                                   (arch,
                                    self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
                                    arch))

        if self.d.getVar('OPKGLIBDIR') != '/var/lib':
            # There is no command line option for this anymore, we need to add
            # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
            # the default value of "/var/lib" as defined in opkg:
            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
            # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
            # Fix: these lines previously went to `cfg_file`, which is the
            # per-arch local-feed handle opened (and already closed) above,
            # and is unbound entirely when FEED_DEPLOYDIR_BASE_URI is unset.
            # Write them to the main config file, matching _create_config().
            config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
            config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
            config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" + config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) + config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) + config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) + + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + if feed_uris == "": + return + + rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' + % self.target_rootfs) + + os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True) + + feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) + archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split() + + with open(rootfs_config, "w+") as config_file: + uri_iterator = 0 + for uri in feed_uris: + if archs: + for arch in archs: + if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))): + continue + bb.note('Adding opkg feed url-%s-%d (%s)' % + (arch, uri_iterator, uri)) + config_file.write("src/gz uri-%s-%d %s/%s\n" % + (arch, uri_iterator, uri, arch)) + else: + bb.note('Adding opkg feed url-%d (%s)' % + (uri_iterator, uri)) + config_file.write("src/gz uri-%d %s\n" % + (uri_iterator, uri)) + + uri_iterator += 1 + + def update(self): + self.deploy_dir_lock() + + cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + self.deploy_dir_unlock() + bb.fatal("Unable to update the package index files. 
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if not pkgs: + return + + cmd = "%s %s" % (self.opkg_cmd, self.opkg_args) + for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split(): + cmd += " --add-exclude %s" % exclude + for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split(): + cmd += " --add-ignore-recommends %s" % bad_recommendation + cmd += " install " + cmd += " ".join(pkgs) + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + failed_pkgs = [] + for line in output.split('\n'): + if line.endswith("configuration required on target."): + bb.warn(line) + failed_pkgs.append(line.split(".")[0]) + if failed_pkgs: + failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. 
" + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output.decode("utf-8"))) + + def remove(self, pkgs, with_dependencies=True): + if not pkgs: + return + + if with_dependencies: + cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + else: + cmd = "%s %s --force-depends remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + cachedir = oe.path.join(self.target_rootfs, self.d.getVar("localstatedir"), "cache", "opkg") + bb.utils.remove(self.opkg_dir, True) + bb.utils.remove(cachedir, True) + + def remove_lists(self): + if not self.from_feeds: + bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True) + + def list_installed(self): + return PMPkgsList(self.d, self.target_rootfs).list_pkgs() + + def dummy_install(self, pkgs): + """ + The following function dummy installs pkgs and returns the log of output. 
+ """ + if len(pkgs) == 0: + return + + # Create an temp dir as opkg root for dummy installation + temp_rootfs = self.d.expand('${T}/opkg') + opkg_lib_dir = self.d.getVar('OPKGLIBDIR') + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg') + bb.utils.mkdirhier(temp_opkg_dir) + + opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) + opkg_args += self.d.getVar("OPKG_ARGS") + + cmd = "%s %s update" % (self.opkg_cmd, opkg_args) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + # Dummy installation + cmd = "%s %s --noaction install %s " % (self.opkg_cmd, + opkg_args, + ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to dummy install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + bb.utils.remove(temp_rootfs, True) + + return output + + def backup_packaging_data(self): + # Save the opkglib for increment ipk image generation + if os.path.exists(self.saved_opkg_dir): + bb.utils.remove(self.saved_opkg_dir, True) + shutil.copytree(self.opkg_dir, + self.saved_opkg_dir, + symlinks=True) + + def recover_packaging_data(self): + # Move the opkglib back + if os.path.exists(self.saved_opkg_dir): + if os.path.exists(self.opkg_dir): + bb.utils.remove(self.opkg_dir, True) + + bb.note('Recover packaging data') + shutil.copytree(self.saved_opkg_dir, + self.opkg_dir, + symlinks=True) + + def package_info(self, pkg): + """ + Returns a dictionary with the package info. 
+ """ + cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg) + pkg_info = super(OpkgPM, self).package_info(pkg, cmd) + + pkg_arch = pkg_info[pkg]["arch"] + pkg_filename = pkg_info[pkg]["filename"] + pkg_info[pkg]["filepath"] = \ + os.path.join(self.deploy_dir, pkg_arch, pkg_filename) + + return pkg_info + + def extract(self, pkg): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. + """ + pkg_info = self.package_info(pkg) + if not pkg_info: + bb.fatal("Unable to get information for package '%s' while " + "trying to extract the package." % pkg) + + tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info) + bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) + + return tmp_dir diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py new file mode 100644 index 0000000000..ae451c5c70 --- /dev/null +++ b/meta/lib/oe/package_manager/ipk/manifest.py @@ -0,0 +1,74 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.manifest import Manifest +import re + +class PkgManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. 
+ """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var)) + if split_pkgs is not None: + pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) + else: + pkg_list = self.d.getVar(var) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) + + for pkg_type in sorted(pkgs): + for pkg in sorted(pkgs[pkg_type].split()): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + if not os.path.exists(self.initial_manifest): + self.create_initial() + + initial_manifest = self.parse_initial_manifest() + pkgs_to_install = list() + for pkg_type in initial_manifest: + pkgs_to_install += initial_manifest[pkg_type] + if len(pkgs_to_install) == 0: + return + + output = pm.dummy_install(pkgs_to_install).decode('utf-8') + + with open(self.full_manifest, 'w+') as manifest: + pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') + for line in set(output.split('\n')): + m = pkg_re.match(line) + if m: + manifest.write(m.group(1) + '\n') + + return diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py new file mode 100644 index 0000000000..10a831994e --- /dev/null +++ b/meta/lib/oe/package_manager/ipk/rootfs.py @@ -0,0 +1,350 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import filecmp +import 
shutil +from oe.rootfs import Rootfs +from oe.manifest import Manifest +from oe.utils import execute_pre_post_process +from oe.package_manager.ipk.manifest import PkgManifest +from oe.package_manager.ipk import OpkgPM + +class DpkgOpkgRootfs(Rootfs): + def __init__(self, d, progress_reporter=None, logcatcher=None): + super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher) + + def _get_pkgs_postinsts(self, status_file): + def _get_pkg_depends_list(pkg_depends): + pkg_depends_list = [] + # filter version requirements like libc (>= 1.1) + for dep in pkg_depends.split(', '): + m_dep = re.match(r"^(.*) \(.*\)$", dep) + if m_dep: + dep = m_dep.group(1) + pkg_depends_list.append(dep) + + return pkg_depends_list + + pkgs = {} + pkg_name = "" + pkg_status_match = False + pkg_depends = "" + + with open(status_file) as status: + data = status.read() + status.close() + for line in data.split('\n'): + m_pkg = re.match(r"^Package: (.*)", line) + m_status = re.match(r"^Status:.*unpacked", line) + m_depends = re.match(r"^Depends: (.*)", line) + + #Only one of m_pkg, m_status or m_depends is not None at time + #If m_pkg is not None, we started a new package + if m_pkg is not None: + #Get Package name + pkg_name = m_pkg.group(1) + #Make sure we reset other variables + pkg_status_match = False + pkg_depends = "" + elif m_status is not None: + #New status matched + pkg_status_match = True + elif m_depends is not None: + #New depends macthed + pkg_depends = m_depends.group(1) + else: + pass + + #Now check if we can process package depends and postinst + if "" != pkg_name and pkg_status_match: + pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) + else: + #Not enough information + pass + + # remove package dependencies not in postinsts + pkg_names = list(pkgs.keys()) + for pkg_name in pkg_names: + deps = pkgs[pkg_name][:] + + for d in deps: + if d not in pkg_names: + pkgs[pkg_name].remove(d) + + return pkgs + + def _get_delayed_postinsts_common(self, status_file): + def 
_dep_resolve(graph, node, resolved, seen): + seen.append(node) + + for edge in graph[node]: + if edge not in resolved: + if edge in seen: + raise RuntimeError("Packages %s and %s have " \ + "a circular dependency in postinsts scripts." \ + % (node, edge)) + _dep_resolve(graph, edge, resolved, seen) + + resolved.append(node) + + pkg_list = [] + + pkgs = None + if not self.d.getVar('PACKAGE_INSTALL').strip(): + bb.note("Building empty image") + else: + pkgs = self._get_pkgs_postinsts(status_file) + if pkgs: + root = "__packagegroup_postinst__" + pkgs[root] = list(pkgs.keys()) + _dep_resolve(pkgs, root, pkg_list, []) + pkg_list.remove(root) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir): + if bb.utils.contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + return + num = 0 + for p in self._get_delayed_postinsts(): + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + +class PkgRootfs(DpkgOpkgRootfs): + def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): + super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher) + self.log_check_regex = '(exit 1|Collected errors)' + + self.manifest = PkgManifest(d, manifest_dir) + self.opkg_conf = self.d.getVar("IPKGCONF_TARGET") + self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS") + + self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or "" + if self._remove_old_rootfs(): + bb.utils.remove(self.image_rootfs, True) + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + else: + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + self.pm.recover_packaging_data() + + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) + ''' + 
Compare two files with the same key twice to see if they are equal. + If they are not equal, it means they are duplicated and come from + different packages. + ''' + def _file_equal(self, key, f1, f2): + if filecmp.cmp(f1, f2): + return True + # Not equal + return False + + """ + This function was reused from the old implementation. + See commit: "image.bbclass: Added variables for multilib support." by + Lianhao Lu. + """ + def _multilib_sanity_test(self, dirs): + + allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP") + if allow_replace is None: + allow_replace = "" + + allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace)) + error_prompt = "Multilib check error:" + + files = {} + for dir in dirs: + for root, subfolders, subfiles in os.walk(dir): + for file in subfiles: + item = os.path.join(root, file) + key = str(os.path.join("/", os.path.relpath(item, dir))) + + valid = True + if key in files: + #check whether the file is allow to replace + if allow_rep.match(key): + valid = True + else: + if os.path.exists(files[key]) and \ + os.path.exists(item) and \ + not self._file_equal(key, files[key], item): + valid = False + bb.fatal("%s duplicate files %s %s is not the same\n" % + (error_prompt, item, files[key])) + + #pass the check, add to list + if valid: + files[key] = item + + def _multilib_test_install(self, pkgs): + ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS") + bb.utils.mkdirhier(ml_temp) + + dirs = [self.image_rootfs] + + for variant in self.d.getVar("MULTILIB_VARIANTS").split(): + ml_target_rootfs = os.path.join(ml_temp, variant) + + bb.utils.remove(ml_target_rootfs, True) + + ml_opkg_conf = os.path.join(ml_temp, + variant + "-" + os.path.basename(self.opkg_conf)) + + ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False) + + ml_pm.update() + ml_pm.install(pkgs) + + dirs.append(ml_target_rootfs) + + self._multilib_sanity_test(dirs) + + ''' + While ipk incremental image generation is enabled, it will remove the + 
unneeded pkgs by comparing the old full manifest in previous existing + image and the new full manifest in the current image. + ''' + def _remove_extra_packages(self, pkgs_initial_install): + if self.inc_opkg_image_gen == "1": + # Parse full manifest in previous existing image creation session + old_full_manifest = self.manifest.parse_full_manifest() + + # Create full manifest for the current image session, the old one + # will be replaced by the new one. + self.manifest.create_full(self.pm) + + # Parse full manifest in current image creation session + new_full_manifest = self.manifest.parse_full_manifest() + + pkg_to_remove = list() + for pkg in old_full_manifest: + if pkg not in new_full_manifest: + pkg_to_remove.append(pkg) + + if pkg_to_remove != []: + bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + ''' + Compare with previous existing image creation, if some conditions + triggered, the previous old image should be removed. + The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS + and BAD_RECOMMENDATIONS' has been changed. 
+ ''' + def _remove_old_rootfs(self): + if self.inc_opkg_image_gen != "1": + return True + + vars_list_file = self.d.expand('${T}/vars_list') + + old_vars_list = "" + if os.path.exists(vars_list_file): + old_vars_list = open(vars_list_file, 'r+').read() + + new_vars_list = '%s:%s:%s\n' % \ + ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(), + (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(), + (self.d.getVar('PACKAGE_EXCLUDE') or '').strip()) + open(vars_list_file, 'w+').write(new_vars_list) + + if old_vars_list != new_vars_list: + return True + + return False + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS') + opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS') + + # update PM index files + self.pm.write_index() + + execute_pre_post_process(self.d, opkg_pre_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + # Steps are a bit different in order, skip next + self.progress_reporter.next_stage() + + self.pm.update() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + if self.inc_opkg_image_gen == "1": + self._remove_extra_packages(pkgs_to_install) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + # For multilib, we perform a sanity test before final install + # If sanity test fails, it will automatically do a bb.fatal() + # and the installation will stop + if pkg_type == Manifest.PKG_TYPE_MULTILIB: + self._multilib_test_install(pkgs_to_install[pkg_type]) + + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install_complementary() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + opkg_lib_dir = self.d.getVar('OPKGLIBDIR') + opkg_dir = 
os.path.join(opkg_lib_dir, 'opkg') + self._setup_dbg_rootfs([opkg_dir]) + + execute_pre_post_process(self.d, opkg_post_process_cmds) + + if self.inc_opkg_image_gen == "1": + self.pm.backup_packaging_data() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + @staticmethod + def _depends_list(): + return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR'] + + def _get_delayed_postinsts(self): + status_file = os.path.join(self.image_rootfs, + self.d.getVar('OPKGLIBDIR').strip('/'), + "opkg", "status") + return self._get_delayed_postinsts_common(status_file) + + def _save_postinsts(self): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") + return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) + + def _log_check(self): + self._log_check_warn() + self._log_check_error() + + def _cleanup(self): + self.pm.remove_lists() diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py new file mode 100644 index 0000000000..e2ca415c8e --- /dev/null +++ b/meta/lib/oe/package_manager/ipk/sdk.py @@ -0,0 +1,102 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import glob +import shutil +from oe.utils import execute_pre_post_process +from oe.sdk import Sdk +from oe.package_manager.ipk.manifest import PkgManifest +from oe.manifest import Manifest +from oe.package_manager.ipk import OpkgPM + +class PkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(PkgSdk, self).__init__(d, manifest_dir) + + # In sdk_list_installed_packages the call to opkg is hardcoded to + # always use IPKGCONF_TARGET and there's no exposed API to change this + # so simply override IPKGCONF_TARGET to use this separated config file. 
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET") + d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target) + + self.target_conf = self.d.getVar("IPKGCONF_TARGET") + self.host_conf = self.d.getVar("IPKGCONF_SDK") + + self.target_manifest = PkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = PkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + ipk_repo_workdir = "oe-sdk-repo" + if "sdk_ext" in d.getVar("BB_RUNTASK"): + ipk_repo_workdir = "oe-sdk-ext-repo" + + self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf, + self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"), + ipk_repo_workdir=ipk_repo_workdir) + + self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf, + self.d.getVar("SDK_PACKAGE_ARCHS"), + ipk_repo_workdir=ipk_repo_workdir) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1": + pm.write_index() + + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) + + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) + + self.target_pm.run_intercepts(populate_sdk='target') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + self.install_locales(self.host_pm) + + self.host_pm.run_intercepts(populate_sdk='host') + + 
execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.host_pm.remove_packaging_data() + + target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) + host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) + + self.mkdirhier(target_sysconfdir) + shutil.copy(self.target_conf, target_sysconfdir) + os.chmod(os.path.join(target_sysconfdir, + os.path.basename(self.target_conf)), 0o644) + + self.mkdirhier(host_sysconfdir) + shutil.copy(self.host_conf, host_sysconfdir) + os.chmod(os.path.join(host_sysconfdir, + os.path.basename(self.host_conf)), 0o644) + + native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk').strip('/'), + "lib", "opkg") + self.mkdirhier(native_opkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): + self.movefile(f, native_opkg_state_dir) + + self.remove(os.path.join(self.sdk_output, "var"), True) diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py new file mode 100644 index 0000000000..b392581069 --- /dev/null +++ b/meta/lib/oe/package_manager/rpm/__init__.py @@ -0,0 +1,410 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import shutil +import subprocess +from oe.package_manager import * + +class RpmIndexer(Indexer): + def write_index(self): + self.do_write_index(self.deploy_dir) + + def do_write_index(self, deploy_dir): + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) + else: + signer = None + + createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c") + result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir)) + if result: + bb.fatal(result) + + # Sign repomd + if signer: + sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') + 
is_ascii_sig = (sig_type.upper() != "BIN") + signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'), + self.d.getVar('PACKAGE_FEED_GPG_NAME'), + self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), + armor=is_ascii_sig) + +class RpmSubdirIndexer(RpmIndexer): + def write_index(self): + bb.note("Generating package index for %s" %(self.deploy_dir)) + # Remove the existing repodata to ensure that we re-generate it no matter what + bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True) + + self.do_write_index(self.deploy_dir) + for entry in os.walk(self.deploy_dir): + if os.path.samefile(self.deploy_dir, entry[0]): + for dir in entry[1]: + if dir != 'repodata': + dir_path = oe.path.join(self.deploy_dir, dir) + bb.note("Generating package index for %s" %(dir_path)) + self.do_write_index(dir_path) + + +class PMPkgsList(PkgsList): + def list_pkgs(self): + return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed() + +class RpmPM(PackageManager): + def __init__(self, + d, + target_rootfs, + target_vendor, + task_name='target', + arch_var=None, + os_var=None, + rpm_repo_workdir="oe-rootfs-repo", + filterbydependencies=True, + needfeed=True): + super(RpmPM, self).__init__(d, target_rootfs) + self.target_vendor = target_vendor + self.task_name = task_name + if arch_var == None: + self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_") + else: + self.archs = self.d.getVar(arch_var).replace("-","_") + if task_name == "host": + self.primary_arch = self.d.getVar('SDK_ARCH') + else: + self.primary_arch = self.d.getVar('MACHINE_ARCH') + + if needfeed: + self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir) + create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies) + + self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name) + if not 
os.path.exists(self.d.expand('${T}/saved_packaging_data')): + bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data')) + self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf'] + self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % + self.task_name) + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + def _configure_dnf(self): + # libsolv handles 'noarch' internally, we don't need to specify it explicitly + archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]] + # This prevents accidental matching against libsolv's built-in policies + if len(archs) <= 1: + archs = archs + ["bogusarch"] + # This architecture needs to be upfront so that packages using it are properly prioritized + archs = ["sdk_provides_dummy_target"] + archs + confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/") + bb.utils.mkdirhier(confdir) + open(confdir + "arch", 'w').write(":".join(archs)) + distro_codename = self.d.getVar('DISTRO_CODENAME') + open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '') + + open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("") + + + def _configure_rpm(self): + # We need to configure rpm to use our primary package architecture as the installation architecture, + # and to make it compatible with other package architectures that we use. + # Otherwise it will refuse to proceed with packages installation. 
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/") + rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/") + bb.utils.mkdirhier(platformconfdir) + open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch) + with open(rpmrcconfdir + "rpmrc", 'w') as f: + f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch)) + f.write("buildarch_compat: %s: noarch\n" % self.primary_arch) + + open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n") + if self.d.getVar('RPM_PREFER_ELF_ARCH'): + open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH'))) + + if self.d.getVar('RPM_SIGN_PACKAGES') == '1': + signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND')) + pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key') + signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME')) + rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys") + cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path] + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Importing GPG key failed. 
Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + def create_configs(self): + self._configure_dnf() + self._configure_rpm() + + def write_index(self): + lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock" + lf = bb.utils.lockfile(lockfilename, False) + RpmIndexer(self.d, self.rpm_repo_dir).write_index() + bb.utils.unlockfile(lf) + + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + from urllib.parse import urlparse + + if feed_uris == "": + return + + gpg_opts = '' + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + gpg_opts += 'repo_gpgcheck=1\n' + gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME')) + + if self.d.getVar('RPM_SIGN_PACKAGES') != '1': + gpg_opts += 'gpgcheck=0\n' + + bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d")) + remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) + for uri in remote_uris: + repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/")) + if feed_archs is not None: + for arch in feed_archs.split(): + repo_uri = uri + "/" + arch + repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/")) + repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/")) + open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write( + "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts)) + else: + repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/")) + repo_uri = uri + open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write( + "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts)) + + def _prepare_pkg_transaction(self): + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + 
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + + def install(self, pkgs, attempt_only = False): + if len(pkgs) == 0: + return + self._prepare_pkg_transaction() + + bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS') + package_exclude = self.d.getVar('PACKAGE_EXCLUDE') + exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else []) + + output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) + + (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) + + (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) + + (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) + + ["install"] + + pkgs) + + failed_scriptlets_pkgnames = collections.OrderedDict() + for line in output.splitlines(): + if line.startswith("Error: Systemctl"): + bb.error(line) + + if line.startswith("Error in POSTIN scriptlet in rpm package"): + failed_scriptlets_pkgnames[line.split()[-1]] = True + + if len(failed_scriptlets_pkgnames) > 0: + failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + + def remove(self, pkgs, with_dependencies = True): + if not pkgs: + return + + self._prepare_pkg_transaction() + + if with_dependencies: + self._invoke_dnf(["remove"] + pkgs) + else: + cmd = bb.utils.which(os.getenv('PATH'), "rpm") + args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs] + + try: + bb.note("Running %s" % ' '.join([cmd] + args + pkgs)) + output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke rpm. 
Command " + "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8"))) + + def upgrade(self): + self._prepare_pkg_transaction() + self._invoke_dnf(["upgrade"]) + + def autoremove(self): + self._prepare_pkg_transaction() + self._invoke_dnf(["autoremove"]) + + def remove_packaging_data(self): + self._invoke_dnf(["clean", "all"]) + for dir in self.packaging_data_dirs: + bb.utils.remove(oe.path.join(self.target_rootfs, dir), True) + + def backup_packaging_data(self): + # Save the packaging dirs for increment rpm image generation + if os.path.exists(self.saved_packaging_data): + bb.utils.remove(self.saved_packaging_data, True) + for i in self.packaging_data_dirs: + source_dir = oe.path.join(self.target_rootfs, i) + target_dir = oe.path.join(self.saved_packaging_data, i) + if os.path.isdir(source_dir): + shutil.copytree(source_dir, target_dir, symlinks=True) + elif os.path.isfile(source_dir): + shutil.copy2(source_dir, target_dir) + + def recovery_packaging_data(self): + # Move the rpmlib back + if os.path.exists(self.saved_packaging_data): + for i in self.packaging_data_dirs: + target_dir = oe.path.join(self.target_rootfs, i) + if os.path.exists(target_dir): + bb.utils.remove(target_dir, True) + source_dir = oe.path.join(self.saved_packaging_data, i) + if os.path.isdir(source_dir): + shutil.copytree(source_dir, target_dir, symlinks=True) + elif os.path.isfile(source_dir): + shutil.copy2(source_dir, target_dir) + + def list_installed(self): + output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"], + print_output = False) + packages = {} + current_package = None + current_deps = None + current_state = "initial" + for line in output.splitlines(): + if line.startswith("Package:"): + package_info = line.split(" ")[1:] + current_package = package_info[0] + 
package_arch = package_info[1] + package_version = package_info[2] + package_rpm = package_info[3] + packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm} + current_deps = [] + elif line.startswith("Dependencies:"): + current_state = "dependencies" + elif line.startswith("Recommendations"): + current_state = "recommendations" + elif line.startswith("DependenciesEndHere:"): + current_state = "initial" + packages[current_package]["deps"] = current_deps + elif len(line) > 0: + if current_state == "dependencies": + current_deps.append(line) + elif current_state == "recommendations": + current_deps.append("%s [REC]" % line) + + return packages + + def update(self): + self._invoke_dnf(["makecache", "--refresh"]) + + def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ): + os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs + + dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf") + standard_dnf_args = ["-v", "--rpmverbosity=info", "-y", + "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), + "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")), + "--installroot=%s" % (self.target_rootfs), + "--setopt=logdir=%s" % (self.d.getVar('T')) + ] + if hasattr(self, "rpm_repo_dir"): + standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir)) + cmd = [dnf_cmd] + standard_dnf_args + dnf_args + bb.note('Running %s' % ' '.join(cmd)) + try: + output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8") + if print_output: + bb.debug(1, output) + return output + except subprocess.CalledProcessError as e: + if print_output: + (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " + "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + else: + (bb.note, bb.fatal)[fatal]("Could not invoke dnf. 
Command " + "'%s' returned %d:" % (' '.join(cmd), e.returncode)) + return e.output.decode("utf-8") + + def dump_install_solution(self, pkgs): + open(self.solution_manifest, 'w').write(" ".join(pkgs)) + return pkgs + + def load_old_install_solution(self): + if not os.path.exists(self.solution_manifest): + return [] + with open(self.solution_manifest, 'r') as fd: + return fd.read().split() + + def _script_num_prefix(self, path): + files = os.listdir(path) + numbers = set() + numbers.add(99) + for f in files: + numbers.add(int(f.split("-")[0])) + return max(numbers) + 1 + + def save_rpmpostinst(self, pkg): + bb.note("Saving postinstall script of %s" % (pkg)) + cmd = bb.utils.which(os.getenv('PATH'), "rpm") + args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg] + + try: + output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke rpm. Command " + "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8"))) + + # may need to prepend #!/bin/sh to output + + target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/')) + bb.utils.mkdirhier(target_path) + num = self._script_num_prefix(target_path) + saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg)) + open(saved_script_name, 'w').write(output) + os.chmod(saved_script_name, 0o755) + + def _handle_intercept_failure(self, registered_pkgs): + rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + bb.utils.mkdirhier(rpm_postinsts_dir) + + # Save the package postinstalls in /etc/rpm-postinsts + for pkg in registered_pkgs.split(): + self.save_rpmpostinst(pkg) + + def extract(self, pkg): + output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg]) + pkg_name = output.splitlines()[-1] + if not pkg_name.endswith(".rpm"): + bb.fatal("dnf could not find package %s in 
repository: %s" %(pkg, output)) + pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name) + + cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio") + rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio") + + if not os.path.isfile(pkg_path): + bb.fatal("Unable to extract package for '%s'." + "File %s doesn't exists" % (pkg, pkg_path)) + + tmp_dir = tempfile.mkdtemp() + current_dir = os.getcwd() + os.chdir(tmp_dir) + + try: + cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8"))) + except OSError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename)) + + bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) + os.chdir(current_dir) + + return tmp_dir diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py new file mode 100644 index 0000000000..e6604b301f --- /dev/null +++ b/meta/lib/oe/package_manager/rpm/manifest.py @@ -0,0 +1,54 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.manifest import Manifest + +class PkgManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. 
+ """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var)) + if split_pkgs is not None: + pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) + else: + pkg_list = self.d.getVar(var) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) + + for pkg_type in pkgs: + for pkg in pkgs[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + pass diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py new file mode 100644 index 0000000000..00d07cd9cc --- /dev/null +++ b/meta/lib/oe/package_manager/rpm/rootfs.py @@ -0,0 +1,148 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.rootfs import Rootfs +from oe.manifest import Manifest +from oe.utils import execute_pre_post_process +from oe.package_manager.rpm.manifest import PkgManifest +from oe.package_manager.rpm import RpmPM + +class PkgRootfs(Rootfs): + def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): + super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher) + self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\ + r'|exit 1|ERROR: |Error: |Error |ERROR '\ + r'|Failed |Failed: |Failed$|Failed\(\d+\):)' + + self.manifest = PkgManifest(d, manifest_dir) + + 
self.pm = RpmPM(d, + d.getVar('IMAGE_ROOTFS'), + self.d.getVar('TARGET_VENDOR') + ) + + self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN') + if self.inc_rpm_image_gen != "1": + bb.utils.remove(self.image_rootfs, True) + else: + self.pm.recovery_packaging_data() + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) + + self.pm.create_configs() + + ''' + While rpm incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the new install solution manifest and the + old installed manifest. + ''' + def _create_incremental(self, pkgs_initial_install): + if self.inc_rpm_image_gen == "1": + + pkgs_to_install = list() + for pkg_type in pkgs_initial_install: + pkgs_to_install += pkgs_initial_install[pkg_type] + + installed_manifest = self.pm.load_old_install_solution() + solution_manifest = self.pm.dump_install_solution(pkgs_to_install) + + pkg_to_remove = list() + for pkg in installed_manifest: + if pkg not in solution_manifest: + pkg_to_remove.append(pkg) + + self.pm.update() + + bb.note('incremental update -- upgrade packages in place ') + self.pm.upgrade() + if pkg_to_remove != []: + bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + self.pm.autoremove() + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS') + rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS') + + # update PM index files + self.pm.write_index() + + execute_pre_post_process(self.d, rpm_pre_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + if self.inc_rpm_image_gen == "1": + self._create_incremental(pkgs_to_install) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: 
+ pkgs += pkgs_to_install[pkg_type] + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install(pkgs) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install(pkgs_attempt, True) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install_complementary() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf']) + + execute_pre_post_process(self.d, rpm_post_process_cmds) + + if self.inc_rpm_image_gen == "1": + self.pm.backup_packaging_data() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + + @staticmethod + def _depends_list(): + return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS', + 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH'] + + def _get_delayed_postinsts(self): + postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") + if os.path.isdir(postinst_dir): + files = os.listdir(postinst_dir) + for f in files: + bb.note('Delayed package scriptlet: %s' % f) + return files + + return None + + def _save_postinsts(self): + # this is just a stub. 
For RPM, the failed postinstalls are + # already saved in /etc/rpm-postinsts + pass + + def _log_check(self): + self._log_check_warn() + self._log_check_error() + + def _cleanup(self): + if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d): + self.pm._invoke_dnf(["clean", "all"]) diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py new file mode 100644 index 0000000000..c5f232431f --- /dev/null +++ b/meta/lib/oe/package_manager/rpm/sdk.py @@ -0,0 +1,114 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import glob +from oe.utils import execute_pre_post_process +from oe.sdk import Sdk +from oe.manifest import Manifest +from oe.package_manager.rpm.manifest import PkgManifest +from oe.package_manager.rpm import RpmPM + +class PkgSdk(Sdk): + def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"): + super(PkgSdk, self).__init__(d, manifest_dir) + + self.target_manifest = PkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = PkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + rpm_repo_workdir = "oe-sdk-repo" + if "sdk_ext" in d.getVar("BB_RUNTASK"): + rpm_repo_workdir = "oe-sdk-ext-repo" + + self.target_pm = RpmPM(d, + self.sdk_target_sysroot, + self.d.getVar('TARGET_VENDOR'), + 'target', + rpm_repo_workdir=rpm_repo_workdir + ) + + self.host_pm = RpmPM(d, + self.sdk_host_sysroot, + self.d.getVar('SDK_VENDOR'), + 'host', + "SDK_PACKAGE_ARCHS", + "SDK_OS", + rpm_repo_workdir=rpm_repo_workdir + ) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.create_configs() + pm.write_index() + pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: + pkgs += pkgs_to_install[pkg_type] + + pm.install(pkgs) + + pm.install(pkgs_attempt, True) + + def 
_populate(self): + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) + + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) + + self.target_pm.run_intercepts(populate_sdk='target') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + self.install_locales(self.host_pm) + + self.host_pm.run_intercepts(populate_sdk='host') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.host_pm.remove_packaging_data() + + # Move host RPM library data + native_rpm_state_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk').strip('/'), + "lib", + "rpm" + ) + self.mkdirhier(native_rpm_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, + "var", + "lib", + "rpm", + "*")): + self.movefile(f, native_rpm_state_dir) + + self.remove(os.path.join(self.sdk_output, "var"), True) + + # Move host sysconfig data + native_sysconf_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('sysconfdir', + True).strip('/'), + ) + self.mkdirhier(native_sysconf_dir) + for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")): + self.movefile(f, native_sysconf_dir) + for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")): + self.movefile(f, native_sysconf_dir) + self.remove(os.path.join(self.sdk_output, "etc"), True) diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py index 32e5c82a94..212f048bc6 100644 --- 
a/meta/lib/oe/packagedata.py +++ b/meta/lib/oe/packagedata.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import codecs import os @@ -13,10 +17,9 @@ def read_pkgdatafile(fn): if os.access(fn, os.R_OK): import re - f = open(fn, 'r') - lines = f.readlines() - f.close() - r = re.compile("([^:]+):\s*(.*)") + with open(fn, 'r') as f: + lines = f.readlines() + r = re.compile(r"(^.+?):\s+(.*)") for l in lines: m = r.match(l) if m: @@ -42,18 +45,30 @@ def read_pkgdata(pn, d): return read_pkgdatafile(fn) # -# Collapse FOO_pkg variables into FOO +# Collapse FOO:pkg variables into FOO # def read_subpkgdata_dict(pkg, d): ret = {} subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d)) for var in subd: - newvar = var.replace("_" + pkg, "") - if newvar == var and var + "_" + pkg in subd: + newvar = var.replace(":" + pkg, "") + if newvar == var and var + ":" + pkg in subd: continue ret[newvar] = subd[var] return ret +def read_subpkgdata_extended(pkg, d): + import json + import bb.compress.zstd + + fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg) + try: + num_threads = int(d.getVar("BB_NUMBER_THREADS")) + with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f: + return json.load(f) + except FileNotFoundError: + return None + def _pkgmap(d): """Return a dictionary mapping package to recipe name.""" diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py index 4bc5d3e4bb..8fcaecde82 100644 --- a/meta/lib/oe/packagegroup.py +++ b/meta/lib/oe/packagegroup.py @@ -1,17 +1,15 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import itertools def is_optional(feature, d): - packages = d.getVar("FEATURE_PACKAGES_%s" % feature) - if packages: - return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")) - else: - return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional")) + return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional")) def packages(features, d): for feature in features: 
packages = d.getVar("FEATURE_PACKAGES_%s" % feature) - if not packages: - packages = d.getVar("PACKAGE_GROUP_%s" % feature) for pkg in (packages or "").split(): yield pkg diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py index 7dd31d9d46..9034fcae03 100644 --- a/meta/lib/oe/patch.py +++ b/meta/lib/oe/patch.py @@ -1,5 +1,10 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import oe.path import oe.types +import subprocess class NotFoundError(bb.BBHandledException): def __init__(self, path): @@ -21,7 +26,6 @@ class CmdError(bb.BBHandledException): def runcmd(args, dir = None): import pipes - import subprocess if dir: olddir = os.path.abspath(os.curdir) @@ -34,31 +38,25 @@ def runcmd(args, dir = None): args = [ pipes.quote(str(arg)) for arg in args ] cmd = " ".join(args) # print("cmd: %s" % cmd) - (exitstatus, output) = subprocess.getstatusoutput(cmd) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + stdout, stderr = proc.communicate() + stdout = stdout.decode('utf-8') + stderr = stderr.decode('utf-8') + exitstatus = proc.returncode if exitstatus != 0: - raise CmdError(cmd, exitstatus >> 8, output) - if " fuzz " in output: - bb.warn(""" -Some of the context lines in patches were ignored. This can lead to incorrectly applied patches. -The context lines in the patches can be updated with devtool: - - devtool modify <recipe> - devtool finish --force-patch-refresh <recipe> <layer_path> - -Then the updated patches and the source tree (in devtool's workspace) -should be reviewed to make sure the patches apply in the correct place -and don't introduce duplicate lines (which can, and does happen -when some of the context is ignored). 
Further information: -http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html -https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450 -Details: -{}""".format(output)) - return output + raise CmdError(cmd, exitstatus >> 8, "stdout: %s\nstderr: %s" % (stdout, stderr)) + if " fuzz " in stdout and "Hunk " in stdout: + # Drop patch fuzz info with header and footer to log file so + # insane.bbclass can handle to throw error/warning + bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(stdout)) + + return stdout finally: if dir: os.chdir(olddir) + class PatchError(Exception): def __init__(self, msg): self.msg = msg @@ -301,6 +299,24 @@ class GitApplyTree(PatchTree): PatchTree.__init__(self, dir, d) self.commituser = d.getVar('PATCH_GIT_USER_NAME') self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL') + if not self._isInitialized(): + self._initRepo() + + def _isInitialized(self): + cmd = "git rev-parse --show-toplevel" + try: + output = runcmd(cmd.split(), self.dir).strip() + except CmdError as err: + ## runcmd returned non-zero which most likely means 128 + ## Not a git directory + return False + ## Make sure repo is in builddir to not break top-level git repos + return os.path.samefile(output, self.dir) + + def _initRepo(self): + runcmd("git init".split(), self.dir) + runcmd("git add .".split(), self.dir) + runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir) @staticmethod def extractPatchHeader(patchfile): @@ -423,7 +439,7 @@ class GitApplyTree(PatchTree): date = newdate if not subject: subject = newsubject - if subject and outlines and not outlines[0].strip() == subject: + if subject and not (outlines and outlines[0].strip() == subject): outlines.insert(0, '%s\n\n' % subject.strip()) # Write out commit message to a file @@ -446,7 +462,6 @@ class GitApplyTree(PatchTree): def extractPatches(tree, startcommit, outdir, paths=None): import tempfile import shutil - import re tempdir = 
tempfile.mkdtemp(prefix='oepatch') try: shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir] @@ -462,13 +477,10 @@ class GitApplyTree(PatchTree): try: with open(srcfile, 'r', encoding=encoding) as f: for line in f: - checkline = line - if checkline.startswith('Subject: '): - checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:]) - if checkline.startswith(GitApplyTree.patch_line_prefix): + if line.startswith(GitApplyTree.patch_line_prefix): outfile = line.split()[-1].strip() continue - if checkline.startswith(GitApplyTree.ignore_commit_prefix): + if line.startswith(GitApplyTree.ignore_commit_prefix): continue patchlines.append(line) except UnicodeDecodeError: @@ -515,8 +527,7 @@ class GitApplyTree(PatchTree): with open(commithook, 'w') as f: # NOTE: the formatting here is significant; if you change it you'll also need to # change other places which read it back - f.write('echo >> $1\n') - f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix) + f.write('echo "\n%s: $PATCHFILE" >> $1' % GitApplyTree.patch_line_prefix) os.chmod(commithook, 0o755) shutil.copy2(commithook, applyhook) try: @@ -524,7 +535,7 @@ class GitApplyTree(PatchTree): try: shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot] self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail) - shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']] + shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']] return _applypatchhelper(shellcmd, patch, force, reverse, run) except CmdError: # Need to abort the git am, or we'll still be within it at the end @@ -858,6 +869,7 @@ def src_patches(d, all=False, expand=True): def should_apply(parm, d): + import bb.utils if "mindate" in parm or "maxdate" in parm: pn = d.getVar('PN') srcdate = d.getVar('SRCDATE_%s' % pn) @@ -894,5 +906,15 @@ def should_apply(parm, d): if srcrev and parm["notrev"] in srcrev: return False, "doesn't apply to revision" + if 
"maxver" in parm: + pv = d.getVar('PV') + if bb.utils.vercmp_string_op(pv, parm["maxver"], ">"): + return False, "applies to earlier version" + + if "minver" in parm: + pv = d.getVar('PV') + if bb.utils.vercmp_string_op(pv, parm["minver"], "<"): + return False, "applies to later version" + return True, None diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py index 1e24d0586b..c8d8ad05b9 100644 --- a/meta/lib/oe/path.py +++ b/meta/lib/oe/path.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import errno import glob import shutil @@ -90,12 +94,27 @@ def copytree(src, dst): subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) def copyhardlinktree(src, dst): - """ Make the hard link when possible, otherwise copy. """ + """Make a tree of hard links when possible, otherwise copy.""" bb.utils.mkdirhier(dst) if os.path.isdir(src) and not len(os.listdir(src)): return - if (os.stat(src).st_dev == os.stat(dst).st_dev): + canhard = False + testfile = None + for root, dirs, files in os.walk(src): + if len(files): + testfile = os.path.join(root, files[0]) + break + + if testfile is not None: + try: + os.link(testfile, os.path.join(dst, 'testfile')) + os.unlink(os.path.join(dst, 'testfile')) + canhard = True + except Exception as e: + bb.debug(2, "Hardlink test failed with " + str(e)) + + if (canhard): # Need to copy directories only with tar first since cp will error if two # writers try and create a directory at the same time cmd = "cd %s; find . 
-type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst) @@ -114,6 +133,14 @@ def copyhardlinktree(src, dst): else: copytree(src, dst) +def copyhardlink(src, dst): + """Make a hard link when possible, otherwise copy.""" + + try: + os.link(src, dst) + except OSError: + shutil.copy(src, dst) + def remove(path, recurse=True): """ Equivalent to rm -f or rm -rf @@ -293,3 +320,24 @@ def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=F return files +def canonicalize(paths, sep=','): + """Given a string with paths (separated by commas by default), expand + each path using os.path.realpath() and return the resulting paths as a + string (separated using the same separator as the original string). + """ + # Ignore paths containing "$" as they are assumed to be unexpanded bitbake + # variables. Normally they would be ignored, e.g., when passing the paths + # through the shell they would expand to empty strings. However, when they + # are passed through os.path.realpath(), it will cause them to be prefixed + # with the absolute path to the current directory and thus not be empty + # anymore. + # + # Also maintain trailing slashes, as the paths may actually be used as + # prefixes in string compares later on, where the slashes then are important. 
+ canonical_paths = [] + for path in (paths or '').split(sep): + if '$' not in path: + trailing_slash = path.endswith('/') and '/' or '' + canonical_paths.append(os.path.realpath(path) + trailing_slash) + + return sep.join(canonical_paths) diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py index 32dfc15e88..339f7aebca 100644 --- a/meta/lib/oe/prservice.py +++ b/meta/lib/oe/prservice.py @@ -1,14 +1,16 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# def prserv_make_conn(d, check = False): import prserv.serv host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f]) try: conn = None - conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1])) + conn = prserv.serv.connect(host_params[0], int(host_params[1])) if check: if not conn.ping(): raise Exception('service not available') - d.setVar("__PRSERV_CONN",conn) except Exception as exc: bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc))) @@ -19,31 +21,29 @@ def prserv_dump_db(d): bb.error("Not using network based PR service") return None - conn = d.getVar("__PRSERV_CONN") + conn = prserv_make_conn(d) if conn is None: - conn = prserv_make_conn(d) - if conn is None: - bb.error("Making connection failed to remote PR service") - return None + bb.error("Making connection failed to remote PR service") + return None #dump db opt_version = d.getVar('PRSERV_DUMPOPT_VERSION') opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH') opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM') opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL')) - return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col) + d = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col) + conn.close() + return d def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None): if not d.getVar('PRSERV_HOST'): bb.error("Not using network based PR service") return None - conn = d.getVar("__PRSERV_CONN") + conn = prserv_make_conn(d) if 
conn is None: - conn = prserv_make_conn(d) - if conn is None: - bb.error("Making connection failed to remote PR service") - return None + bb.error("Making connection failed to remote PR service") + return None #get the entry values imported = [] prefix = "PRAUTO$" @@ -67,6 +67,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret)) else: imported.append((version,pkgarch,checksum,value)) + conn.close() return imported def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False): @@ -76,41 +77,40 @@ def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False): df = d.getVar('PRSERV_DUMPFILE') #write data lf = bb.utils.lockfile("%s.lock" % df) - f = open(df, "a") - if metainfo: - #dump column info - f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']); - f.write("#Table: %s\n" % metainfo['tbl_name']) - f.write("#Columns:\n") - f.write("#name \t type \t notn \t dflt \t pk\n") - f.write("#----------\t --------\t --------\t --------\t ----\n") - for i in range(len(metainfo['col_info'])): - f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" % - (metainfo['col_info'][i]['name'], - metainfo['col_info'][i]['type'], - metainfo['col_info'][i]['notnull'], - metainfo['col_info'][i]['dflt_value'], - metainfo['col_info'][i]['pk'])) - f.write("\n") + with open(df, "a") as f: + if metainfo: + #dump column info + f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver']); + f.write("#Table: %s\n" % metainfo['tbl_name']) + f.write("#Columns:\n") + f.write("#name \t type \t notn \t dflt \t pk\n") + f.write("#----------\t --------\t --------\t --------\t ----\n") + for i in range(len(metainfo['col_info'])): + f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" % + (metainfo['col_info'][i]['name'], + metainfo['col_info'][i]['type'], + metainfo['col_info'][i]['notnull'], + metainfo['col_info'][i]['dflt_value'], + 
metainfo['col_info'][i]['pk'])) + f.write("\n") - if lockdown: - f.write("PRSERV_LOCKDOWN = \"1\"\n\n") + if lockdown: + f.write("PRSERV_LOCKDOWN = \"1\"\n\n") - if datainfo: - idx = {} - for i in range(len(datainfo)): - pkgarch = datainfo[i]['pkgarch'] - value = datainfo[i]['value'] - if pkgarch not in idx: - idx[pkgarch] = i - elif value > datainfo[idx[pkgarch]]['value']: - idx[pkgarch] = i - f.write("PRAUTO$%s$%s$%s = \"%s\"\n" % - (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value))) - if not nomax: - for i in idx: - f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value']))) - f.close() + if datainfo: + idx = {} + for i in range(len(datainfo)): + pkgarch = datainfo[i]['pkgarch'] + value = datainfo[i]['value'] + if pkgarch not in idx: + idx[pkgarch] = i + elif value > datainfo[idx[pkgarch]]['value']: + idx[pkgarch] = i + f.write("PRAUTO$%s$%s$%s = \"%s\"\n" % + (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value))) + if not nomax: + for i in idx: + f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value']))) bb.utils.unlockfile(lf) def prserv_check_avail(d): @@ -123,4 +123,5 @@ def prserv_check_avail(d): except TypeError: bb.fatal('Undefined/incorrect PRSERV_HOST value. 
Format: "host:port"') else: - prserv_make_conn(d, True) + conn = prserv_make_conn(d, True) + conn.close() diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py index 59c72ce580..89acd3ead0 100644 --- a/meta/lib/oe/qa.py +++ b/meta/lib/oe/qa.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import os, struct, mmap class NotELFFileError(Exception): @@ -37,13 +41,18 @@ class ELFFile: def __init__(self, name): self.name = name self.objdump_output = {} + self.data = None # Context Manager functions to close the mmap explicitly def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): - self.data.close() + self.close() + + def close(self): + if self.data: + self.data.close() def open(self): with open(self.name, "rb") as f: @@ -122,6 +131,9 @@ class ELFFile: """ return self.getShort(ELFFile.E_MACHINE) + def set_objdump(self, cmd, output): + self.objdump_output[cmd] = output + def run_objdump(self, cmd, d): import bb.process import sys @@ -150,6 +162,7 @@ def elf_machine_to_string(machine): """ try: return { + 0x00: "Unset", 0x02: "SPARC", 0x03: "x86", 0x08: "MIPS", @@ -164,6 +177,40 @@ def elf_machine_to_string(machine): except: return "Unknown (%s)" % repr(machine) +def write_error(type, error, d): + logfile = d.getVar('QA_LOGFILE') + if logfile: + p = d.getVar('P') + with open(logfile, "a+") as f: + f.write("%s: %s [%s]\n" % (p, error, type)) + +def handle_error(error_class, error_msg, d): + if error_class in (d.getVar("ERROR_QA") or "").split(): + write_error(error_class, error_msg, d) + bb.error("QA Issue: %s [%s]" % (error_msg, error_class)) + d.setVar("QA_ERRORS_FOUND", "True") + return False + elif error_class in (d.getVar("WARN_QA") or "").split(): + write_error(error_class, error_msg, d) + bb.warn("QA Issue: %s [%s]" % (error_msg, error_class)) + else: + bb.note("QA Issue: %s [%s]" % (error_msg, error_class)) + return True + +def add_message(messages, section, new_msg): + if section not in messages: + messages[section] = 
new_msg + else: + messages[section] = messages[section] + "\n" + new_msg + +def exit_with_message_if_errors(message, d): + qa_fatal_errors = bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False) + if qa_fatal_errors: + bb.fatal(message) + +def exit_if_errors(d): + exit_with_message_if_errors("Fatal QA errors were found, failing task.", d) + if __name__ == "__main__": import sys diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py index 92c0f65257..872ff97b89 100644 --- a/meta/lib/oe/recipeutils.py +++ b/meta/lib/oe/recipeutils.py @@ -4,6 +4,8 @@ # # Copyright (C) 2013-2017 Intel Corporation # +# SPDX-License-Identifier: GPL-2.0-only +# import sys import os @@ -22,9 +24,9 @@ from collections import OrderedDict, defaultdict from bb.utils import vercmp_string # Help us to find places to insert values -recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()'] +recipe_progression = ['SUMMARY', 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 
'do_package()', 'do_deploy()', 'BBCLASSEXTEND'] # Variables that sometimes are a bit long but shouldn't be wrapped -nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI\[(.+\.)?md5sum\]', 'SRC_URI\[(.+\.)?sha256sum\]'] +nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]'] list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM'] meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION'] @@ -45,7 +47,7 @@ def simplify_history(history, d): continue has_set = True elif event['op'] in ('append', 'prepend', 'postdot', 'predot'): - # Reminder: "append" and "prepend" mean += and =+ respectively, NOT _append / _prepend + # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend if has_set: continue ret_history.insert(0, event) @@ -161,7 +163,7 @@ def patch_recipe_lines(fromlines, values, trailing_newline=True): key = item[:-2] else: key = item - restr = '%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key + restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' 
% key if item.endswith('()'): recipe_progression_restrs.append(restr + '()') else: @@ -340,7 +342,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None def override_applicable(hevent): op = hevent['op'] if '[' in op: - opoverrides = op.split('[')[1].split(']')[0].split('_') + opoverrides = op.split('[')[1].split(']')[0].split(':') for opoverride in opoverrides: if not opoverride in overrides: return False @@ -366,13 +368,13 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None recipe_set = True if not recipe_set: for event in history: - if event['op'].startswith('_remove'): + if event['op'].startswith(':remove'): continue if not override_applicable(event): continue newvalue = value.replace(event['detail'], '') - if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith('_'): - op = event['op'].replace('[', '_').replace(']', '') + if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'): + op = event['op'].replace('[', ':').replace(']', '') extravals[var + op] = None value = newvalue vals[var] = ('+=', value) @@ -407,18 +409,20 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F fetch.download() for pth in fetch.localpaths(): if pth not in localpaths: - localpaths.append(pth) + localpaths.append(os.path.abspath(pth)) uri_values.append(srcuri) fetch_urls(d) if all_variants: - # Get files for other variants e.g. in the case of a SRC_URI_append + # Get files for other variants e.g. 
in the case of a SRC_URI:append localdata = bb.data.createCopy(d) variants = (localdata.getVar('BBCLASSEXTEND') or '').split() if variants: # Ensure we handle class-target if we're dealing with one of the variants variants.append('target') for variant in variants: + if variant.startswith("devupstream"): + localdata.setVar('SRCPV', 'git') localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant) fetch_urls(localdata) @@ -482,7 +486,14 @@ def get_recipe_local_files(d, patches=False, archives=False): unpack = fetch.ud[uri].parm.get('unpack', True) if unpack: continue - ret[fname] = localpath + if os.path.isdir(localpath): + for root, dirs, files in os.walk(localpath): + for fname in files: + fileabspath = os.path.join(root,fname) + srcdir = os.path.dirname(localpath) + ret[os.path.relpath(fileabspath,srcdir)] = fileabspath + else: + ret[fname] = localpath return ret @@ -552,6 +563,23 @@ def get_bbfile_path(d, destdir, extrapathhint=None): confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata) pn = d.getVar('PN') + # Parse BBFILES_DYNAMIC and append to BBFILES + bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split() + collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split() + invalid = [] + for entry in bbfiles_dynamic: + parts = entry.split(":", 1) + if len(parts) != 2: + invalid.append(entry) + continue + l, f = parts + invert = l[0] == "!" 
+ if invert: + l = l[1:] + if (l in collections and not invert) or (l not in collections and invert): + confdata.appendVar("BBFILES", " " + f) + if invalid: + return None bbfilespecs = (confdata.getVar('BBFILES') or '').split() if destdir == destlayerdir: for bbfilespec in bbfilespecs: @@ -725,12 +753,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, destsubdir = rd.getVar('PN') if srcfiles: - bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:')) + bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:')) appendoverride = '' if machine: bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}')) - appendoverride = '_%s' % machine + appendoverride = ':%s' % machine copyfiles = {} if srcfiles: instfunclines = [] @@ -744,7 +772,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, # FIXME do we care if the entry is added by another bbappend that might go away? if not srcurientry in rd.getVar('SRC_URI').split(): if machine: - appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry) + appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry) else: appendline('SRC_URI', '+=', srcurientry) copyfiles[newfile] = srcfile @@ -758,7 +786,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, instfunclines.append(instdirline) instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath)) if instfunclines: - bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines)) + bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines)) if redirect_output: bb.note('Writing append file %s (dry-run)' % appendpath) @@ -776,15 +804,15 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, extvars = {'destsubdir': destsubdir} def appendfile_varfunc(varname, origvalue, op, newlines): - if varname == 
'FILESEXTRAPATHS_prepend': + if varname == 'FILESEXTRAPATHS:prepend': if origvalue.startswith('${THISDIR}/'): - popline('FILESEXTRAPATHS_prepend') + popline('FILESEXTRAPATHS:prepend') extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':')) elif varname == 'PACKAGE_ARCH': if machine: popline('PACKAGE_ARCH') return (machine, None, 4, False) - elif varname.startswith('do_install_append'): + elif varname.startswith('do_install:append'): func = popline(varname) if func: instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()] @@ -796,7 +824,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, splitval = split_var_value(origvalue, assignment=False) changed = False removevar = varname - if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]: + if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]: removevar = 'SRC_URI' line = popline(varname) if line: @@ -825,11 +853,11 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, newvalue = splitval if len(newvalue) == 1: # Ensure it's written out as one line - if '_append' in varname: + if ':append' in varname: newvalue = ' ' + newvalue[0] else: newvalue = newvalue[0] - if not newvalue and (op in ['+=', '.='] or '_append' in varname): + if not newvalue and (op in ['+=', '.='] or ':append' in varname): # There's no point appending nothing newvalue = None if varname.endswith('()'): @@ -925,7 +953,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type): sfx = '' if uri_type == 'git': - git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?") + git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>.*?)(?P<sfx>\+[^\+]*(git)?r?(AUTOINC\+))(?P<rev>.*)") m = git_regex.match(pv) if m: @@ -933,7 +961,7 @@ def get_recipe_pv_without_srcpv(pv, uri_type): pfx = m.group('pfx') sfx = m.group('sfx') else: - regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)") + regex = 
re.compile(r"(?P<pfx>(v|r)?)(?P<ver>.*)") m = regex.match(pv) if m: pv = m.group('ver') @@ -1001,6 +1029,7 @@ def get_recipe_upstream_version(rd): else: ud = bb.fetch2.FetchData(src_uri, rd) if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1": + bb.fetch2.get_srcrev(rd) revision = ud.method.latest_revision(ud, rd, 'default') upversion = pv if revision != rd.getVar("SRCREV"): @@ -1050,7 +1079,6 @@ def get_recipe_upgrade_status(recipes=None): data_copy_list = [] copy_vars = ('SRC_URI', 'PV', - 'GITDIR', 'DL_DIR', 'PN', 'CACHE', @@ -1066,6 +1094,18 @@ def get_recipe_upgrade_status(recipes=None): 'RECIPE_UPSTREAM_VERSION', 'RECIPE_UPSTREAM_DATE', 'CHECK_DATE', + 'FETCHCMD_bzr', + 'FETCHCMD_ccrc', + 'FETCHCMD_cvs', + 'FETCHCMD_git', + 'FETCHCMD_hg', + 'FETCHCMD_npm', + 'FETCHCMD_osc', + 'FETCHCMD_p4', + 'FETCHCMD_repo', + 'FETCHCMD_s3', + 'FETCHCMD_svn', + 'FETCHCMD_wget', ) with bb.tinfoil.Tinfoil() as tinfoil: diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py new file mode 100644 index 0000000000..35b8be6d08 --- /dev/null +++ b/meta/lib/oe/reproducible.py @@ -0,0 +1,192 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# +import os +import subprocess +import bb + +# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each +# component's build environment. The format is number of seconds since the +# system epoch. +# +# Upstream components (generally) respect this environment variable, +# using it in place of the "current" date and time. +# See https://reproducible-builds.org/specs/source-date-epoch/ +# +# The default value of SOURCE_DATE_EPOCH comes from the function +# get_source_date_epoch_value which reads from the SDE_FILE, or if the file +# is not available will use the fallback of SOURCE_DATE_EPOCH_FALLBACK. +# +# The SDE_FILE is normally constructed from the function +# create_source_date_epoch_stamp which is typically added as a postfuncs to +# the do_unpack task. 
If a recipe does NOT have do_unpack, it should be added +# to a task that runs after the source is available and before the +# do_deploy_source_date_epoch task is executed. +# +# If a recipe wishes to override the default behavior it should set its own +# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task +# with recipe-specific functionality to write the appropriate +# SOURCE_DATE_EPOCH into the SDE_FILE. +# +# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should +# be reproducible for anyone who builds the same revision from the same +# sources. +# +# There are 5 ways the create_source_date_epoch_stamp function determines what +# becomes SOURCE_DATE_EPOCH: +# +# 1. Use the value from __source_date_epoch.txt file if this file exists. +# This file was most likely created in the previous build by one of the +# following methods 2,3,4. +# Alternatively, it can be provided by a recipe via SRC_URI. +# +# If the file does not exist: +# +# 2. If there is a git checkout, use the last git commit timestamp. +# Git does not preserve file timestamps on checkout. +# +# 3. Use the mtime of "known" files such as NEWS, ChangeLog, ... +# This works for well-kept repositories distributed via tarball. +# +# 4. Use the modification time of the youngest file in the source tree, if +# there is one. +# This will be the newest file from the distribution tarball, if any. +# +# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK). +# +# Once the value is determined, it is stored in the recipe's SDE_FILE. 
+ +def get_source_date_epoch_from_known_files(d, sourcedir): + source_date_epoch = None + newest_file = None + known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"]) + for file in known_files: + filepath = os.path.join(sourcedir, file) + if os.path.isfile(filepath): + mtime = int(os.lstat(filepath).st_mtime) + # There may be more than one "known_file" present, if so, use the youngest one + if not source_date_epoch or mtime > source_date_epoch: + source_date_epoch = mtime + newest_file = filepath + if newest_file: + bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file) + return source_date_epoch + +def find_git_folder(d, sourcedir): + # First guess: WORKDIR/git + # This is the default git fetcher unpack path + workdir = d.getVar('WORKDIR') + gitpath = os.path.join(workdir, "git/.git") + if os.path.isdir(gitpath): + return gitpath + + # Second guess: ${S} + gitpath = os.path.join(sourcedir, ".git") + if os.path.isdir(gitpath): + return gitpath + + # Perhaps there was a subpath or destsuffix specified. 
+ # Go looking in the WORKDIR + exclude = set(["build", "image", "license-destdir", "patches", "pseudo", + "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"]) + for root, dirs, files in os.walk(workdir, topdown=True): + dirs[:] = [d for d in dirs if d not in exclude] + if '.git' in dirs: + return os.path.join(root, ".git") + + bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir) + return None + +def get_source_date_epoch_from_git(d, sourcedir): + if not "git://" in d.getVar('SRC_URI') and not "gitsm://" in d.getVar('SRC_URI'): + return None + + gitpath = find_git_folder(d, sourcedir) + if not gitpath: + return None + + # Check that the repository has a valid HEAD; it may not if subdir is used + # in SRC_URI + p = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + if p.returncode != 0: + bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, p.stdout.decode('utf-8'))) + return None + + bb.debug(1, "git repository: %s" % gitpath) + p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE) + return int(p.stdout.decode('utf-8')) + +def get_source_date_epoch_from_youngest_file(d, sourcedir): + if sourcedir == d.getVar('WORKDIR'): + # These sources are almost certainly not from a tarball + return None + + # Do it the hard way: check all files and find the youngest one... 
+ source_date_epoch = None + newest_file = None + for root, dirs, files in os.walk(sourcedir, topdown=True): + files = [f for f in files if not f[0] == '.'] + + for fname in files: + filename = os.path.join(root, fname) + try: + mtime = int(os.lstat(filename).st_mtime) + except ValueError: + mtime = 0 + if not source_date_epoch or mtime > source_date_epoch: + source_date_epoch = mtime + newest_file = filename + + if newest_file: + bb.debug(1, "Newest file found: %s" % newest_file) + return source_date_epoch + +def fixed_source_date_epoch(d): + bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH") + source_date_epoch = d.getVar('SOURCE_DATE_EPOCH_FALLBACK') + if source_date_epoch: + bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK") + return int(source_date_epoch) + return 0 + +def get_source_date_epoch(d, sourcedir): + return ( + get_source_date_epoch_from_git(d, sourcedir) or + get_source_date_epoch_from_known_files(d, sourcedir) or + get_source_date_epoch_from_youngest_file(d, sourcedir) or + fixed_source_date_epoch(d) # Last resort + ) + +def epochfile_read(epochfile, d): + cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None) + if cached and efile == epochfile: + return cached + + if cached and epochfile != efile: + bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile)) + + source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK')) + try: + with open(epochfile, 'r') as f: + s = f.read() + try: + source_date_epoch = int(s) + except ValueError: + bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s) + source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK')) + bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) + except FileNotFoundError: + bb.debug(1, "Cannot find %s. 
SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch)) + + d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile)) + return str(source_date_epoch) + +def epochfile_write(source_date_epoch, epochfile, d): + + bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch) + bb.utils.mkdirhier(os.path.dirname(epochfile)) + + tmp_file = "%s.new" % epochfile + with open(tmp_file, 'w') as f: + f.write(str(source_date_epoch)) + os.rename(tmp_file, epochfile) diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py index b7c0b9c24b..98cf3f244d 100644 --- a/meta/lib/oe/rootfs.py +++ b/meta/lib/oe/rootfs.py @@ -1,15 +1,16 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# from abc import ABCMeta, abstractmethod from oe.utils import execute_pre_post_process from oe.package_manager import * from oe.manifest import * import oe.path -import filecmp import shutil import os import subprocess import re - class Rootfs(object, metaclass=ABCMeta): """ This is an abstract class. Do not instantiate this directly. 
@@ -48,6 +49,8 @@ class Rootfs(object, metaclass=ABCMeta): excludes = [ 'log_check', r'^\+' ] if hasattr(self, 'log_check_expected_regexes'): excludes.extend(self.log_check_expected_regexes) + # Insert custom log_check excludes + excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x] excludes = [re.compile(x) for x in excludes] r = re.compile(match) log_path = self.d.expand("${T}/log.do_rootfs") @@ -111,7 +114,7 @@ class Rootfs(object, metaclass=ABCMeta): shutil.rmtree(self.image_rootfs + '-orig') except: pass - os.rename(self.image_rootfs, self.image_rootfs + '-orig') + bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig') bb.note(" Creating debug rootfs...") bb.utils.mkdirhier(self.image_rootfs) @@ -123,17 +126,16 @@ class Rootfs(object, metaclass=ABCMeta): bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir)) shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True) - cpath = oe.cachedpath.CachedPath() # Copy files located in /usr/lib/debug or /usr/src/debug for dir in ["/usr/lib/debug", "/usr/src/debug"]: src = self.image_rootfs + '-orig' + dir - if cpath.exists(src): + if os.path.exists(src): dst = self.image_rootfs + dir bb.utils.mkdirhier(os.path.dirname(dst)) shutil.copytree(src, dst) # Copy files with suffix '.debug' or located in '.debug' dir. 
- for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'): + for root, dirs, files in os.walk(self.image_rootfs + '-orig'): relative_dir = root[len(self.image_rootfs + '-orig'):] for f in files: if f.endswith('.debug') or '/.debug' in relative_dir: @@ -163,10 +165,10 @@ class Rootfs(object, metaclass=ABCMeta): shutil.rmtree(self.image_rootfs + '-dbg') except: pass - os.rename(self.image_rootfs, self.image_rootfs + '-dbg') + bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg') - bb.note(" Restoreing original rootfs...") - os.rename(self.image_rootfs + '-orig', self.image_rootfs) + bb.note(" Restoring original rootfs...") + bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs) def _exec_shell_cmd(self, cmd): fakerootcmd = self.d.getVar('FAKEROOT') @@ -188,10 +190,6 @@ class Rootfs(object, metaclass=ABCMeta): post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND") rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND') - bb.utils.mkdirhier(self.image_rootfs) - - bb.utils.mkdirhier(self.deploydir) - execute_pre_post_process(self.d, pre_process_cmds) if self.progress_reporter: @@ -215,6 +213,9 @@ class Rootfs(object, metaclass=ABCMeta): self.progress_reporter.next_stage() if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", + True, False, self.d) and \ + not bb.utils.contains("IMAGE_FEATURES", + "read-only-rootfs-delayed-postinsts", True, False, self.d): delayed_postinsts = self._get_delayed_postinsts() if delayed_postinsts is not None: @@ -245,13 +246,11 @@ class Rootfs(object, metaclass=ABCMeta): def _uninstall_unneeded(self): - # Remove unneeded init script symlinks + # Remove the run-postinsts package if no delayed postinsts are found delayed_postinsts = self._get_delayed_postinsts() if delayed_postinsts is None: - if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")): - self._exec_shell_cmd(["update-rc.d", "-f", "-r", - self.d.getVar('IMAGE_ROOTFS'), - "run-postinsts", 
"remove"]) + if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")): + self.pm.remove(["run-postinsts"]) image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", True, False, self.d) @@ -295,14 +294,24 @@ class Rootfs(object, metaclass=ABCMeta): def _run_ldconfig(self): if self.d.getVar('LDCONFIGDEPEND'): - bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v") + bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X") self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c', - 'new', '-v']) + 'new', '-v', '-X']) + + image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", + True, False, self.d) + ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig", + True, False, self.d) + if image_rorfs or not ldconfig_in_features: + ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig") + if os.path.exists(ldconfig_cache_dir): + bb.note("Removing ldconfig auxiliary cache...") + shutil.rmtree(ldconfig_cache_dir) def _check_for_kernel_modules(self, modules_dir): for root, dirs, files in os.walk(modules_dir, topdown=True): for name in files: - found_ko = name.endswith(".ko") + found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz")) if found_ko: return found_ko return False @@ -351,613 +360,10 @@ class Rootfs(object, metaclass=ABCMeta): self.image_rootfs, "-D", devtable]) -class RpmRootfs(Rootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\ - r'|exit 1|ERROR: |Error: |Error |ERROR '\ - r'|Failed |Failed: |Failed$|Failed\(\d+\):)' - self.manifest = RpmManifest(d, manifest_dir) - - self.pm = RpmPM(d, - d.getVar('IMAGE_ROOTFS'), - self.d.getVar('TARGET_VENDOR') - ) - - 
self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN') - if self.inc_rpm_image_gen != "1": - bb.utils.remove(self.image_rootfs, True) - else: - self.pm.recovery_packaging_data() - bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - - self.pm.create_configs() - - ''' - While rpm incremental image generation is enabled, it will remove the - unneeded pkgs by comparing the new install solution manifest and the - old installed manifest. - ''' - def _create_incremental(self, pkgs_initial_install): - if self.inc_rpm_image_gen == "1": - - pkgs_to_install = list() - for pkg_type in pkgs_initial_install: - pkgs_to_install += pkgs_initial_install[pkg_type] - - installed_manifest = self.pm.load_old_install_solution() - solution_manifest = self.pm.dump_install_solution(pkgs_to_install) - - pkg_to_remove = list() - for pkg in installed_manifest: - if pkg not in solution_manifest: - pkg_to_remove.append(pkg) - - self.pm.update() - - bb.note('incremental update -- upgrade packages in place ') - self.pm.upgrade() - if pkg_to_remove != []: - bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) - self.pm.remove(pkg_to_remove) - - self.pm.autoremove() - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS') - rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS') - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, rpm_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - if self.inc_rpm_image_gen == "1": - self._create_incremental(pkgs_to_install) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.update() - - pkgs = [] - pkgs_attempt = [] - for pkg_type in pkgs_to_install: - if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: - pkgs_attempt += pkgs_to_install[pkg_type] - else: - pkgs += pkgs_to_install[pkg_type] - - if self.progress_reporter: - 
self.progress_reporter.next_stage() - - self.pm.install(pkgs) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install(pkgs_attempt, True) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf']) - - execute_pre_post_process(self.d, rpm_post_process_cmds) - - if self.inc_rpm_image_gen == "1": - self.pm.backup_packaging_data() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - - @staticmethod - def _depends_list(): - return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS', - 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH'] - - def _get_delayed_postinsts(self): - postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") - if os.path.isdir(postinst_dir): - files = os.listdir(postinst_dir) - for f in files: - bb.note('Delayed package scriptlet: %s' % f) - return files - - return None - - def _save_postinsts(self): - # this is just a stub. 
For RPM, the failed postinstalls are - # already saved in /etc/rpm-postinsts - pass - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d): - self.pm._invoke_dnf(["clean", "all"]) - - -class DpkgOpkgRootfs(Rootfs): - def __init__(self, d, progress_reporter=None, logcatcher=None): - super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - - def _get_pkgs_postinsts(self, status_file): - def _get_pkg_depends_list(pkg_depends): - pkg_depends_list = [] - # filter version requirements like libc (>= 1.1) - for dep in pkg_depends.split(', '): - m_dep = re.match(r"^(.*) \(.*\)$", dep) - if m_dep: - dep = m_dep.group(1) - pkg_depends_list.append(dep) - - return pkg_depends_list - - pkgs = {} - pkg_name = "" - pkg_status_match = False - pkg_depends = "" - - with open(status_file) as status: - data = status.read() - status.close() - for line in data.split('\n'): - m_pkg = re.match(r"^Package: (.*)", line) - m_status = re.match(r"^Status:.*unpacked", line) - m_depends = re.match(r"^Depends: (.*)", line) - - #Only one of m_pkg, m_status or m_depends is not None at time - #If m_pkg is not None, we started a new package - if m_pkg is not None: - #Get Package name - pkg_name = m_pkg.group(1) - #Make sure we reset other variables - pkg_status_match = False - pkg_depends = "" - elif m_status is not None: - #New status matched - pkg_status_match = True - elif m_depends is not None: - #New depends macthed - pkg_depends = m_depends.group(1) - else: - pass - - #Now check if we can process package depends and postinst - if "" != pkg_name and pkg_status_match: - pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) - else: - #Not enough information - pass - - # remove package dependencies not in postinsts - pkg_names = list(pkgs.keys()) - for pkg_name in pkg_names: - deps = pkgs[pkg_name][:] - - for d in deps: - if d not in pkg_names: - 
pkgs[pkg_name].remove(d) - - return pkgs - - def _get_delayed_postinsts_common(self, status_file): - def _dep_resolve(graph, node, resolved, seen): - seen.append(node) - - for edge in graph[node]: - if edge not in resolved: - if edge in seen: - raise RuntimeError("Packages %s and %s have " \ - "a circular dependency in postinsts scripts." \ - % (node, edge)) - _dep_resolve(graph, edge, resolved, seen) - - resolved.append(node) - - pkg_list = [] - - pkgs = None - if not self.d.getVar('PACKAGE_INSTALL').strip(): - bb.note("Building empty image") - else: - pkgs = self._get_pkgs_postinsts(status_file) - if pkgs: - root = "__packagegroup_postinst__" - pkgs[root] = list(pkgs.keys()) - _dep_resolve(pkgs, root, pkg_list, []) - pkg_list.remove(root) - - if len(pkg_list) == 0: - return None - - return pkg_list - - def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir): - if bb.utils.contains("IMAGE_FEATURES", "package-management", - True, False, self.d): - return - num = 0 - for p in self._get_delayed_postinsts(): - bb.utils.mkdirhier(dst_postinst_dir) - - if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): - shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), - os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) - - num += 1 - -class DpkgRootfs(DpkgOpkgRootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = '^E:' - self.log_check_expected_regexes = \ - [ - "^E: Unmet dependencies." 
- ] - - bb.utils.remove(self.image_rootfs, True) - bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - self.manifest = DpkgManifest(d, manifest_dir) - self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'), - d.getVar('PACKAGE_ARCHS'), - d.getVar('DPKG_ARCH')) - - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS') - deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS') - - alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives") - bb.utils.mkdirhier(alt_dir) - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, deb_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - # Don't support incremental, so skip that - self.progress_reporter.next_stage() - - self.pm.update() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - self.pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - if self.progress_reporter: - # Don't support attemptonly, so skip that - self.progress_reporter.next_stage() - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self._setup_dbg_rootfs(['/var/lib/dpkg']) - - self.pm.fix_broken_dependencies() - - self.pm.mark_packages("installed") - - self.pm.run_pre_post_installs() - - execute_pre_post_process(self.d, deb_post_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - @staticmethod - def _depends_list(): - return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS'] - - def _get_delayed_postinsts(self): - status_file = self.image_rootfs + "/var/lib/dpkg/status" - return self._get_delayed_postinsts_common(status_file) - - 
def _save_postinsts(self): - dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts") - src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info") - return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - pass - - -class OpkgRootfs(DpkgOpkgRootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = '(exit 1|Collected errors)' - - self.manifest = OpkgManifest(d, manifest_dir) - self.opkg_conf = self.d.getVar("IPKGCONF_TARGET") - self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS") - - self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or "" - if self._remove_old_rootfs(): - bb.utils.remove(self.image_rootfs, True) - self.pm = OpkgPM(d, - self.image_rootfs, - self.opkg_conf, - self.pkg_archs) - else: - self.pm = OpkgPM(d, - self.image_rootfs, - self.opkg_conf, - self.pkg_archs) - self.pm.recover_packaging_data() - - bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - - def _prelink_file(self, root_dir, filename): - bb.note('prelink %s in %s' % (filename, root_dir)) - prelink_cfg = oe.path.join(root_dir, - self.d.expand('${sysconfdir}/prelink.conf')) - if not os.path.exists(prelink_cfg): - shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'), - prelink_cfg) - - cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink') - self._exec_shell_cmd([cmd_prelink, - '--root', - root_dir, - '-amR', - '-N', - '-c', - self.d.expand('${sysconfdir}/prelink.conf')]) - - ''' - Compare two files with the same key twice to see if they are equal. - If they are not equal, it means they are duplicated and come from - different packages. 
- 1st: Comapre them directly; - 2nd: While incremental image creation is enabled, one of the - files could be probaly prelinked in the previous image - creation and the file has been changed, so we need to - prelink the other one and compare them. - ''' - def _file_equal(self, key, f1, f2): - - # Both of them are not prelinked - if filecmp.cmp(f1, f2): - return True - - if bb.data.inherits_class('image-prelink', self.d): - if self.image_rootfs not in f1: - self._prelink_file(f1.replace(key, ''), f1) - - if self.image_rootfs not in f2: - self._prelink_file(f2.replace(key, ''), f2) - - # Both of them are prelinked - if filecmp.cmp(f1, f2): - return True - - # Not equal - return False - - """ - This function was reused from the old implementation. - See commit: "image.bbclass: Added variables for multilib support." by - Lianhao Lu. - """ - def _multilib_sanity_test(self, dirs): - - allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP") - if allow_replace is None: - allow_replace = "" - - allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace)) - error_prompt = "Multilib check error:" - - files = {} - for dir in dirs: - for root, subfolders, subfiles in os.walk(dir): - for file in subfiles: - item = os.path.join(root, file) - key = str(os.path.join("/", os.path.relpath(item, dir))) - - valid = True - if key in files: - #check whether the file is allow to replace - if allow_rep.match(key): - valid = True - else: - if os.path.exists(files[key]) and \ - os.path.exists(item) and \ - not self._file_equal(key, files[key], item): - valid = False - bb.fatal("%s duplicate files %s %s is not the same\n" % - (error_prompt, item, files[key])) - - #pass the check, add to list - if valid: - files[key] = item - - def _multilib_test_install(self, pkgs): - ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS") - bb.utils.mkdirhier(ml_temp) - - dirs = [self.image_rootfs] - - for variant in self.d.getVar("MULTILIB_VARIANTS").split(): - ml_target_rootfs = os.path.join(ml_temp, variant) - - 
bb.utils.remove(ml_target_rootfs, True) - - ml_opkg_conf = os.path.join(ml_temp, - variant + "-" + os.path.basename(self.opkg_conf)) - - ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False) - - ml_pm.update() - ml_pm.install(pkgs) - - dirs.append(ml_target_rootfs) - - self._multilib_sanity_test(dirs) - - ''' - While ipk incremental image generation is enabled, it will remove the - unneeded pkgs by comparing the old full manifest in previous existing - image and the new full manifest in the current image. - ''' - def _remove_extra_packages(self, pkgs_initial_install): - if self.inc_opkg_image_gen == "1": - # Parse full manifest in previous existing image creation session - old_full_manifest = self.manifest.parse_full_manifest() - - # Create full manifest for the current image session, the old one - # will be replaced by the new one. - self.manifest.create_full(self.pm) - - # Parse full manifest in current image creation session - new_full_manifest = self.manifest.parse_full_manifest() - - pkg_to_remove = list() - for pkg in old_full_manifest: - if pkg not in new_full_manifest: - pkg_to_remove.append(pkg) - - if pkg_to_remove != []: - bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) - self.pm.remove(pkg_to_remove) - - ''' - Compare with previous existing image creation, if some conditions - triggered, the previous old image should be removed. - The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS - and BAD_RECOMMENDATIONS' has been changed. 
- ''' - def _remove_old_rootfs(self): - if self.inc_opkg_image_gen != "1": - return True - - vars_list_file = self.d.expand('${T}/vars_list') - - old_vars_list = "" - if os.path.exists(vars_list_file): - old_vars_list = open(vars_list_file, 'r+').read() - - new_vars_list = '%s:%s:%s\n' % \ - ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(), - (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(), - (self.d.getVar('PACKAGE_EXCLUDE') or '').strip()) - open(vars_list_file, 'w+').write(new_vars_list) - - if old_vars_list != new_vars_list: - return True - - return False - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS') - opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS') - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, opkg_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - # Steps are a bit different in order, skip next - self.progress_reporter.next_stage() - - self.pm.update() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - if self.inc_opkg_image_gen == "1": - self._remove_extra_packages(pkgs_to_install) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - # For multilib, we perform a sanity test before final install - # If sanity test fails, it will automatically do a bb.fatal() - # and the installation will stop - if pkg_type == Manifest.PKG_TYPE_MULTILIB: - self._multilib_test_install(pkgs_to_install[pkg_type]) - - self.pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - opkg_dir = 
os.path.join(opkg_lib_dir, 'opkg') - self._setup_dbg_rootfs([opkg_dir]) - - execute_pre_post_process(self.d, opkg_post_process_cmds) - - if self.inc_opkg_image_gen == "1": - self.pm.backup_packaging_data() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - @staticmethod - def _depends_list(): - return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR'] - - def _get_delayed_postinsts(self): - status_file = os.path.join(self.image_rootfs, - self.d.getVar('OPKGLIBDIR').strip('/'), - "opkg", "status") - return self._get_delayed_postinsts_common(status_file) - - def _save_postinsts(self): - dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") - src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") - return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - self.pm.remove_lists() - def get_class_for_type(imgtype): - return {"rpm": RpmRootfs, - "ipk": OpkgRootfs, - "deb": DpkgRootfs}[imgtype] + import importlib + mod = importlib.import_module('oe.package_manager.' 
+ imgtype + '.rootfs') + return mod.PkgRootfs def variable_depends(d, manifest_dir=None): img_type = d.getVar('IMAGE_PKGTYPE') @@ -968,13 +374,9 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None) env_bkp = os.environ.copy() img_type = d.getVar('IMAGE_PKGTYPE') - if img_type == "rpm": - RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create() - elif img_type == "ipk": - OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create() - elif img_type == "deb": - DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create() + cls = get_class_for_type(img_type) + cls(d, manifest_dir, progress_reporter, logcatcher).create() os.environ.clear() os.environ.update(env_bkp) @@ -984,12 +386,10 @@ def image_list_installed_packages(d, rootfs_dir=None): rootfs_dir = d.getVar('IMAGE_ROOTFS') img_type = d.getVar('IMAGE_PKGTYPE') - if img_type == "rpm": - return RpmPkgsList(d, rootfs_dir).list_pkgs() - elif img_type == "ipk": - return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs() - elif img_type == "deb": - return DpkgPkgsList(d, rootfs_dir).list_pkgs() + + import importlib + cls = importlib.import_module('oe.package_manager.' 
+ img_type) + return cls.PMPkgsList(d, rootfs_dir).list_pkgs() if __name__ == "__main__": """ diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py new file mode 100644 index 0000000000..ec70b34805 --- /dev/null +++ b/meta/lib/oe/rust.py @@ -0,0 +1,5 @@ +# Handle mismatches between `uname -m`-style output and Rust's arch names +def arch_to_rust_arch(arch): + if arch == "ppc64le": + return "powerpc64le" + return arch diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py new file mode 100644 index 0000000000..3372f13a9d --- /dev/null +++ b/meta/lib/oe/sbom.py @@ -0,0 +1,78 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import collections + +DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe")) +DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file")) + + +def get_recipe_spdxid(d): + return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN")) + + +def get_package_spdxid(pkg): + return "SPDXRef-Package-%s" % pkg + + +def get_source_file_spdxid(d, idx): + return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx) + + +def get_packaged_file_spdxid(pkg, idx): + return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx) + + +def get_image_spdxid(img): + return "SPDXRef-Image-%s" % img + + +def get_sdk_spdxid(sdk): + return "SPDXRef-SDK-%s" % sdk + + +def write_doc(d, spdx_doc, subdir, spdx_deploy=None): + from pathlib import Path + + if spdx_deploy is None: + spdx_deploy = Path(d.getVar("SPDXDEPLOY")) + + dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json") + dest.parent.mkdir(exist_ok=True, parents=True) + with dest.open("wb") as f: + doc_sha1 = spdx_doc.to_json(f, sort_keys=True) + + l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_") + l.parent.mkdir(exist_ok=True, parents=True) + l.symlink_to(os.path.relpath(dest, l.parent)) + + return doc_sha1 + + +def read_doc(fn): + import hashlib + import oe.spdx + import io + import contextlib + + @contextlib.contextmanager + def get_file(): + if 
isinstance(fn, io.IOBase): + yield fn + else: + with fn.open("rb") as f: + yield f + + with get_file() as f: + sha1 = hashlib.sha1() + while True: + chunk = f.read(4096) + if not chunk: + break + sha1.update(chunk) + + f.seek(0) + doc = oe.spdx.SPDXDocument.from_json(f) + + return (doc, sha1.hexdigest()) diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py index 878ee1647f..27347667e8 100644 --- a/meta/lib/oe/sdk.py +++ b/meta/lib/oe/sdk.py @@ -1,10 +1,12 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + from abc import ABCMeta, abstractmethod from oe.utils import execute_pre_post_process from oe.manifest import * from oe.package_manager import * import os -import shutil -import glob import traceback class Sdk(object, metaclass=ABCMeta): @@ -84,10 +86,6 @@ class Sdk(object, metaclass=ABCMeta): bb.warn("cannot remove SDK dir: %s" % path) def install_locales(self, pm): - # This is only relevant for glibc - if self.d.getVar("TCLIBC") != "glibc": - return - linguas = self.d.getVar("SDKIMAGE_LINGUAS") if linguas: import fnmatch @@ -110,283 +108,6 @@ class Sdk(object, metaclass=ABCMeta): pass -class RpmSdk(Sdk): - def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"): - super(RpmSdk, self).__init__(d, manifest_dir) - - self.target_manifest = RpmManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = RpmManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - rpm_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - rpm_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = RpmPM(d, - self.sdk_target_sysroot, - self.d.getVar('TARGET_VENDOR'), - 'target', - rpm_repo_workdir=rpm_repo_workdir - ) - - self.host_pm = RpmPM(d, - self.sdk_host_sysroot, - self.d.getVar('SDK_VENDOR'), - 'host', - "SDK_PACKAGE_ARCHS", - "SDK_OS", - rpm_repo_workdir=rpm_repo_workdir - ) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - pm.create_configs() - 
pm.write_index() - pm.update() - - pkgs = [] - pkgs_attempt = [] - for pkg_type in pkgs_to_install: - if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: - pkgs_attempt += pkgs_to_install[pkg_type] - else: - pkgs += pkgs_to_install[pkg_type] - - pm.install(pkgs) - - pm.install(pkgs_attempt, True) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - # Move host RPM library data - native_rpm_state_dir = os.path.join(self.sdk_output, - self.sdk_native_path, - self.d.getVar('localstatedir_nativesdk').strip('/'), - "lib", - "rpm" - ) - self.mkdirhier(native_rpm_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, - "var", - "lib", - "rpm", - "*")): - self.movefile(f, native_rpm_state_dir) - - self.remove(os.path.join(self.sdk_output, "var"), True) - - # Move host sysconfig data - native_sysconf_dir = os.path.join(self.sdk_output, - self.sdk_native_path, - self.d.getVar('sysconfdir', - True).strip('/'), - ) - self.mkdirhier(native_sysconf_dir) - for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")): - self.movefile(f, 
native_sysconf_dir) - for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")): - self.movefile(f, native_sysconf_dir) - self.remove(os.path.join(self.sdk_output, "etc"), True) - - -class OpkgSdk(Sdk): - def __init__(self, d, manifest_dir=None): - super(OpkgSdk, self).__init__(d, manifest_dir) - - self.target_conf = self.d.getVar("IPKGCONF_TARGET") - self.host_conf = self.d.getVar("IPKGCONF_SDK") - - self.target_manifest = OpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = OpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - ipk_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - ipk_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf, - self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"), - ipk_repo_workdir=ipk_repo_workdir) - - self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf, - self.d.getVar("SDK_PACKAGE_ARCHS"), - ipk_repo_workdir=ipk_repo_workdir) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1": - pm.write_index() - - pm.update() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - 
bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) - host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) - - self.mkdirhier(target_sysconfdir) - shutil.copy(self.target_conf, target_sysconfdir) - os.chmod(os.path.join(target_sysconfdir, - os.path.basename(self.target_conf)), 0o644) - - self.mkdirhier(host_sysconfdir) - shutil.copy(self.host_conf, host_sysconfdir) - os.chmod(os.path.join(host_sysconfdir, - os.path.basename(self.host_conf)), 0o644) - - native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, - self.d.getVar('localstatedir_nativesdk').strip('/'), - "lib", "opkg") - self.mkdirhier(native_opkg_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): - self.movefile(f, native_opkg_state_dir) - - self.remove(os.path.join(self.sdk_output, "var"), True) - - -class DpkgSdk(Sdk): - def __init__(self, d, manifest_dir=None): - super(DpkgSdk, self).__init__(d, manifest_dir) - - self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt") - self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk") - - self.target_manifest = DpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = DpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - deb_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - deb_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = DpkgPM(d, self.sdk_target_sysroot, - self.d.getVar("PACKAGE_ARCHS"), - self.d.getVar("DPKG_ARCH"), - 
self.target_conf_dir, - deb_repo_workdir=deb_repo_workdir) - - self.host_pm = DpkgPM(d, self.sdk_host_sysroot, - self.d.getVar("SDK_PACKAGE_ARCHS"), - self.d.getVar("DEB_SDK_ARCH"), - self.host_conf_dir, - deb_repo_workdir=deb_repo_workdir) - - def _copy_apt_dir_to(self, dst_dir): - staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE") - - self.remove(dst_dir, True) - - shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - pm.write_index() - pm.update() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path, - "etc", "apt")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - native_dpkg_state_dir = os.path.join(self.sdk_output, 
self.sdk_native_path, - "var", "lib", "dpkg") - self.mkdirhier(native_dpkg_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")): - self.movefile(f, native_dpkg_state_dir) - self.remove(os.path.join(self.sdk_output, "var"), True) - - - def sdk_list_installed_packages(d, target, rootfs_dir=None): if rootfs_dir is None: sdk_output = d.getVar('SDK_OUTPUT') @@ -394,27 +115,22 @@ def sdk_list_installed_packages(d, target, rootfs_dir=None): rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True] + if target is False: + ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK") + d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target) + img_type = d.getVar('IMAGE_PKGTYPE') - if img_type == "rpm": - arch_var = ["SDK_PACKAGE_ARCHS", None][target is True] - os_var = ["SDK_OS", None][target is True] - return RpmPkgsList(d, rootfs_dir).list_pkgs() - elif img_type == "ipk": - conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True] - return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs() - elif img_type == "deb": - return DpkgPkgsList(d, rootfs_dir).list_pkgs() + import importlib + cls = importlib.import_module('oe.package_manager.' + img_type) + return cls.PMPkgsList(d, rootfs_dir).list_pkgs() def populate_sdk(d, manifest_dir=None): env_bkp = os.environ.copy() img_type = d.getVar('IMAGE_PKGTYPE') - if img_type == "rpm": - RpmSdk(d, manifest_dir).populate() - elif img_type == "ipk": - OpkgSdk(d, manifest_dir).populate() - elif img_type == "deb": - DpkgSdk(d, manifest_dir).populate() + import importlib + cls = importlib.import_module('oe.package_manager.' 
+ img_type + '.sdk') + cls.PkgSdk(d, manifest_dir).populate() os.environ.clear() os.environ.update(env_bkp) diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py new file mode 100644 index 0000000000..14ca706895 --- /dev/null +++ b/meta/lib/oe/spdx.py @@ -0,0 +1,342 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +# +# This library is intended to capture the JSON SPDX specification in a type +# safe manner. It is not intended to encode any particular OE specific +# behaviors, see the sbom.py for that. +# +# The documented SPDX spec document doesn't cover the JSON syntax for +# particular configuration, which can make it hard to determine what the JSON +# syntax should be. I've found it is actually much simpler to read the official +# SPDX JSON schema which can be found here: https://github.com/spdx/spdx-spec +# in schemas/spdx-schema.json +# + +import hashlib +import itertools +import json + +SPDX_VERSION = "2.2" + + +# +# The following are the support classes that are used to implement SPDX object +# + +class _Property(object): + """ + A generic SPDX object property. 
The different types will derive from this + class + """ + + def __init__(self, *, default=None): + self.default = default + + def setdefault(self, dest, name): + if self.default is not None: + dest.setdefault(name, self.default) + + +class _String(_Property): + """ + A scalar string property for an SPDX object + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def set_property(self, attrs, name): + def get_helper(obj): + return obj._spdx[name] + + def set_helper(obj, value): + obj._spdx[name] = value + + def del_helper(obj): + del obj._spdx[name] + + attrs[name] = property(get_helper, set_helper, del_helper) + + def init(self, source): + return source + + +class _Object(_Property): + """ + A scalar SPDX object property of a SPDX object + """ + + def __init__(self, cls, **kwargs): + super().__init__(**kwargs) + self.cls = cls + + def set_property(self, attrs, name): + def get_helper(obj): + if not name in obj._spdx: + obj._spdx[name] = self.cls() + return obj._spdx[name] + + def set_helper(obj, value): + obj._spdx[name] = value + + def del_helper(obj): + del obj._spdx[name] + + attrs[name] = property(get_helper, set_helper) + + def init(self, source): + return self.cls(**source) + + +class _ListProperty(_Property): + """ + A list of SPDX properties + """ + + def __init__(self, prop, **kwargs): + super().__init__(**kwargs) + self.prop = prop + + def set_property(self, attrs, name): + def get_helper(obj): + if not name in obj._spdx: + obj._spdx[name] = [] + return obj._spdx[name] + + def set_helper(obj, value): + obj._spdx[name] = list(value) + + def del_helper(obj): + del obj._spdx[name] + + attrs[name] = property(get_helper, set_helper, del_helper) + + def init(self, source): + return [self.prop.init(o) for o in source] + + +class _StringList(_ListProperty): + """ + A list of strings as a property for an SPDX object + """ + + def __init__(self, **kwargs): + super().__init__(_String(), **kwargs) + + +class _ObjectList(_ListProperty): + """ + A 
list of SPDX objects as a property for an SPDX object + """ + + def __init__(self, cls, **kwargs): + super().__init__(_Object(cls), **kwargs) + + +class MetaSPDXObject(type): + """ + A metaclass that allows properties (anything derived from a _Property + class) to be defined for a SPDX object + """ + def __new__(mcls, name, bases, attrs): + attrs["_properties"] = {} + + for key in attrs.keys(): + if isinstance(attrs[key], _Property): + prop = attrs[key] + attrs["_properties"][key] = prop + prop.set_property(attrs, key) + + return super().__new__(mcls, name, bases, attrs) + + +class SPDXObject(metaclass=MetaSPDXObject): + """ + The base SPDX object; all SPDX spec classes must derive from this class + """ + def __init__(self, **d): + self._spdx = {} + + for name, prop in self._properties.items(): + prop.setdefault(self._spdx, name) + if name in d: + self._spdx[name] = prop.init(d[name]) + + def serializer(self): + return self._spdx + + def __setattr__(self, name, value): + if name in self._properties or name == "_spdx": + super().__setattr__(name, value) + return + raise KeyError("%r is not a valid SPDX property" % name) + +# +# These are the SPDX objects implemented from the spec. The *only* properties +# that can be added to these objects are ones directly specified in the SPDX +# spec, however you may add helper functions to make operations easier. +# +# Defaults should *only* be specified if the SPDX spec says there is a certain +# required value for a field (e.g. dataLicense), or if the field is mandatory +# and has some sane "this field is unknown" (e.g. 
"NOASSERTION") +# + +class SPDXAnnotation(SPDXObject): + annotationDate = _String() + annotationType = _String() + annotator = _String() + comment = _String() + +class SPDXChecksum(SPDXObject): + algorithm = _String() + checksumValue = _String() + + +class SPDXRelationship(SPDXObject): + spdxElementId = _String() + relatedSpdxElement = _String() + relationshipType = _String() + comment = _String() + annotations = _ObjectList(SPDXAnnotation) + + +class SPDXExternalReference(SPDXObject): + referenceCategory = _String() + referenceType = _String() + referenceLocator = _String() + + +class SPDXPackageVerificationCode(SPDXObject): + packageVerificationCodeValue = _String() + packageVerificationCodeExcludedFiles = _StringList() + + +class SPDXPackage(SPDXObject): + name = _String() + SPDXID = _String() + versionInfo = _String() + downloadLocation = _String(default="NOASSERTION") + packageSupplier = _String(default="NOASSERTION") + homepage = _String() + licenseConcluded = _String(default="NOASSERTION") + licenseDeclared = _String(default="NOASSERTION") + summary = _String() + description = _String() + sourceInfo = _String() + copyrightText = _String(default="NOASSERTION") + licenseInfoFromFiles = _StringList(default=["NOASSERTION"]) + externalRefs = _ObjectList(SPDXExternalReference) + packageVerificationCode = _Object(SPDXPackageVerificationCode) + hasFiles = _StringList() + packageFileName = _String() + annotations = _ObjectList(SPDXAnnotation) + + +class SPDXFile(SPDXObject): + SPDXID = _String() + fileName = _String() + licenseConcluded = _String(default="NOASSERTION") + copyrightText = _String(default="NOASSERTION") + licenseInfoInFiles = _StringList(default=["NOASSERTION"]) + checksums = _ObjectList(SPDXChecksum) + fileTypes = _StringList() + + +class SPDXCreationInfo(SPDXObject): + created = _String() + licenseListVersion = _String() + comment = _String() + creators = _StringList() + + +class SPDXExternalDocumentRef(SPDXObject): + externalDocumentId = _String() + 
spdxDocument = _String() + checksum = _Object(SPDXChecksum) + + +class SPDXExtractedLicensingInfo(SPDXObject): + name = _String() + comment = _String() + licenseId = _String() + extractedText = _String() + + +class SPDXDocument(SPDXObject): + spdxVersion = _String(default="SPDX-" + SPDX_VERSION) + dataLicense = _String(default="CC0-1.0") + SPDXID = _String(default="SPDXRef-DOCUMENT") + name = _String() + documentNamespace = _String() + creationInfo = _Object(SPDXCreationInfo) + packages = _ObjectList(SPDXPackage) + files = _ObjectList(SPDXFile) + relationships = _ObjectList(SPDXRelationship) + externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef) + hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo) + + def __init__(self, **d): + super().__init__(**d) + + def to_json(self, f, *, sort_keys=False, indent=None, separators=None): + class Encoder(json.JSONEncoder): + def default(self, o): + if isinstance(o, SPDXObject): + return o.serializer() + + return super().default(o) + + sha1 = hashlib.sha1() + for chunk in Encoder( + sort_keys=sort_keys, + indent=indent, + separators=separators, + ).iterencode(self): + chunk = chunk.encode("utf-8") + f.write(chunk) + sha1.update(chunk) + + return sha1.hexdigest() + + @classmethod + def from_json(cls, f): + return cls(**json.load(f)) + + def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None): + if isinstance(_from, SPDXObject): + from_spdxid = _from.SPDXID + else: + from_spdxid = _from + + if isinstance(_to, SPDXObject): + to_spdxid = _to.SPDXID + else: + to_spdxid = _to + + r = SPDXRelationship( + spdxElementId=from_spdxid, + relatedSpdxElement=to_spdxid, + relationshipType=relationship, + ) + + if comment is not None: + r.comment = comment + + if annotation is not None: + r.annotations.append(annotation) + + self.relationships.append(r) + + def find_by_spdxid(self, spdxid): + for o in itertools.chain(self.packages, self.files): + if o.SPDXID == spdxid: + return o + return 
None + + def find_external_document_ref(self, namespace): + for r in self.externalDocumentRefs: + if r.spdxDocument == namespace: + return r + return None diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py index a83af519ec..2cf858e201 100644 --- a/meta/lib/oe/sstatesig.py +++ b/meta/lib/oe/sstatesig.py @@ -1,7 +1,11 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# import bb.siggen +import bb.runqueue import oe -def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache): +def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches): # Return True if we should keep the dependency, False to drop it def isNative(x): return x.endswith("-native") @@ -9,23 +13,26 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache): return "-cross-" in x def isNativeSDK(x): return x.startswith("nativesdk-") - def isKernel(fn): - inherits = " ".join(dataCache.inherits[fn]) + def isKernel(mc, fn): + inherits = " ".join(dataCaches[mc].inherits[fn]) return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1 - def isPackageGroup(fn): - inherits = " ".join(dataCache.inherits[fn]) + def isPackageGroup(mc, fn): + inherits = " ".join(dataCaches[mc].inherits[fn]) return "/packagegroup.bbclass" in inherits - def isAllArch(fn): - inherits = " ".join(dataCache.inherits[fn]) + def isAllArch(mc, fn): + inherits = " ".join(dataCaches[mc].inherits[fn]) return "/allarch.bbclass" in inherits - def isImage(fn): - return "/image.bbclass" in " ".join(dataCache.inherits[fn]) - - # (Almost) always include our own inter-task dependencies. - # The exception is the special do_kernel_configme->do_unpack_and_patch - # dependency from archiver.bbclass. 
- if recipename == depname: - if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"): + def isImage(mc, fn): + return "/image.bbclass" in " ".join(dataCaches[mc].inherits[fn]) + + depmc, _, deptaskname, depmcfn = bb.runqueue.split_tid_mcfn(dep) + mc, _ = bb.runqueue.split_mc(fn) + + # (Almost) always include our own inter-task dependencies (unless it comes + # from a mcdepends). The exception is the special + # do_kernel_configme->do_unpack_and_patch dependency from archiver.bbclass. + if recipename == depname and depmc == mc: + if task == "do_kernel_configme" and deptaskname == "do_unpack_and_patch": return False return True @@ -44,22 +51,21 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache): # Only target packages beyond here # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes - if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname): + if isPackageGroup(mc, fn) and isAllArch(mc, fn) and not isNative(depname): return False # Exclude well defined machine specific configurations which don't change ABI - if depname in siggen.abisaferecipes and not isImage(fn): + if depname in siggen.abisaferecipes and not isImage(mc, fn): return False # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum - # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum + # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum # is machine specific. # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes) # and we reccomend a kernel-module, we exclude the dependency. 
- depfn = dep.rsplit(".", 1)[0] - if dataCache and isKernel(depfn) and not isKernel(fn): - for pkg in dataCache.runrecs[fn]: - if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1: + if dataCaches and isKernel(depmc, depmcfn) and not isKernel(mc, fn): + for pkg in dataCaches[mc].runrecs[fn]: + if " ".join(dataCaches[mc].runrecs[fn][pkg]).find("kernel-module-") != -1: return False # Default to keep dependencies @@ -84,11 +90,12 @@ class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic): self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split() self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split() pass - def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): - return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches) + +class SignatureGeneratorOEBasicHashMixIn(object): + supports_multiconfig_datacaches = True -class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): - name = "OEBasicHash" def init_rundepcheck(self, data): self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split() self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split() @@ -101,6 +108,7 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or "").split() self.unlockedrecipes = { k: "" for k in self.unlockedrecipes } + self._internal = False pass def tasks_resolved(self, virtmap, virtpnmap, dataCache): @@ -122,16 +130,15 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): newsafedeps.append(a1 + "->" + a2) self.saferecipedeps = newsafedeps - def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None): - return 
sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache) + def rundep_check(self, fn, recipename, task, dep, depname, dataCaches = None): + return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCaches) def get_taskdata(self): - data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata() - return (data, self.lockedpnmap, self.lockedhashfn) + return (self.lockedpnmap, self.lockedhashfn, self.lockedhashes) + super().get_taskdata() def set_taskdata(self, data): - coredata, self.lockedpnmap, self.lockedhashfn = data - super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata) + self.lockedpnmap, self.lockedhashfn, self.lockedhashes = data[:3] + super().set_taskdata(data[3:]) def dump_sigs(self, dataCache, options): sigfile = os.getcwd() + "/locked-sigs.inc" @@ -139,112 +146,135 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): self.dump_lockedsigs(sigfile) return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options) - def get_taskhash(self, fn, task, deps, dataCache): - h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache) - recipename = dataCache.pkg_fn[fn] + def get_taskhash(self, tid, deps, dataCaches): + if tid in self.lockedhashes: + if self.lockedhashes[tid]: + return self.lockedhashes[tid] + else: + return super().get_taskhash(tid, deps, dataCaches) + + h = super().get_taskhash(tid, deps, dataCaches) + + (mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid) + + recipename = dataCaches[mc].pkg_fn[fn] self.lockedpnmap[fn] = recipename - self.lockedhashfn[fn] = dataCache.hashfn[fn] + self.lockedhashfn[fn] = dataCaches[mc].hashfn[fn] unlocked = False if recipename in self.unlockedrecipes: unlocked = True else: - def get_mc(tid): - tid = tid.rsplit('.', 1)[0] - if tid.startswith('multiconfig:'): - elems = tid.split(':') - return elems[1] def recipename_from_dep(dep): - # The dep entry will look something like - # 
/path/path/recipename.bb.task, virtual:native:/p/foo.bb.task, - # ... - - fn = dep.rsplit('.', 1)[0] - return dataCache.pkg_fn[fn] + (depmc, _, _, depfn) = bb.runqueue.split_tid_mcfn(dep) + return dataCaches[depmc].pkg_fn[depfn] - mc = get_mc(fn) # If any unlocked recipe is in the direct dependencies then the # current recipe should be unlocked as well. - depnames = [ recipename_from_dep(x) for x in deps if mc == get_mc(x)] + depnames = [ recipename_from_dep(x) for x in deps if mc == bb.runqueue.mc_from_tid(x)] if any(x in y for y in depnames for x in self.unlockedrecipes): self.unlockedrecipes[recipename] = '' unlocked = True if not unlocked and recipename in self.lockedsigs: if task in self.lockedsigs[recipename]: - k = fn + "." + task h_locked = self.lockedsigs[recipename][task][0] var = self.lockedsigs[recipename][task][1] - self.lockedhashes[k] = h_locked - self.taskhash[k] = h_locked + self.lockedhashes[tid] = h_locked + self._internal = True + unihash = self.get_unihash(tid) + self._internal = False #bb.warn("Using %s %s %s" % (recipename, task, h)) - if h != h_locked: + if h != h_locked and h_locked != unihash: self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s' % (recipename, task, h, h_locked, var)) return h_locked + + self.lockedhashes[tid] = False #bb.warn("%s %s %s" % (recipename, task, h)) return h + def get_stampfile_hash(self, tid): + if tid in self.lockedhashes and self.lockedhashes[tid]: + return self.lockedhashes[tid] + return super().get_stampfile_hash(tid) + + def get_unihash(self, tid): + if tid in self.lockedhashes and self.lockedhashes[tid] and not self._internal: + return self.lockedhashes[tid] + return super().get_unihash(tid) + def dump_sigtask(self, fn, task, stampbase, runtime): - k = fn + "." 
+ task - if k in self.lockedhashes: + tid = fn + ":" + task + if tid in self.lockedhashes and self.lockedhashes[tid]: return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime) def dump_lockedsigs(self, sigfile, taskfilter=None): types = {} - for k in self.runtaskdeps: + for tid in self.runtaskdeps: if taskfilter: - if not k in taskfilter: + if not tid in taskfilter: continue - fn = k.rsplit(".",1)[0] + fn = bb.runqueue.fn_from_tid(tid) t = self.lockedhashfn[fn].split(" ")[1].split(":")[5] t = 't-' + t.replace('_', '-') if t not in types: types[t] = [] - types[t].append(k) + types[t].append(tid) with open(sigfile, "w") as f: l = sorted(types) for t in l: f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t) types[t].sort() - sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]]) - for k in sortedk: - fn = k.rsplit(".",1)[0] - task = k.rsplit(".",1)[1] - if k not in self.taskhash: + sortedtid = sorted(types[t], key=lambda tid: self.lockedpnmap[bb.runqueue.fn_from_tid(tid)]) + for tid in sortedtid: + (_, _, task, fn) = bb.runqueue.split_tid_mcfn(tid) + if tid not in self.taskhash: continue - f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n") + f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n") f.write(' "\n') - f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l))) + f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l))) + + def dump_siglist(self, sigfile, path_prefix_strip=None): + def strip_fn(fn): + nonlocal path_prefix_strip + if not path_prefix_strip: + return fn + + fn_exp = fn.split(":") + if fn_exp[-1].startswith(path_prefix_strip): + fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):] + + return ":".join(fn_exp) - def dump_siglist(self, sigfile): with open(sigfile, "w") as f: tasks = [] for taskitem in self.taskhash: - (fn, task) = taskitem.rsplit(".", 1) + (fn, task) = taskitem.rsplit(":", 1) pn 
= self.lockedpnmap[fn] - tasks.append((pn, task, fn, self.taskhash[taskitem])) + tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem])) for (pn, task, fn, taskhash) in sorted(tasks): - f.write('%s.%s %s %s\n' % (pn, task, fn, taskhash)) + f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash)) - def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d): + def checkhashes(self, sq_data, missed, found, d): warn_msgs = [] error_msgs = [] sstate_missing_msgs = [] - for task in range(len(sq_fn)): - if task not in ret: + for tid in sq_data['hash']: + if tid not in found: for pn in self.lockedsigs: - if sq_hash[task] in iter(self.lockedsigs[pn].values()): - if sq_task[task] == 'do_shared_workdir': + taskname = bb.runqueue.taskname_from_tid(tid) + if sq_data['hash'][tid] in iter(self.lockedsigs[pn].values()): + if taskname == 'do_shared_workdir': continue sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?" - % (pn, sq_task[task], sq_hash[task])) + % (pn, taskname, sq_data['hash'][tid])) checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK") if checklevel == 'warn': @@ -263,176 +293,20 @@ class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash): if error_msgs: bb.fatal("\n".join(error_msgs)) -class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHash): +class SignatureGeneratorOEBasicHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorBasicHash): + name = "OEBasicHash" + +class SignatureGeneratorOEEquivHash(SignatureGeneratorOEBasicHashMixIn, bb.siggen.SignatureGeneratorUniHashMixIn, bb.siggen.SignatureGeneratorBasicHash): name = "OEEquivHash" def init_rundepcheck(self, data): super().init_rundepcheck(data) - self.server = data.getVar('SSTATE_HASHEQUIV_SERVER') + self.server = data.getVar('BB_HASHSERVE') + if not self.server: + bb.fatal("OEEquivHash requires BB_HASHSERVE to be set") self.method = data.getVar('SSTATE_HASHEQUIV_METHOD') - self.unihashes = 
bb.persist_data.persist('SSTATESIG_UNIHASH_CACHE_v1_' + self.method.replace('.', '_'), data) - - def get_taskdata(self): - return (self.server, self.method) + super().get_taskdata() - - def set_taskdata(self, data): - self.server, self.method = data[:2] - super().set_taskdata(data[2:]) - - def __get_task_unihash_key(self, task): - # TODO: The key only *needs* to be the taskhash, the task is just - # convenient - return '%s:%s' % (task, self.taskhash[task]) - - def get_stampfile_hash(self, task): - if task in self.taskhash: - # If a unique hash is reported, use it as the stampfile hash. This - # ensures that if a task won't be re-run if the taskhash changes, - # but it would result in the same output hash - unihash = self.unihashes.get(self.__get_task_unihash_key(task)) - if unihash is not None: - return unihash - - return super().get_stampfile_hash(task) - - def get_unihash(self, task): - import urllib - import json - - taskhash = self.taskhash[task] - - key = self.__get_task_unihash_key(task) - - # TODO: This cache can grow unbounded. It probably only needs to keep - # for each task - unihash = self.unihashes.get(key) - if unihash is not None: - return unihash - - # In the absence of being able to discover a unique hash from the - # server, make it be equivalent to the taskhash. The unique "hash" only - # really needs to be a unique string (not even necessarily a hash), but - # making it match the taskhash has a few advantages: - # - # 1) All of the sstate code that assumes hashes can be the same - # 2) It provides maximal compatibility with builders that don't use - # an equivalency server - # 3) The value is easy for multiple independent builders to derive the - # same unique hash from the same input. This means that if the - # independent builders find the same taskhash, but it isn't reported - # to the server, there is a better chance that they will agree on - # the unique hash. 
- unihash = taskhash - - try: - url = '%s/v1/equivalent?%s' % (self.server, - urllib.parse.urlencode({'method': self.method, 'taskhash': self.taskhash[task]})) - - request = urllib.request.Request(url) - response = urllib.request.urlopen(request) - data = response.read().decode('utf-8') - - json_data = json.loads(data) - - if json_data: - unihash = json_data['unihash'] - # A unique hash equal to the taskhash is not very interesting, - # so it is reported it at debug level 2. If they differ, that - # is much more interesting, so it is reported at debug level 1 - bb.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, task, self.server)) - else: - bb.debug(2, 'No reported unihash for %s:%s from %s' % (task, taskhash, self.server)) - except urllib.error.URLError as e: - bb.warn('Failure contacting Hash Equivalence Server %s: %s' % (self.server, str(e))) - except (KeyError, json.JSONDecodeError) as e: - bb.warn('Poorly formatted response from %s: %s' % (self.server, str(e))) - - self.unihashes[key] = unihash - return unihash - - def report_unihash(self, path, task, d): - import urllib - import json - import tempfile - import base64 - import importlib - - taskhash = d.getVar('BB_TASKHASH') - unihash = d.getVar('BB_UNIHASH') - report_taskdata = d.getVar('SSTATE_HASHEQUIV_REPORT_TASKDATA') == '1' - tempdir = d.getVar('T') - fn = d.getVar('BB_FILENAME') - key = fn + '.do_' + task + ':' + taskhash - - # Sanity checks - cache_unihash = self.unihashes.get(key) - if cache_unihash is None: - bb.fatal('%s not in unihash cache. 
Please report this error' % key) - - if cache_unihash != unihash: - bb.fatal("Cache unihash %s doesn't match BB_UNIHASH %s" % (cache_unihash, unihash)) - - sigfile = None - sigfile_name = "depsig.do_%s.%d" % (task, os.getpid()) - sigfile_link = "depsig.do_%s" % task - - try: - sigfile = open(os.path.join(tempdir, sigfile_name), 'w+b') - - locs = {'path': path, 'sigfile': sigfile, 'task': task, 'd': d} - - (module, method) = self.method.rsplit('.', 1) - locs['method'] = getattr(importlib.import_module(module), method) - - outhash = bb.utils.better_eval('method(path, sigfile, task, d)', locs) - - try: - url = '%s/v1/equivalent' % self.server - task_data = { - 'taskhash': taskhash, - 'method': self.method, - 'outhash': outhash, - 'unihash': unihash, - 'owner': d.getVar('SSTATE_HASHEQUIV_OWNER') - } - - if report_taskdata: - sigfile.seek(0) - - task_data['PN'] = d.getVar('PN') - task_data['PV'] = d.getVar('PV') - task_data['PR'] = d.getVar('PR') - task_data['task'] = task - task_data['outhash_siginfo'] = sigfile.read().decode('utf-8') - - headers = {'content-type': 'application/json'} - - request = urllib.request.Request(url, json.dumps(task_data).encode('utf-8'), headers) - response = urllib.request.urlopen(request) - data = response.read().decode('utf-8') - - json_data = json.loads(data) - new_unihash = json_data['unihash'] - - if new_unihash != unihash: - bb.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server)) - else: - bb.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server)) - except urllib.error.URLError as e: - bb.warn('Failure contacting Hash Equivalence Server %s: %s' % (self.server, str(e))) - except (KeyError, json.JSONDecodeError) as e: - bb.warn('Poorly formatted response from %s: %s' % (self.server, str(e))) - finally: - if sigfile: - sigfile.close() - - sigfile_link_path = os.path.join(tempdir, sigfile_link) - bb.utils.remove(sigfile_link_path) - - try: - 
os.symlink(sigfile_name, sigfile_link_path) - except OSError: - pass + if not self.method: + bb.fatal("OEEquivHash requires SSTATE_HASHEQUIV_METHOD to be set") # Insert these classes into siggen's namespace so it can see and select them bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic @@ -449,7 +323,7 @@ def find_siginfo(pn, taskname, taskhashlist, d): if not taskname: # We have to derive pn and taskname key = pn - splitit = key.split('.bb.') + splitit = key.split('.bb:') taskname = splitit[1] pn = os.path.basename(splitit[0]).split('_')[0] if key.startswith('virtual:native:'): @@ -563,13 +437,13 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache): d2 = multilibcache[variant] if taskdata.endswith("-native"): - pkgarchs = ["${BUILD_ARCH}"] + pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"] elif taskdata.startswith("nativesdk-"): pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"] elif "-cross-canadian" in taskdata: pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"] elif "-cross-" in taskdata: - pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"] + pkgarchs = ["${BUILD_ARCH}"] elif "-crosssdk" in taskdata: pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"] else: @@ -582,7 +456,7 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache): manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname)) if os.path.exists(manifest): return manifest, d2 - bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant)) + bb.fatal("Manifest %s not found in %s (variant '%s')?" 
% (manifest, d2.expand(" ".join(pkgarchs)), variant)) return None, d2 def OEOuthashBasic(path, sigfile, task, d): @@ -596,6 +470,8 @@ def OEOuthashBasic(path, sigfile, task, d): import stat import pwd import grp + import re + import fnmatch def update_hash(s): s = s.encode('utf-8') @@ -605,12 +481,37 @@ def OEOuthashBasic(path, sigfile, task, d): h = hashlib.sha256() prev_dir = os.getcwd() + corebase = d.getVar("COREBASE") + tmpdir = d.getVar("TMPDIR") include_owners = os.environ.get('PSEUDO_DISABLED') == '0' + if "package_write_" in task or task == "package_qa": + include_owners = False + include_timestamps = False + include_root = True + if task == "package": + include_timestamps = True + include_root = False + hash_version = d.getVar('HASHEQUIV_HASH_VERSION') + extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA") + + filemaps = {} + for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split(): + entry = m.split(":") + if len(entry) != 3 or entry[0] != task: + continue + filemaps.setdefault(entry[1], []) + filemaps[entry[1]].append(entry[2]) try: os.chdir(path) + basepath = os.path.normpath(path) update_hash("OEOuthashBasic\n") + if hash_version: + update_hash(hash_version + "\n") + + if extra_sigdata: + update_hash(extra_sigdata + "\n") # It is only currently useful to get equivalent hashes for things that # can be restored from sstate. 
Since the sstate object is named using @@ -655,23 +556,33 @@ def OEOuthashBasic(path, sigfile, task, d): else: add_perm(stat.S_IXUSR, 'x') - add_perm(stat.S_IRGRP, 'r') - add_perm(stat.S_IWGRP, 'w') - if stat.S_ISGID & s.st_mode: - add_perm(stat.S_IXGRP, 's', 'S') - else: - add_perm(stat.S_IXGRP, 'x') + if include_owners: + # Group/other permissions are only relevant in pseudo context + add_perm(stat.S_IRGRP, 'r') + add_perm(stat.S_IWGRP, 'w') + if stat.S_ISGID & s.st_mode: + add_perm(stat.S_IXGRP, 's', 'S') + else: + add_perm(stat.S_IXGRP, 'x') - add_perm(stat.S_IROTH, 'r') - add_perm(stat.S_IWOTH, 'w') - if stat.S_ISVTX & s.st_mode: - update_hash('t') - else: - add_perm(stat.S_IXOTH, 'x') + add_perm(stat.S_IROTH, 'r') + add_perm(stat.S_IWOTH, 'w') + if stat.S_ISVTX & s.st_mode: + update_hash('t') + else: + add_perm(stat.S_IXOTH, 'x') - if include_owners: - update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name) - update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name) + try: + update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name) + update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name) + except KeyError as e: + bb.warn("KeyError in %s" % path) + msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match " + "any user/group on target. This may be due to host contamination." 
% (e, path, s.st_uid, s.st_gid)) + raise Exception(msg).with_traceback(e.__traceback__) + + if include_timestamps: + update_hash(" %10d" % s.st_mtime) update_hash(" ") if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode): @@ -679,8 +590,13 @@ def OEOuthashBasic(path, sigfile, task, d): else: update_hash(" " * 9) + filterfile = False + for entry in filemaps: + if fnmatch.fnmatch(path, entry): + filterfile = True + update_hash(" ") - if stat.S_ISREG(s.st_mode): + if stat.S_ISREG(s.st_mode) and not filterfile: update_hash("%10d" % s.st_size) else: update_hash(" " * 10) @@ -689,9 +605,24 @@ def OEOuthashBasic(path, sigfile, task, d): fh = hashlib.sha256() if stat.S_ISREG(s.st_mode): # Hash file contents - with open(path, 'rb') as d: - for chunk in iter(lambda: d.read(4096), b""): + if filterfile: + # Need to ignore paths in crossscripts and postinst-useradd files. + with open(path, 'rb') as d: + chunk = d.read() + chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'') + for entry in filemaps: + if not fnmatch.fnmatch(path, entry): + continue + for r in filemaps[entry]: + if r.startswith("regex-"): + chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk) + else: + chunk = chunk.replace(bytes(r, encoding='utf8'), b'') fh.update(chunk) + else: + with open(path, 'rb') as d: + for chunk in iter(lambda: d.read(4096), b""): + fh.update(chunk) update_hash(fh.hexdigest()) else: update_hash(" " * len(fh.hexdigest())) @@ -704,7 +635,8 @@ def OEOuthashBasic(path, sigfile, task, d): update_hash("\n") # Process this directory and all its child files - process(root) + if include_root or root != ".": + process(root) for f in files: if f == 'fixmepath': continue diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py index e4045559fc..53186c4a3e 100644 --- a/meta/lib/oe/terminal.py +++ b/meta/lib/oe/terminal.py @@ -1,8 +1,10 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# import logging import oe.classutils import shlex from bb.process import Popen, ExecutionError 
-from distutils.version import LooseVersion logger = logging.getLogger('BitBake.OE.Terminal') @@ -52,7 +54,7 @@ class XTerminal(Terminal): raise UnsupportedTerminal(self.name) class Gnome(XTerminal): - command = 'gnome-terminal -t "{title}" -x {command}' + command = 'gnome-terminal -t "{title}" -- {command}' priority = 2 def __init__(self, sh_cmd, title=None, env=None, d=None): @@ -83,10 +85,10 @@ class Konsole(XTerminal): def __init__(self, sh_cmd, title=None, env=None, d=None): # Check version vernum = check_terminal_version("konsole") - if vernum and LooseVersion(vernum) < '2.0.0': + if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"): # Konsole from KDE 3.x self.command = 'konsole -T "{title}" -e {command}' - elif vernum and LooseVersion(vernum) < '16.08.1': + elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"): # Konsole pre 16.08.01 Has nofork self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}' XTerminal.__init__(self, sh_cmd, title, env, d) @@ -160,7 +162,12 @@ class Tmux(Terminal): # devshells, if it's already there, add a new window to it. 
window_name = 'devshell-%i' % os.getpid() - self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name) + self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"' + if not check_tmux_version('1.9'): + # `tmux new-session -c` was added in 1.9; + # older versions fail with that flag + self.command = 'tmux new -d -s {0} -n {0} "{{command}}"' + self.command = self.command.format(window_name) Terminal.__init__(self, sh_cmd, title, env, d) attach_cmd = 'tmux att -t {0}'.format(window_name) @@ -182,7 +189,7 @@ class Custom(Terminal): Terminal.__init__(self, sh_cmd, title, env, d) logger.warning('Custom terminal was started.') else: - logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set') + logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set') raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set') @@ -204,13 +211,16 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None): spawn(terminal.name, sh_cmd, title, env, d) break except UnsupportedTerminal: - continue + pass + except: + bb.warn("Terminal %s is supported but did not start" % (terminal.name)) + # when we've run out of options else: raise NoSupportedTerminals(get_cmd_list()) def spawn(name, sh_cmd, title=None, env=None, d=None): """Spawn the specified terminal, by name""" - logger.debug(1, 'Attempting to spawn terminal "%s"', name) + logger.debug('Attempting to spawn terminal "%s"', name) try: terminal = Registry.registry[name] except KeyError: @@ -247,13 +257,18 @@ def spawn(name, sh_cmd, title=None, env=None, d=None): except OSError: return +def check_tmux_version(desired): + vernum = check_terminal_version("tmux") + if vernum and bb.utils.vercmp_string_op(vernum, desired, "<"): + return False + return vernum + def check_tmux_pane_size(tmux): import subprocess as sub # On older tmux versions (<1.9), return false. 
The reason # is that there is no easy way to get the height of the active panel # on current window without nested formats (available from version 1.9) - vernum = check_terminal_version("tmux") - if vernum and LooseVersion(vernum) < '1.9': + if not check_tmux_version('1.9'): return False try: p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux, diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py index 1eebba5a38..bbbabafbf6 100644 --- a/meta/lib/oe/types.py +++ b/meta/lib/oe/types.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import errno import re import os @@ -150,7 +154,8 @@ def path(value, relativeto='', normalize='true', mustexist='false'): if boolean(mustexist): try: - open(value, 'r') + with open(value, 'r'): + pass except IOError as exc: if exc.errno == errno.ENOENT: raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT))) @@ -179,4 +184,3 @@ def qemu_use_kvm(kvm, target_arch): elif build_arch == target_arch: use_kvm = True return use_kvm - diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py index 179ac76b5e..3caa3f851a 100644 --- a/meta/lib/oe/useradd.py +++ b/meta/lib/oe/useradd.py @@ -1,3 +1,6 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# import argparse import re @@ -11,7 +14,7 @@ class myArgumentParser(argparse.ArgumentParser): error(message) def error(self, message): - raise bb.build.FuncFailed(message) + bb.fatal(message) def split_commands(params): params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip()) @@ -42,7 +45,6 @@ def build_useradd_parser(): parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False) parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true") parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account") - 
parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account") parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into") parser.add_argument("-r", "--system", help="create a system account", action="store_true") parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account") @@ -60,7 +62,6 @@ def build_groupadd_parser(): parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults") parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true") parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group") - parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group") parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into") parser.add_argument("-r", "--system", help="create a system account", action="store_true") parser.add_argument("GROUP", help="Group name of the new group") diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py index 7b574ffd30..46fc76c261 100644 --- a/meta/lib/oe/utils.py +++ b/meta/lib/oe/utils.py @@ -1,3 +1,7 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + import subprocess import multiprocessing import traceback @@ -78,12 +82,12 @@ def prune_suffix(var, suffixes, d): # See if var ends with any of the suffixes listed and # remove it if found for suffix in suffixes: - if var.endswith(suffix): - var = var.replace(suffix, "") + if suffix and var.endswith(suffix): + var = var[:-len(suffix)] prefix = d.getVar("MLPREFIX") if prefix and var.startswith(prefix): - var = var.replace(prefix, "") + var = var[len(prefix):] return var @@ -165,7 +169,7 @@ def any_distro_features(d, features, truevalue="1", falsevalue=""): """ return bb.utils.contains_any("DISTRO_FEATURES", features, 
truevalue, falsevalue, d) -def parallel_make(d): +def parallel_make(d, makeinst=False): """ Return the integer value for the number of parallel threads to use when building, scraped out of PARALLEL_MAKE. If no parallelization option is @@ -173,7 +177,10 @@ def parallel_make(d): e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer. """ - pm = (d.getVar('PARALLEL_MAKE') or '').split() + if makeinst: + pm = (d.getVar('PARALLEL_MAKEINST') or '').split() + else: + pm = (d.getVar('PARALLEL_MAKE') or '').split() # look for '-j' and throw other options (e.g. '-l') away while pm: opt = pm.pop(0) @@ -186,9 +193,9 @@ def parallel_make(d): return int(v) - return None + return '' -def parallel_make_argument(d, fmt, limit=None): +def parallel_make_argument(d, fmt, limit=None, makeinst=False): """ Helper utility to construct a parallel make argument from the number of parallel threads specified in PARALLEL_MAKE. @@ -201,7 +208,7 @@ def parallel_make_argument(d, fmt, limit=None): e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return "-n 10" """ - v = parallel_make(d) + v = parallel_make(d, makeinst) if v: if limit: v = min(limit, v) @@ -214,12 +221,12 @@ def packages_filter_out_system(d): PN-dbg PN-doc PN-locale-eb-gb removed. 
""" pn = d.getVar('PN') - blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')] + pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')] localepkg = pn + "-locale-" pkgs = [] for pkg in d.getVar('PACKAGES').split(): - if pkg not in blacklist and localepkg not in pkg: + if pkg not in pkgfilter and localepkg not in pkg: pkgs.append(pkg) return pkgs @@ -241,9 +248,9 @@ def trim_version(version, num_parts=2): trimmed = ".".join(parts[:num_parts]) return trimmed -def cpu_count(): - import multiprocessing - return multiprocessing.cpu_count() +def cpu_count(at_least=1, at_most=64): + cpus = len(os.sched_getaffinity(0)) + return max(min(cpus, at_most), at_least) def execute_pre_post_process(d, cmds): if cmds is None: @@ -307,6 +314,10 @@ def multiprocess_launch(target, items, d, extraargs=None): p.start() launched.append(p) for q in launched: + # Have to manually call update() to avoid deadlocks. The pipe can be full and + # transfer stalled until we try and read the results object but the subprocess won't exit + # as it still has data to write (https://bugs.python.org/issue8426) + q.update() # The finished processes are joined when calling is_alive() if not q.is_alive(): if q.exception: @@ -320,7 +331,12 @@ def multiprocess_launch(target, items, d, extraargs=None): if errors: msg = "" for (e, tb) in errors: - msg = msg + str(e) + ": " + str(tb) + "\n" + if isinstance(e, subprocess.CalledProcessError) and e.output: + msg = msg + str(e) + "\n" + msg = msg + "Subprocess output:" + msg = msg + e.output.decode("utf-8", errors="ignore") + else: + msg = msg + str(e) + ": " + str(tb) + "\n" bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg) return results @@ -328,7 +344,29 @@ def squashspaces(string): import re return re.sub(r"\s+", " ", string).strip() -def format_pkg_list(pkg_dict, ret_format=None): +def rprovides_map(pkgdata_dir, pkg_dict): + # Map file -> pkg provider + rprov_map 
= {} + + for pkg in pkg_dict: + path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg) + if not os.path.isfile(path_to_pkgfile): + continue + with open(path_to_pkgfile) as f: + for line in f: + if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'): + # List all components provided by pkg. + # Exclude version strings, i.e. those starting with ( + provides = [x for x in line.split()[1:] if not x.startswith('(')] + for prov in provides: + if prov in rprov_map: + rprov_map[prov].append(pkg) + else: + rprov_map[prov] = [pkg] + + return rprov_map + +def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None): output = [] if ret_format == "arch": @@ -341,9 +379,15 @@ def format_pkg_list(pkg_dict, ret_format=None): for pkg in sorted(pkg_dict): output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"])) elif ret_format == "deps": + rprov_map = rprovides_map(pkgdata_dir, pkg_dict) for pkg in sorted(pkg_dict): for dep in pkg_dict[pkg]["deps"]: - output.append("%s|%s" % (pkg, dep)) + if dep in rprov_map: + # There could be multiple providers within the image + for pkg_provider in rprov_map[dep]: + output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep)) + else: + output.append("%s|%s" % (pkg, dep)) else: for pkg in sorted(pkg_dict): output.append(pkg) @@ -356,6 +400,37 @@ def format_pkg_list(pkg_dict, ret_format=None): return output_str + +# Helper function to get the host compiler version +# Do not assume the compiler is gcc +def get_host_compiler_version(d, taskcontextonly=False): + import re, subprocess + + if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1': + return + + compiler = d.getVar("BUILD_CC") + # Get rid of ccache since it is not present when parsing. + if compiler.startswith('ccache '): + compiler = compiler[7:] + try: + env = os.environ.copy() + # datastore PATH does not contain session PATH as set by environment-setup-... 
+ # this breaks the install-buildtools use-case + # env["PATH"] = d.getVar("PATH") + output = subprocess.check_output("%s --version" % compiler, \ + shell=True, env=env, stderr=subprocess.STDOUT).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8"))) + + match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0]) + if not match: + bb.fatal("Can't get compiler version from %s --version output" % compiler) + + version = match.group(1) + return compiler, version + + def host_gcc_version(d, taskcontextonly=False): import re, subprocess @@ -374,7 +449,7 @@ def host_gcc_version(d, taskcontextonly=False): except subprocess.CalledProcessError as e: bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8"))) - match = re.match(r".* (\d\.\d)\.\d.*", output.split('\n')[0]) + match = re.match(r".* (\d+\.\d+)\.\d+.*", output.split('\n')[0]) if not match: bb.fatal("Can't get compiler version from %s --version output" % compiler) @@ -408,8 +483,8 @@ from threading import Thread class ThreadedWorker(Thread): """Thread executing tasks from a given tasks queue""" - def __init__(self, tasks, worker_init, worker_end): - Thread.__init__(self) + def __init__(self, tasks, worker_init, worker_end, name=None): + Thread.__init__(self, name=name) self.tasks = tasks self.daemon = True @@ -433,19 +508,19 @@ class ThreadedWorker(Thread): try: func(self, *args, **kargs) except Exception as e: - print(e) + # Eat all exceptions + bb.mainlogger.debug("Worker task raised %s" % e, exc_info=e) finally: self.tasks.task_done() class ThreadedPool: """Pool of threads consuming tasks from a queue""" - def __init__(self, num_workers, num_tasks, worker_init=None, - worker_end=None): + def __init__(self, num_workers, num_tasks, worker_init=None, worker_end=None, name="ThreadedPool-"): self.tasks = Queue(num_tasks) self.workers = [] - for _ in range(num_workers): - worker = 
ThreadedWorker(self.tasks, worker_init, worker_end) + for i in range(num_workers): + worker = ThreadedWorker(self.tasks, worker_init, worker_end, name=name + str(i)) self.workers.append(worker) def start(self): @@ -462,18 +537,7 @@ class ThreadedPool: for worker in self.workers: worker.join() -def write_ld_so_conf(d): - # Some utils like prelink may not have the correct target library paths - # so write an ld.so.conf to help them - ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf") - if os.path.exists(ldsoconf): - bb.utils.remove(ldsoconf) - bb.utils.mkdirhier(os.path.dirname(ldsoconf)) - with open(ldsoconf, "w") as f: - f.write(d.getVar("base_libdir") + '\n') - f.write(d.getVar("libdir") + '\n') - -class ImageQAFailed(bb.build.FuncFailed): +class ImageQAFailed(Exception): def __init__(self, description, name=None, logfile=None): self.description = description self.name = name @@ -486,3 +550,37 @@ class ImageQAFailed(bb.build.FuncFailed): return msg +def sh_quote(string): + import shlex + return shlex.quote(string) + +def directory_size(root, blocksize=4096): + """ + Calculate the size of the directory, taking into account hard links, + rounding up every size to multiples of the blocksize. + """ + def roundup(size): + """ + Round the size up to the nearest multiple of the block size. + """ + import math + return math.ceil(size / blocksize) * blocksize + + def getsize(filename): + """ + Get the size of the filename, not following symlinks, taking into + account hard links. + """ + stat = os.lstat(filename) + if stat.st_ino not in inodes: + inodes.add(stat.st_ino) + return stat.st_size + else: + return 0 + + inodes = set() + total = 0 + for root, dirs, files in os.walk(root): + total += sum(roundup(getsize(os.path.join(root, name))) for name in files) + total += roundup(getsize(root)) + return total |