Diffstat (limited to 'meta/lib')
-rw-r--r--  meta/lib/bblayers/create.py | 6
-rw-r--r--  meta/lib/bblayers/templates/example.bb | 4
-rw-r--r--  meta/lib/bblayers/templates/layer.conf | 10
-rw-r--r--  meta/lib/buildstats.py | 4
-rw-r--r--  meta/lib/oe/buildhistory_analysis.py | 6
-rw-r--r--  meta/lib/oe/classextend.py | 4
-rw-r--r--  meta/lib/oe/copy_buildsystem.py | 18
-rw-r--r--  meta/lib/oe/cve_check.py | 148
-rw-r--r--  meta/lib/oe/distro_check.py | 2
-rw-r--r--  meta/lib/oe/elf.py | 8
-rw-r--r--  meta/lib/oe/gpg_sign.py | 27
-rw-r--r--  meta/lib/oe/license.py | 54
-rw-r--r--  meta/lib/oe/maketype.py | 7
-rw-r--r--  meta/lib/oe/manifest.py | 148
-rw-r--r--  meta/lib/oe/overlayfs.py | 48
-rw-r--r--  meta/lib/oe/package.py | 9
-rw-r--r--  meta/lib/oe/package_manager.py | 1863
-rw-r--r--  meta/lib/oe/package_manager/__init__.py | 556
-rw-r--r--  meta/lib/oe/package_manager/deb/__init__.py | 503
-rw-r--r--  meta/lib/oe/package_manager/deb/manifest.py | 26
-rw-r--r--  meta/lib/oe/package_manager/deb/rootfs.py | 210
-rw-r--r--  meta/lib/oe/package_manager/deb/sdk.py | 100
-rw-r--r--  meta/lib/oe/package_manager/ipk/__init__.py | 503
-rw-r--r--  meta/lib/oe/package_manager/ipk/manifest.py | 74
-rw-r--r--  meta/lib/oe/package_manager/ipk/rootfs.py | 350
-rw-r--r--  meta/lib/oe/package_manager/ipk/sdk.py | 102
-rw-r--r--  meta/lib/oe/package_manager/rpm/__init__.py | 410
-rw-r--r--  meta/lib/oe/package_manager/rpm/manifest.py | 54
-rw-r--r--  meta/lib/oe/package_manager/rpm/rootfs.py | 148
-rw-r--r--  meta/lib/oe/package_manager/rpm/sdk.py | 114
-rw-r--r--  meta/lib/oe/packagedata.py | 20
-rw-r--r--  meta/lib/oe/patch.py | 37
-rw-r--r--  meta/lib/oe/path.py | 21
-rw-r--r--  meta/lib/oe/prservice.py | 31
-rw-r--r--  meta/lib/oe/qa.py | 41
-rw-r--r--  meta/lib/oe/recipeutils.py | 54
-rw-r--r--  meta/lib/oe/reproducible.py | 192
-rw-r--r--  meta/lib/oe/rootfs.py | 667
-rw-r--r--  meta/lib/oe/rust.py | 5
-rw-r--r--  meta/lib/oe/sbom.py | 78
-rw-r--r--  meta/lib/oe/sdk.py | 304
-rw-r--r--  meta/lib/oe/spdx.py | 342
-rw-r--r--  meta/lib/oe/sstatesig.py | 133
-rw-r--r--  meta/lib/oe/terminal.py | 25
-rw-r--r--  meta/lib/oe/useradd.py | 2
-rw-r--r--  meta/lib/oe/utils.py | 101
-rw-r--r--  meta/lib/oeqa/controllers/controllerimage.py (renamed from meta/lib/oeqa/controllers/masterimage.py) | 44
-rw-r--r--  meta/lib/oeqa/core/case.py | 9
-rw-r--r--  meta/lib/oeqa/core/context.py | 4
-rw-r--r--  meta/lib/oeqa/core/decorator/oetimeout.py | 5
-rw-r--r--  meta/lib/oeqa/core/runner.py | 14
-rw-r--r--  meta/lib/oeqa/core/target/qemu.py | 41
-rw-r--r--  meta/lib/oeqa/core/target/ssh.py | 28
-rw-r--r--  meta/lib/oeqa/core/tests/cases/timeout.py | 13
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_data.py | 2
-rwxr-xr-x  meta/lib/oeqa/core/tests/test_decorators.py | 6
-rw-r--r--  meta/lib/oeqa/core/utils/concurrencytest.py | 53
-rw-r--r--  meta/lib/oeqa/files/testresults/testresults.json | 2
-rw-r--r--  meta/lib/oeqa/manual/bsp-hw.json | 324
-rw-r--r--  meta/lib/oeqa/manual/build-appliance.json | 2
-rw-r--r--  meta/lib/oeqa/manual/eclipse-plugin.json | 6
-rw-r--r--  meta/lib/oeqa/manual/oe-core.json | 2
-rw-r--r--  meta/lib/oeqa/manual/sdk.json | 2
-rw-r--r--  meta/lib/oeqa/manual/toaster-managed-mode.json | 16
-rw-r--r--  meta/lib/oeqa/runtime/cases/buildcpio.py | 3
-rw-r--r--  meta/lib/oeqa/runtime/cases/date.py | 13
-rw-r--r--  meta/lib/oeqa/runtime/cases/df.py | 2
-rw-r--r--  meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py | 36
-rw-r--r--  meta/lib/oeqa/runtime/cases/go.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/cases/ksample.py | 2
-rw-r--r--  meta/lib/oeqa/runtime/cases/logrotate.py | 14
-rw-r--r--  meta/lib/oeqa/runtime/cases/ltp.py | 5
-rw-r--r--  meta/lib/oeqa/runtime/cases/multilib.py | 11
-rw-r--r--  meta/lib/oeqa/runtime/cases/oe_syslog.py | 11
-rw-r--r--  meta/lib/oeqa/runtime/cases/pam.py | 3
-rw-r--r--  meta/lib/oeqa/runtime/cases/parselogs.py | 30
-rw-r--r--  meta/lib/oeqa/runtime/cases/ping.py | 20
-rw-r--r--  meta/lib/oeqa/runtime/cases/ptest.py | 3
-rw-r--r--  meta/lib/oeqa/runtime/cases/rpm.py | 15
-rw-r--r--  meta/lib/oeqa/runtime/cases/rtc.py | 38
-rw-r--r--  meta/lib/oeqa/runtime/cases/runlevel.py | 22
-rw-r--r--  meta/lib/oeqa/runtime/cases/rust.py | 19
-rw-r--r--  meta/lib/oeqa/runtime/cases/skeletoninit.py | 2
-rw-r--r--  meta/lib/oeqa/runtime/cases/ssh.py | 4
-rw-r--r--  meta/lib/oeqa/runtime/cases/stap.py | 39
-rw-r--r--  meta/lib/oeqa/runtime/cases/suspend.py | 33
-rw-r--r--  meta/lib/oeqa/runtime/cases/terminal.py | 21
-rw-r--r--  meta/lib/oeqa/runtime/cases/usb_hid.py | 22
-rw-r--r--  meta/lib/oeqa/runtime/cases/weston.py | 28
-rw-r--r--  meta/lib/oeqa/runtime/cases/x32lib.py | 17
-rw-r--r--  meta/lib/oeqa/runtime/context.py | 29
-rw-r--r--  meta/lib/oeqa/runtime/decorator/package.py | 4
-rw-r--r--  meta/lib/oeqa/runtime/files/hello.stp | 1
-rw-r--r--  meta/lib/oeqa/sdk/buildtools-cases/README | 2
-rw-r--r--  meta/lib/oeqa/sdk/buildtools-cases/build.py | 30
-rw-r--r--  meta/lib/oeqa/sdk/buildtools-cases/gcc.py | 29
-rw-r--r--  meta/lib/oeqa/sdk/buildtools-cases/https.py | 20
-rw-r--r--  meta/lib/oeqa/sdk/buildtools-cases/sanity.py | 22
-rw-r--r--  meta/lib/oeqa/sdk/case.py | 2
-rw-r--r--  meta/lib/oeqa/sdk/cases/assimp.py | 2
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildcpio.py | 3
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildepoxy.py | 2
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildgalculator.py | 4
-rw-r--r--  meta/lib/oeqa/sdk/cases/buildlzip.py | 2
-rw-r--r--  meta/lib/oeqa/sdkext/testsdk.py | 7
-rw-r--r--  meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py | 4
-rw-r--r--  meta/lib/oeqa/selftest/cases/archiver.py | 22
-rw-r--r--  meta/lib/oeqa/selftest/cases/bblayers.py | 5
-rw-r--r--  meta/lib/oeqa/selftest/cases/bblogging.py | 186
-rw-r--r--  meta/lib/oeqa/selftest/cases/bbtests.py | 82
-rw-r--r--  meta/lib/oeqa/selftest/cases/buildoptions.py | 67
-rw-r--r--  meta/lib/oeqa/selftest/cases/containerimage.py | 13
-rw-r--r--  meta/lib/oeqa/selftest/cases/cve_check.py | 44
-rw-r--r--  meta/lib/oeqa/selftest/cases/devtool.py | 130
-rw-r--r--  meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt | 1
-rw-r--r--  meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt | 1
-rw-r--r--  meta/lib/oeqa/selftest/cases/distrodata.py | 44
-rw-r--r--  meta/lib/oeqa/selftest/cases/eSDK.py | 6
-rw-r--r--  meta/lib/oeqa/selftest/cases/efibootpartition.py | 6
-rw-r--r--  meta/lib/oeqa/selftest/cases/fetch.py | 65
-rw-r--r--  meta/lib/oeqa/selftest/cases/fitimage.py | 840
-rw-r--r--  meta/lib/oeqa/selftest/cases/glibc.py | 4
-rw-r--r--  meta/lib/oeqa/selftest/cases/gotoolchain.py | 21
-rw-r--r--  meta/lib/oeqa/selftest/cases/image_typedep.py | 4
-rw-r--r--  meta/lib/oeqa/selftest/cases/imagefeatures.py | 38
-rw-r--r--  meta/lib/oeqa/selftest/cases/incompatible_lic.py | 124
-rw-r--r--  meta/lib/oeqa/selftest/cases/kerneldevelopment.py | 2
-rw-r--r--  meta/lib/oeqa/selftest/cases/layerappend.py | 10
-rw-r--r--  meta/lib/oeqa/selftest/cases/lic_checksum.py | 2
-rw-r--r--  meta/lib/oeqa/selftest/cases/meta_ide.py | 2
-rw-r--r--  meta/lib/oeqa/selftest/cases/multiconfig.py | 6
-rw-r--r--  meta/lib/oeqa/selftest/cases/newlib.py | 11
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/elf.py | 2
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/license.py | 22
-rw-r--r--  meta/lib/oeqa/selftest/cases/oelib/utils.py | 3
-rw-r--r--  meta/lib/oeqa/selftest/cases/oescripts.py | 4
-rw-r--r--  meta/lib/oeqa/selftest/cases/overlayfs.py | 423
-rw-r--r--  meta/lib/oeqa/selftest/cases/package.py | 11
-rw-r--r--  meta/lib/oeqa/selftest/cases/pkgdata.py | 6
-rw-r--r--  meta/lib/oeqa/selftest/cases/prservice.py | 8
-rw-r--r--  meta/lib/oeqa/selftest/cases/pseudo.py | 27
-rw-r--r--  meta/lib/oeqa/selftest/cases/recipetool.py | 297
-rw-r--r--  meta/lib/oeqa/selftest/cases/recipeutils.py | 8
-rw-r--r--  meta/lib/oeqa/selftest/cases/reproducible.py | 96
-rw-r--r--  meta/lib/oeqa/selftest/cases/runcmd.py | 20
-rw-r--r--  meta/lib/oeqa/selftest/cases/runqemu.py | 9
-rw-r--r--  meta/lib/oeqa/selftest/cases/runtime_test.py | 75
-rw-r--r--  meta/lib/oeqa/selftest/cases/signing.py | 16
-rw-r--r--  meta/lib/oeqa/selftest/cases/sstatetests.py | 126
-rw-r--r--  meta/lib/oeqa/selftest/cases/sysroot.py | 8
-rw-r--r--  meta/lib/oeqa/selftest/cases/tinfoil.py | 15
-rw-r--r--  meta/lib/oeqa/selftest/cases/wic.py | 247
-rw-r--r--  meta/lib/oeqa/selftest/context.py | 23
-rw-r--r--  meta/lib/oeqa/targetcontrol.py | 7
-rw-r--r--  meta/lib/oeqa/utils/__init__.py | 20
-rw-r--r--  meta/lib/oeqa/utils/buildproject.py | 3
-rw-r--r--  meta/lib/oeqa/utils/commands.py | 18
-rw-r--r--  meta/lib/oeqa/utils/dump.py | 46
-rw-r--r--  meta/lib/oeqa/utils/logparser.py | 27
-rw-r--r--  meta/lib/oeqa/utils/package_manager.py | 8
-rw-r--r--  meta/lib/oeqa/utils/qemurunner.py | 173
-rw-r--r--  meta/lib/oeqa/utils/qemutinyrunner.py | 8
-rw-r--r--  meta/lib/oeqa/utils/targetbuild.py | 4
163 files changed, 8071 insertions(+), 4162 deletions(-)
diff --git a/meta/lib/bblayers/create.py b/meta/lib/bblayers/create.py
index 542f31fc81..7ddb777dc7 100644
--- a/meta/lib/bblayers/create.py
+++ b/meta/lib/bblayers/create.py
@@ -35,6 +35,7 @@ class CreatePlugin(LayerPlugin):
bb.utils.mkdirhier(conf)
layername = os.path.basename(os.path.normpath(args.layerdir))
+ layerid = args.layerid if args.layerid is not None else layername
# Create the README from templates/README
readme_template = read_template('README').format(layername=layername)
@@ -54,7 +55,7 @@ class CreatePlugin(LayerPlugin):
# Create the layer.conf from templates/layer.conf
layerconf_template = read_template('layer.conf').format(
- layername=layername, priority=args.priority, compat=compat)
+ layerid=layerid, priority=args.priority, compat=compat)
layerconf = os.path.join(conf, 'layer.conf')
with open(layerconf, 'w') as fd:
fd.write(layerconf_template)
@@ -71,7 +72,8 @@ class CreatePlugin(LayerPlugin):
def register_commands(self, sp):
parser_create_layer = self.add_command(sp, 'create-layer', self.do_create_layer, parserecipes=False)
parser_create_layer.add_argument('layerdir', help='Layer directory to create')
- parser_create_layer.add_argument('--priority', '-p', default=6, help='Layer directory to create')
+ parser_create_layer.add_argument('--layerid', '-i', help='Layer id to use if different from layername')
+ parser_create_layer.add_argument('--priority', '-p', default=6, help='Priority of recipes in layer')
parser_create_layer.add_argument('--example-recipe-name', '-e', dest='examplerecipe', default='example', help='Filename of the example recipe')
parser_create_layer.add_argument('--example-recipe-version', '-v', dest='version', default='0.1', help='Version number for the example recipe')
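
The new --layerid option decouples the identifier used in layer.conf variable names from the directory-derived layer name. A minimal sketch of the fallback logic, using plain argparse with hypothetical values:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('layerdir')
    parser.add_argument('--layerid', '-i')
    args = parser.parse_args(['meta-example', '--layerid', 'example'])

    layername = args.layerdir   # stands in for os.path.basename(os.path.normpath(args.layerdir))
    layerid = args.layerid if args.layerid is not None else layername
    assert layerid == 'example'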
diff --git a/meta/lib/bblayers/templates/example.bb b/meta/lib/bblayers/templates/example.bb
index c4b873d593..facaae35d2 100644
--- a/meta/lib/bblayers/templates/example.bb
+++ b/meta/lib/bblayers/templates/example.bb
@@ -2,10 +2,12 @@ SUMMARY = "bitbake-layers recipe"
DESCRIPTION = "Recipe created by bitbake-layers"
LICENSE = "MIT"
-python do_build() {
+python do_display_banner() {
bb.plain("***********************************************");
bb.plain("* *");
bb.plain("* Example recipe created by bitbake-layers *");
bb.plain("* *");
bb.plain("***********************************************");
}
+
+addtask display_banner before do_build
diff --git a/meta/lib/bblayers/templates/layer.conf b/meta/lib/bblayers/templates/layer.conf
index e2eaff4346..dddfbf716e 100644
--- a/meta/lib/bblayers/templates/layer.conf
+++ b/meta/lib/bblayers/templates/layer.conf
@@ -5,9 +5,9 @@ BBPATH .= ":${{LAYERDIR}}"
BBFILES += "${{LAYERDIR}}/recipes-*/*/*.bb \
${{LAYERDIR}}/recipes-*/*/*.bbappend"
-BBFILE_COLLECTIONS += "{layername}"
-BBFILE_PATTERN_{layername} = "^${{LAYERDIR}}/"
-BBFILE_PRIORITY_{layername} = "{priority}"
+BBFILE_COLLECTIONS += "{layerid}"
+BBFILE_PATTERN_{layerid} = "^${{LAYERDIR}}/"
+BBFILE_PRIORITY_{layerid} = "{priority}"
-LAYERDEPENDS_{layername} = "core"
-LAYERSERIES_COMPAT_{layername} = "{compat}"
+LAYERDEPENDS_{layerid} = "core"
+LAYERSERIES_COMPAT_{layerid} = "{compat}"
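
This template is rendered with Python's str.format(), which is why literal BitBake expansions are written with doubled braces: {layerid} and {priority} are substituted, while ${{LAYERDIR}} collapses to ${LAYERDIR}. A quick sketch with a one-line stand-in template:

    template = 'BBFILE_PRIORITY_{layerid} = "{priority}"\nBBPATH .= ":${{LAYERDIR}}"'
    print(template.format(layerid='example', priority=6))
    # BBFILE_PRIORITY_example = "6"
    # BBPATH .= ":${LAYERDIR}"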
diff --git a/meta/lib/buildstats.py b/meta/lib/buildstats.py
index 8627ed3c31..c52b6c3b72 100644
--- a/meta/lib/buildstats.py
+++ b/meta/lib/buildstats.py
@@ -43,8 +43,8 @@ class SystemStats:
# depends on the heartbeat event, which fires less often.
self.min_seconds = 1
- self.meminfo_regex = re.compile(b'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
- self.diskstats_regex = re.compile(b'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+ self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
+ self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
self.diskstats_ltime = None
self.diskstats_data = None
self.stat_ltimes = None
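
The fix above is purely a string-prefix change: in a plain bytes literal, escapes such as \s and \d only work because Python passes unrecognized sequences through (with a DeprecationWarning on newer interpreters), whereas rb'...' hands the backslashes to the regex engine intact. A quick check, assuming a typical /proc/meminfo line:

    import re

    # raw bytes pattern: the regex engine, not the string parser, interprets \s and \d
    meminfo_regex = re.compile(rb'^(MemTotal|MemFree):\s*(\d+)')
    m = meminfo_regex.match(b'MemTotal:       16331712 kB')
    assert m and m.group(2) == b'16331712'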
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
index 2d6fa1779e..b1856846b6 100644
--- a/meta/lib/oe/buildhistory_analysis.py
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -373,8 +373,10 @@ def compare_file_lists(alines, blines, compare_ownership=True):
removals.remove(removal2)
continue
filechanges.append(FileChange(removal, FileChange.changetype_move, addition))
- additions.remove(addition)
- removals.remove(removal)
+ if addition in additions:
+ additions.remove(addition)
+ if removal in removals:
+ removals.remove(removal)
for rename in renames:
filechanges.append(FileChange(renames[rename], FileChange.changetype_move, rename))
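
list.remove() raises ValueError when the element is absent, and the same addition or removal can already have been consumed by an earlier match in this loop, so the membership guards keep the comparison from aborting. The failure mode in isolation:

    items = ['pkg-a']
    items.remove('pkg-a')
    try:
        items.remove('pkg-a')   # second removal of the same element
    except ValueError:
        pass                    # exactly what the 'if ... in ...' guards above avoid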
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
index d3d8fbe724..e08d788b75 100644
--- a/meta/lib/oe/classextend.py
+++ b/meta/lib/oe/classextend.py
@@ -87,7 +87,7 @@ class ClassExtender(object):
def map_depends_variable(self, varname, suffix = ""):
# We need to preserve EXTENDPKGV so it can be expanded correctly later
if suffix:
- varname = varname + "_" + suffix
+ varname = varname + ":" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
deps = self.d.getVar(varname)
@@ -142,7 +142,7 @@ class ClassExtender(object):
if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
continue
for subs in variables:
- self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+ self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
index 31a84f5b06..79642fd76a 100644
--- a/meta/lib/oe/copy_buildsystem.py
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -20,7 +20,7 @@ def _smart_copy(src, dest):
mode = os.stat(src).st_mode
if stat.S_ISDIR(mode):
bb.utils.mkdirhier(dest)
- cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ cmd = "tar --exclude='.git' --exclude='__pycache__' --xattrs --xattrs-include='*' -chf - -C %s -p . \
| tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
else:
@@ -45,9 +45,6 @@ class BuildSystem(object):
corebase = os.path.abspath(self.d.getVar('COREBASE'))
layers.append(corebase)
- # Get relationship between TOPDIR and COREBASE
- # Layers should respect it
- corebase_relative = os.path.dirname(os.path.relpath(os.path.abspath(self.d.getVar('TOPDIR')), corebase))
# The bitbake build system uses the meta-skeleton layer as a layout
# for common recipes, e.g. the recipetool script to create kernel recipes
# Add the meta-skeleton layer to be included as part of the eSDK installation
@@ -100,11 +97,10 @@ class BuildSystem(object):
layerdestpath = destdir
if corebase == os.path.dirname(layer):
layerdestpath += '/' + os.path.basename(corebase)
- else:
- layer_relative = os.path.relpath(layer, corebase)
- if os.path.dirname(layer_relative) == corebase_relative:
- layer_relative = os.path.dirname(corebase_relative) + '/' + layernewname
- layer_relative = os.path.basename(corebase) + '/' + layer_relative
+ # If the layer is located somewhere under the same parent directory
+ # as corebase we keep the layer structure.
+ elif os.path.commonpath([layer, corebase]) == os.path.dirname(corebase):
+ layer_relative = os.path.relpath(layer, os.path.dirname(corebase))
if os.path.dirname(layer_relative) != layernewname:
layerdestpath += '/' + os.path.dirname(layer_relative)
@@ -259,7 +255,7 @@ def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cac
bb.note('Generating sstate-cache...')
nativelsbstring = d.getVar('NATIVELSBSTRING')
- bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
+ bb.process.run("PYTHONDONTWRITEBYTECODE=1 gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
if fixedlsbstring and nativelsbstring != fixedlsbstring:
nativedir = output_sstate_cache + '/' + nativelsbstring
if os.path.isdir(nativedir):
@@ -286,7 +282,7 @@ def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, lo
logparam = '-l %s' % logfile
else:
logparam = ''
- cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
+ cmd = "%sPYTHONDONTWRITEBYTECODE=1 BB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
env = dict(d.getVar('BB_ORIGENV', False))
env.pop('BUILDDIR', '')
env.pop('BBPATH', '')
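
The rewritten branch replaces the old relative-path bookkeeping with a single os.path.commonpath() test: a layer keeps its structure when it lives under the same parent directory as corebase. An illustration with hypothetical paths:

    import os.path

    corebase = '/srv/poky'
    layer = '/srv/meta-custom'
    assert os.path.commonpath([layer, corebase]) == os.path.dirname(corebase)   # '/srv'

    elsewhere = '/opt/meta-other'
    assert os.path.commonpath([elsewhere, corebase]) != os.path.dirname(corebase)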
diff --git a/meta/lib/oe/cve_check.py b/meta/lib/oe/cve_check.py
new file mode 100644
index 0000000000..0302beeb4a
--- /dev/null
+++ b/meta/lib/oe/cve_check.py
@@ -0,0 +1,148 @@
+import collections
+import re
+import itertools
+import functools
+
+_Version = collections.namedtuple(
+ "_Version", ["release", "patch_l", "pre_l", "pre_v"]
+)
+
+@functools.total_ordering
+class Version():
+
+ def __init__(self, version, suffix=None):
+
+ suffixes = ["alphabetical", "patch"]
+
+ if str(suffix) == "alphabetical":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(?P<patch_l>[a-z]))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ elif str(suffix) == "patch":
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<patch>[-_\.]?(p|patch)(?P<patch_l>[0-9]+))?(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ else:
+ version_pattern = r"""r?v?(?:(?P<release>[0-9]+(?:[-\.][0-9]+)*)(?P<pre>[-_\.]?(?P<pre_l>(rc|alpha|beta|pre|preview|dev))[-_\.]?(?P<pre_v>[0-9]+)?)?)(.*)?"""
+ regex = re.compile(r"^\s*" + version_pattern + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ match = regex.search(version)
+ if not match:
+ raise Exception("Invalid version: '{0}'".format(version))
+
+ self._version = _Version(
+ release=tuple(int(i) for i in match.group("release").replace("-",".").split(".")),
+ patch_l=match.group("patch_l") if str(suffix) in suffixes and match.group("patch_l") else "",
+ pre_l=match.group("pre_l"),
+ pre_v=match.group("pre_v")
+ )
+
+ self._key = _cmpkey(
+ self._version.release,
+ self._version.patch_l,
+ self._version.pre_l,
+ self._version.pre_v
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key == other._key
+
+ def __gt__(self, other):
+ if not isinstance(other, Version):
+ return NotImplemented
+ return self._key > other._key
+
+def _cmpkey(release, patch_l, pre_l, pre_v):
+ # remove leading 0
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ _patch = patch_l.upper()
+
+ if pre_l is None and pre_v is None:
+ _pre = float('inf')
+ else:
+ _pre = float(pre_v) if pre_v else float('-inf')
+ return _release, _patch, _pre
+
+
+def get_patched_cves(d):
+ """
+ Get patches that solve CVEs using the "CVE: " tag.
+ """
+
+ import re
+ import oe.patch
+
+ pn = d.getVar("PN")
+ cve_match = re.compile("CVE:( CVE\-\d{4}\-\d+)+")
+
+ # Matches the last "CVE-YYYY-ID" in the file name, also if written
+ # in lowercase. Possible to have multiple CVE IDs in a single
+ # file name, but only the last one will be detected from the file name.
+ # However, patch file contents addressing multiple CVE IDs are supported
+ # (cve_match regular expression)
+
+ cve_file_name_match = re.compile(".*([Cc][Vv][Ee]\-\d{4}\-\d+)")
+
+ patched_cves = set()
+ bb.debug(2, "Looking for patches that solves CVEs for %s" % pn)
+ for url in oe.patch.src_patches(d):
+ patch_file = bb.fetch.decodeurl(url)[2]
+
+ if not os.path.isfile(patch_file):
+ bb.error("File Not found: %s" % patch_file)
+ raise FileNotFoundError
+
+ # Check patch file name for CVE ID
+ fname_match = cve_file_name_match.search(patch_file)
+ if fname_match:
+ cve = fname_match.group(1).upper()
+ patched_cves.add(cve)
+ bb.debug(2, "Found CVE %s from patch file name %s" % (cve, patch_file))
+
+ with open(patch_file, "r", encoding="utf-8") as f:
+ try:
+ patch_text = f.read()
+ except UnicodeDecodeError:
+ bb.debug(1, "Failed to read patch %s using UTF-8 encoding"
+ " trying with iso8859-1" % patch_file)
+ f.close()
+ with open(patch_file, "r", encoding="iso8859-1") as f:
+ patch_text = f.read()
+
+ # Search for one or more "CVE: " lines
+ text_match = False
+ for match in cve_match.finditer(patch_text):
+ # Get only the CVEs without the "CVE: " tag
+ cves = patch_text[match.start()+5:match.end()]
+ for cve in cves.split():
+ bb.debug(2, "Patch %s solves %s" % (patch_file, cve))
+ patched_cves.add(cve)
+ text_match = True
+
+ if not fname_match and not text_match:
+ bb.debug(2, "Patch %s doesn't solve CVEs" % patch_file)
+
+ return patched_cves
+
+
+def get_cpe_ids(cve_product, version):
+ """
+ Get list of CPE identifiers for the given product and version
+ """
+
+ version = version.split("+git")[0]
+
+ cpe_ids = []
+ for product in cve_product.split():
+ # CVE_PRODUCT in recipes may include vendor information for CPE identifiers. If not,
+ # use wildcard for vendor.
+ if ":" in product:
+ vendor, product = product.split(":", 1)
+ else:
+ vendor = "*"
+
+ cpe_id = f'cpe:2.3:a:{vendor}:{product}:{version}:*:*:*:*:*:*:*'
+ cpe_ids.append(cpe_id)
+
+ return cpe_ids
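
Given _cmpkey() above, a version with no pre-release segment compares as infinity, so a final release sorts after its release candidates, and trailing zeros in the release tuple are ignored. A usage sketch, assuming meta/lib is on PYTHONPATH so the module imports as oe.cve_check (note get_patched_cves() also references os and bb, which it relies on the BitBake environment to provide; the version strings below are illustrative):

    from oe.cve_check import Version, get_cpe_ids

    assert Version("1.0rc1") < Version("1.0")    # pre-release sorts before the final release
    assert Version("1.0.0") == Version("1.0")    # trailing zeros are stripped
    assert Version("1.1a", "alphabetical") > Version("1.1", "alphabetical")
    assert Version("1.1p2", "patch") > Version("1.1p1", "patch")

    # "+git..." suffixes are stripped before building the CPE id
    assert get_cpe_ids("openssl:openssl", "1.1.1k+gitX") == \
        ["cpe:2.3:a:openssl:openssl:1.1.1k:*:*:*:*:*:*:*"]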
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
index 88e46c354d..4b2a9bec01 100644
--- a/meta/lib/oe/distro_check.py
+++ b/meta/lib/oe/distro_check.py
@@ -26,7 +26,7 @@ def find_latest_numeric_release(url, d):
maxstr=""
for link in get_links_from_url(url, d):
try:
- # TODO use LooseVersion
+ # TODO use bb.utils.vercmp_string_op()
release = float(link)
except:
release = 0
diff --git a/meta/lib/oe/elf.py b/meta/lib/oe/elf.py
index df0a4593fa..46c884a775 100644
--- a/meta/lib/oe/elf.py
+++ b/meta/lib/oe/elf.py
@@ -61,6 +61,14 @@ def machine_dict(d):
"microblaze": (189, 0, 0, False, 32),
"microblazeel":(189, 0, 0, True, 32),
},
+ "linux-android" : {
+ "aarch64" : (183, 0, 0, True, 64),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ },
+ "linux-androideabi" : {
+ "arm" : (40, 97, 0, True, 32),
+ },
"linux-musl" : {
"aarch64" : (183, 0, 0, True, 64),
"aarch64_be" :(183, 0, 0, False, 64),
diff --git a/meta/lib/oe/gpg_sign.py b/meta/lib/oe/gpg_sign.py
index 7634d7ef1d..1bce6cb792 100644
--- a/meta/lib/oe/gpg_sign.py
+++ b/meta/lib/oe/gpg_sign.py
@@ -109,16 +109,33 @@ class LocalSigner(object):
bb.fatal("Could not get gpg version: %s" % e)
- def verify(self, sig_file):
+ def verify(self, sig_file, valid_sigs = ''):
"""Verify signature"""
- cmd = self.gpg_cmd + [" --verify", "--no-permission-warning"]
+ cmd = self.gpg_cmd + ["--verify", "--no-permission-warning", "--status-fd", "1"]
if self.gpg_path:
cmd += ["--homedir", self.gpg_path]
cmd += [sig_file]
- status = subprocess.call(cmd)
- ret = False if status else True
- return ret
+ status = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Valid if any key matches if unspecified
+ if not valid_sigs:
+ ret = False if status.returncode else True
+ return ret
+
+ import re
+ goodsigs = []
+ sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
+ for l in status.stdout.decode("utf-8").splitlines():
+ s = sigre.match(l)
+ if s:
+ goodsigs += [s.group(1)]
+
+ for sig in valid_sigs.split():
+ if sig in goodsigs:
+ return True
+ if len(goodsigs):
+ bb.warn('No accepted signatures found. Good signatures found: %s.' % ' '.join(goodsigs))
+ return False
def get_signer(d, backend):
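
verify() now runs gpg with --status-fd 1, so acceptance can be restricted to the key ids reported on GOODSIG status lines instead of trusting any verifiable signature. The parsing, exercised against a representative status line (key id and uid are made up):

    import re

    sigre = re.compile(r'^\[GNUPG:\] GOODSIG (\S+)\s(.*)$')
    line = '[GNUPG:] GOODSIG 61B7B526D98F0353 Example Signer <signer@example.com>'
    m = sigre.match(line)
    assert m and m.group(1) == '61B7B526D98F0353'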
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
index c1274a61de..99cfa5f733 100644
--- a/meta/lib/oe/license.py
+++ b/meta/lib/oe/license.py
@@ -10,17 +10,20 @@ from fnmatch import fnmatchcase as fnmatch
def license_ok(license, dont_want_licenses):
""" Return False if License exist in dont_want_licenses else True """
for dwl in dont_want_licenses:
- # If you want to exclude license named generically 'X', we
- # surely want to exclude 'X+' as well. In consequence, we
- # will exclude a trailing '+' character from LICENSE in
- # case INCOMPATIBLE_LICENSE is not a 'X+' license.
- lic = license
- if not re.search(r'\+$', dwl):
- lic = re.sub(r'\+', '', license)
- if fnmatch(lic, dwl):
+ if fnmatch(license, dwl):
return False
return True
+def obsolete_license_list():
+ return ["AGPL-3", "AGPL-3+", "AGPLv3", "AGPLv3+", "AGPLv3.0", "AGPLv3.0+", "AGPL-3.0", "AGPL-3.0+", "BSD-0-Clause",
+ "GPL-1", "GPL-1+", "GPLv1", "GPLv1+", "GPLv1.0", "GPLv1.0+", "GPL-1.0", "GPL-1.0+", "GPL-2", "GPL-2+", "GPLv2",
+ "GPLv2+", "GPLv2.0", "GPLv2.0+", "GPL-2.0", "GPL-2.0+", "GPL-3", "GPL-3+", "GPLv3", "GPLv3+", "GPLv3.0", "GPLv3.0+",
+ "GPL-3.0", "GPL-3.0+", "LGPLv2", "LGPLv2+", "LGPLv2.0", "LGPLv2.0+", "LGPL-2.0", "LGPL-2.0+", "LGPL2.1", "LGPL2.1+",
+ "LGPLv2.1", "LGPLv2.1+", "LGPL-2.1", "LGPL-2.1+", "LGPLv3", "LGPLv3+", "LGPL-3.0", "LGPL-3.0+", "MPL-1", "MPLv1",
+ "MPLv1.1", "MPLv2", "MIT-X", "MIT-style", "openssl", "PSF", "PSFv2", "Python-2", "Apachev2", "Apache-2", "Artisticv1",
+ "Artistic-1", "AFL-2", "AFL-1", "AFLv2", "AFLv1", "CDDLv1", "CDDL-1", "EPLv1.0", "FreeType", "Nauman",
+ "tcl", "vim", "SGIv1"]
+
class LicenseError(Exception):
pass
@@ -81,6 +84,9 @@ class FlattenVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.append(node.s)
+ def visit_Constant(self, node):
+ self.licenses.append(node.value)
+
def visit_BinOp(self, node):
if isinstance(node.op, ast.BitOr):
left = FlattenVisitor(self.choose_licenses)
@@ -103,26 +109,26 @@ def flattened_licenses(licensestr, choose_licenses):
raise LicenseSyntaxError(licensestr, exc)
return flatten.licenses
-def is_included(licensestr, whitelist=None, blacklist=None):
- """Given a license string and whitelist and blacklist, determine if the
- license string matches the whitelist and does not match the blacklist.
+def is_included(licensestr, include_licenses=None, exclude_licenses=None):
+ """Given a license string, a list of licenses to include and a list of
+ licenses to exclude, determine if the license string matches the include
+ list and does not match the exclude list.
Returns a tuple holding the boolean state and a list of the applicable
licenses that were excluded if state is False, or the licenses that were
- included if the state is True.
- """
+ included if the state is True."""
def include_license(license):
- return any(fnmatch(license, pattern) for pattern in whitelist)
+ return any(fnmatch(license, pattern) for pattern in include_licenses)
def exclude_license(license):
- return any(fnmatch(license, pattern) for pattern in blacklist)
+ return any(fnmatch(license, pattern) for pattern in exclude_licenses)
def choose_licenses(alpha, beta):
"""Select the option in an OR which is the 'best' (has the most
included licenses and no excluded licenses)."""
# The factor 1000 below is arbitrary, just expected to be much larger
- # that the number of licenses actually specified. That way the weight
+ # than the number of licenses actually specified. That way the weight
# will be negative if the list of licenses contains an excluded license,
# but still gives a higher weight to the list with the most included
# licenses.
@@ -135,11 +141,11 @@ def is_included(licensestr, whitelist=None, blacklist=None):
else:
return beta
- if not whitelist:
- whitelist = ['*']
+ if not include_licenses:
+ include_licenses = ['*']
- if not blacklist:
- blacklist = []
+ if not exclude_licenses:
+ exclude_licenses = []
licenses = flattened_licenses(licensestr, choose_licenses)
excluded = [lic for lic in licenses if exclude_license(lic)]
@@ -234,6 +240,9 @@ class ListVisitor(LicenseVisitor):
def visit_Str(self, node):
self.licenses.add(node.s)
+ def visit_Constant(self, node):
+ self.licenses.add(node.value)
+
def list_licenses(licensestr):
"""Simply get a list of all licenses mentioned in a license string.
Binary operators are not applied or taken into account in any way"""
@@ -243,3 +252,8 @@ def list_licenses(licensestr):
except SyntaxError as exc:
raise LicenseSyntaxError(licensestr, exc)
return visitor.licenses
+
+def apply_pkg_license_exception(pkg, bad_licenses, exceptions):
+ """Return remaining bad licenses after removing any package exceptions"""
+
+ return [lic for lic in bad_licenses if pkg + ':' + lic not in exceptions]
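
With the OR-flattening above choosing the branch with the highest weight, an excluded license in one branch simply steers is_included() to the other. A usage sketch, assuming the module imports as oe.license:

    import oe.license

    ok, lics = oe.license.is_included("GPL-3.0-only | MIT",
                                      include_licenses=["*"],
                                      exclude_licenses=["GPL-3.0*"])
    assert ok and lics == ["MIT"]

    # the new per-package exception hook
    assert oe.license.apply_pkg_license_exception(
        "foo", ["GPL-3.0-only"], ["foo:GPL-3.0-only"]) == []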
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
index d929c8b3e5..d36082c535 100644
--- a/meta/lib/oe/maketype.py
+++ b/meta/lib/oe/maketype.py
@@ -10,12 +10,7 @@ the arguments of the type's factory for details.
import inspect
import oe.types as types
-try:
- # Python 3.7+
- from collections.abc import Callable
-except ImportError:
- # Python < 3.7
- from collections import Callable
+from collections.abc import Callable
available_types = {}
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
index f7c88f9a09..1a058dcd73 100644
--- a/meta/lib/oe/manifest.py
+++ b/meta/lib/oe/manifest.py
@@ -7,7 +7,6 @@ import os
import re
import bb
-
class Manifest(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -189,154 +188,11 @@ class Manifest(object, metaclass=ABCMeta):
return installed_pkgs
-class RpmManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in pkgs:
- for pkg in pkgs[pkg_type].split():
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
-
-class OpkgManifest(Manifest):
- """
- Returns a dictionary object with mip and mlp packages.
- """
- def _split_multilib(self, pkg_list):
- pkgs = dict()
-
- for pkg in pkg_list.split():
- pkg_type = self.PKG_TYPE_MUST_INSTALL
-
- ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
-
- for ml_variant in ml_variants:
- if pkg.startswith(ml_variant + '-'):
- pkg_type = self.PKG_TYPE_MULTILIB
-
- if not pkg_type in pkgs:
- pkgs[pkg_type] = pkg
- else:
- pkgs[pkg_type] += " " + pkg
-
- return pkgs
-
- def create_initial(self):
- pkgs = dict()
-
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- if var in self.vars_to_split:
- split_pkgs = self._split_multilib(self.d.getVar(var))
- if split_pkgs is not None:
- pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
- else:
- pkg_list = self.d.getVar(var)
- if pkg_list is not None:
- pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
-
- for pkg_type in sorted(pkgs):
- for pkg in sorted(pkgs[pkg_type].split()):
- manifest.write("%s,%s\n" % (pkg_type, pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- if not os.path.exists(self.initial_manifest):
- self.create_initial()
-
- initial_manifest = self.parse_initial_manifest()
- pkgs_to_install = list()
- for pkg_type in initial_manifest:
- pkgs_to_install += initial_manifest[pkg_type]
- if len(pkgs_to_install) == 0:
- return
-
- output = pm.dummy_install(pkgs_to_install)
-
- with open(self.full_manifest, 'w+') as manifest:
- pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
- for line in set(output.split('\n')):
- m = pkg_re.match(line)
- if m:
- manifest.write(m.group(1) + '\n')
-
- return
-
-
-class DpkgManifest(Manifest):
- def create_initial(self):
- with open(self.initial_manifest, "w+") as manifest:
- manifest.write(self.initial_manifest_file_header)
-
- for var in self.var_maps[self.manifest_type]:
- pkg_list = self.d.getVar(var)
-
- if pkg_list is None:
- continue
-
- for pkg in pkg_list.split():
- manifest.write("%s,%s\n" %
- (self.var_maps[self.manifest_type][var], pkg))
-
- def create_final(self):
- pass
-
- def create_full(self, pm):
- pass
-
def create_manifest(d, final_manifest=False, manifest_dir=None,
manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
- manifest_map = {'rpm': RpmManifest,
- 'ipk': OpkgManifest,
- 'deb': DpkgManifest}
-
- manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
+ import importlib
+ manifest = importlib.import_module('oe.package_manager.' + d.getVar('IMAGE_PKGTYPE') + '.manifest').PkgManifest(d, manifest_dir, manifest_type)
if final_manifest:
manifest.create_final()
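
create_manifest() now resolves the backend class dynamically instead of keeping a hard-coded map, so each backend under oe.package_manager ships its own PkgManifest. The lookup pattern in isolation, with a standard-library module standing in for the backend:

    import importlib

    backend = 'json'   # stands in for 'oe.package_manager.%s.manifest' % IMAGE_PKGTYPE
    mod = importlib.import_module(backend)
    assert hasattr(mod, 'dumps')   # the real code instantiates mod.PkgManifest(d, ...)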
diff --git a/meta/lib/oe/overlayfs.py b/meta/lib/oe/overlayfs.py
new file mode 100644
index 0000000000..b5d5e88e80
--- /dev/null
+++ b/meta/lib/oe/overlayfs.py
@@ -0,0 +1,48 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This file contains common functions for overlayfs and its QA check
+
+# this function is based on https://github.com/systemd/systemd/blob/main/src/basic/unit-name.c
+def escapeSystemdUnitName(path):
+ escapeMap = {
+ '/': '-',
+ '-': "\\x2d",
+ '\\': "\\x5d"
+ }
+ return "".join([escapeMap.get(c, c) for c in path.strip('/')])
+
+def strForBash(s):
+ return s.replace('\\', '\\\\')
+
+def allOverlaysUnitName(d):
+ return d.getVar('PN') + '-overlays.service'
+
+def mountUnitName(unit):
+ return escapeSystemdUnitName(unit) + '.mount'
+
+def helperUnitName(unit):
+ return escapeSystemdUnitName(unit) + '-create-upper-dir.service'
+
+def unitFileList(d):
+ fileList = []
+ overlayMountPoints = d.getVarFlags("OVERLAYFS_MOUNT_POINT")
+
+ if not overlayMountPoints:
+ bb.fatal("A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")
+
+ # check that we have required mount points set first
+ requiredMountPoints = d.getVarFlags('OVERLAYFS_WRITABLE_PATHS')
+ for mountPoint in requiredMountPoints:
+ if mountPoint not in overlayMountPoints:
+ bb.fatal("Missing required mount point for OVERLAYFS_MOUNT_POINT[%s] in your MACHINE configuration" % mountPoint)
+
+ for mountPoint in overlayMountPoints:
+ for path in d.getVarFlag('OVERLAYFS_WRITABLE_PATHS', mountPoint).split():
+ fileList.append(mountUnitName(path))
+ fileList.append(helperUnitName(path))
+
+ fileList.append(allOverlaysUnitName(d))
+
+ return fileList
+
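The escaping follows systemd's unit-name convention: drop the leading slash, turn the remaining path separators into dashes, and hex-escape ambiguous characters (systemd itself encodes a backslash as \x5c, so the \x5d value above appears to be a slip). Exercising the same logic with a hypothetical mount point:

    def escape_unit_name(path):   # same logic as escapeSystemdUnitName() above
        escape_map = {'/': '-', '-': "\\x2d", '\\': "\\x5d"}
        return "".join(escape_map.get(c, c) for c in path.strip('/'))

    assert escape_unit_name('/mnt/overlay') == 'mnt-overlay'    # -> mnt-overlay.mount
    assert escape_unit_name('/data/my-app') == 'data-my\\x2dapp'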
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
index dd700cbb0c..7d387ee81d 100644
--- a/meta/lib/oe/package.py
+++ b/meta/lib/oe/package.py
@@ -16,7 +16,11 @@ def runstrip(arg):
# 8 - shared library
# 16 - kernel module
- (file, elftype, strip) = arg
+ if len(arg) == 3:
+ (file, elftype, strip) = arg
+ extra_strip_sections = ''
+ else:
+ (file, elftype, strip, extra_strip_sections) = arg
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
@@ -40,6 +44,9 @@ def runstrip(arg):
# shared or executable:
elif elftype & 8 or elftype & 4:
stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
+ if extra_strip_sections != '':
+ for section in extra_strip_sections.split():
+ stripcmd.extend(["--remove-section=" + section])
stripcmd.append(file)
bb.debug(1, "runstrip: %s" % stripcmd)
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
deleted file mode 100644
index 35e5cff073..0000000000
--- a/meta/lib/oe/package_manager.py
+++ /dev/null
@@ -1,1863 +0,0 @@
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-from abc import ABCMeta, abstractmethod
-import os
-import glob
-import subprocess
-import shutil
-import re
-import collections
-import bb
-import tempfile
-import oe.utils
-import oe.path
-import string
-from oe.gpg_sign import get_signer
-import hashlib
-import fnmatch
-
-# this can be used by all PM backends to create the index files in parallel
-def create_index(arg):
- index_cmd = arg
-
- bb.note("Executing '%s' ..." % index_cmd)
- result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- if result:
- bb.note(result)
-
-def opkg_query(cmd_output):
- """
- This method parse the output from the package managerand return
- a dictionary with the information of the packages. This is used
- when the packages are in deb or ipk format.
- """
- verregex = re.compile(r' \([=<>]* [^ )]*\)')
- output = dict()
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- prov = []
- pkgarch = ""
- for line in cmd_output.splitlines()+['']:
- line = line.rstrip()
- if ':' in line:
- if line.startswith("Package: "):
- pkg = line.split(": ")[1]
- elif line.startswith("Architecture: "):
- arch = line.split(": ")[1]
- elif line.startswith("Version: "):
- ver = line.split(": ")[1]
- elif line.startswith("File: ") or line.startswith("Filename:"):
- filename = line.split(": ")[1]
- if "/" in filename:
- filename = os.path.basename(filename)
- elif line.startswith("Depends: "):
- depends = verregex.sub('', line.split(": ")[1])
- for depend in depends.split(", "):
- dep.append(depend)
- elif line.startswith("Recommends: "):
- recommends = verregex.sub('', line.split(": ")[1])
- for recommend in recommends.split(", "):
- dep.append("%s [REC]" % recommend)
- elif line.startswith("PackageArch: "):
- pkgarch = line.split(": ")[1]
- elif line.startswith("Provides: "):
- provides = verregex.sub('', line.split(": ")[1])
- for provide in provides.split(", "):
- prov.append(provide)
-
- # When there is a blank line save the package information
- elif not line:
- # IPK doesn't include the filename
- if not filename:
- filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
- if pkg:
- output[pkg] = {"arch":arch, "ver":ver,
- "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
- pkg = ""
- arch = ""
- ver = ""
- filename = ""
- dep = []
- prov = []
- pkgarch = ""
-
- return output
-
-def failed_postinsts_abort(pkgs, log_path):
- bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
-then please place them into pkg_postinst_ontarget_${PN} ().
-Deferring to first boot via 'exit 1' is no longer supported.
-Details of the failure are in %s.""" %(pkgs, log_path))
-
-def generate_locale_archive(d, rootfs, target_arch, localedir):
- # Pretty sure we don't need this for locale archive generation but
- # keeping it to be safe...
- locale_arch_options = { \
- "arc": ["--uint32-align=4", "--little-endian"],
- "arceb": ["--uint32-align=4", "--big-endian"],
- "arm": ["--uint32-align=4", "--little-endian"],
- "armeb": ["--uint32-align=4", "--big-endian"],
- "aarch64": ["--uint32-align=4", "--little-endian"],
- "aarch64_be": ["--uint32-align=4", "--big-endian"],
- "sh4": ["--uint32-align=4", "--big-endian"],
- "powerpc": ["--uint32-align=4", "--big-endian"],
- "powerpc64": ["--uint32-align=4", "--big-endian"],
- "powerpc64le": ["--uint32-align=4", "--little-endian"],
- "mips": ["--uint32-align=4", "--big-endian"],
- "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
- "mips64": ["--uint32-align=4", "--big-endian"],
- "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
- "mipsel": ["--uint32-align=4", "--little-endian"],
- "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
- "mips64el": ["--uint32-align=4", "--little-endian"],
- "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
- "riscv64": ["--uint32-align=4", "--little-endian"],
- "riscv32": ["--uint32-align=4", "--little-endian"],
- "i586": ["--uint32-align=4", "--little-endian"],
- "i686": ["--uint32-align=4", "--little-endian"],
- "x86_64": ["--uint32-align=4", "--little-endian"]
- }
- if target_arch in locale_arch_options:
- arch_options = locale_arch_options[target_arch]
- else:
- bb.error("locale_arch_options not found for target_arch=" + target_arch)
- bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
-
- # Need to set this so cross-localedef knows where the archive is
- env = dict(os.environ)
- env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
-
- for name in sorted(os.listdir(localedir)):
- path = os.path.join(localedir, name)
- if os.path.isdir(path):
- cmd = ["cross-localedef", "--verbose"]
- cmd += arch_options
- cmd += ["--add-to-archive", path]
- subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
-
-class Indexer(object, metaclass=ABCMeta):
- def __init__(self, d, deploy_dir):
- self.d = d
- self.deploy_dir = deploy_dir
-
- @abstractmethod
- def write_index(self):
- pass
-
-
-class RpmIndexer(Indexer):
- def write_index(self):
- self.do_write_index(self.deploy_dir)
-
- def do_write_index(self, deploy_dir):
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
- result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
- if result:
- bb.fatal(result)
-
- # Sign repomd
- if signer:
- sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (sig_type.upper() != "BIN")
- signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-class RpmSubdirIndexer(RpmIndexer):
- def write_index(self):
- bb.note("Generating package index for %s" %(self.deploy_dir))
- self.do_write_index(self.deploy_dir)
- for entry in os.walk(self.deploy_dir):
- if os.path.samefile(self.deploy_dir, entry[0]):
- for dir in entry[1]:
- if dir != 'repodata':
- dir_path = oe.path.join(self.deploy_dir, dir)
- bb.note("Generating package index for %s" %(dir_path))
- self.do_write_index(dir_path)
-
-class OpkgIndexer(Indexer):
- def write_index(self):
- arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
- "SDK_PACKAGE_ARCHS",
- ]
-
- opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
- else:
- signer = None
-
- if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
- open(os.path.join(self.deploy_dir, "Packages"), "w").close()
-
- index_cmds = set()
- index_sign_files = set()
- for arch_var in arch_vars:
- archs = self.d.getVar(arch_var)
- if archs is None:
- continue
-
- for arch in archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- pkgs_file = os.path.join(pkgs_dir, "Packages")
-
- if not os.path.isdir(pkgs_dir):
- continue
-
- if not os.path.exists(pkgs_file):
- open(pkgs_file, "w").close()
-
- index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
- (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
-
- index_sign_files.add(pkgs_file)
-
- if len(index_cmds) == 0:
- bb.note("There are no packages in %s!" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
-
- if signer:
- feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
- is_ascii_sig = (feed_sig_type.upper() != "BIN")
- for f in index_sign_files:
- signer.detach_sign(f,
- self.d.getVar('PACKAGE_FEED_GPG_NAME'),
- self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
- armor=is_ascii_sig)
-
-
-class DpkgIndexer(Indexer):
- def _create_configs(self):
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
- bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
-
- with open(os.path.join(self.apt_conf_dir, "preferences"),
- "w") as prefs_file:
- pass
- with open(os.path.join(self.apt_conf_dir, "sources.list"),
- "w+") as sources_file:
- pass
-
- with open(self.apt_conf_file, "w") as apt_conf:
- with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
- "apt", "apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- line = re.sub(r"#ROOTFS#", "/dev/null", line)
- line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- def write_index(self):
- self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
- "apt-ftparchive")
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self._create_configs()
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- pkg_archs = self.d.getVar('PACKAGE_ARCHS')
- if pkg_archs is not None:
- arch_list = pkg_archs.split()
- sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
- if sdk_pkg_archs is not None:
- for a in sdk_pkg_archs.split():
- if a not in pkg_archs:
- arch_list.append(a)
-
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
-
- apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
- gzip = bb.utils.which(os.getenv('PATH'), "gzip")
-
- index_cmds = []
- deb_dirs_found = False
- for arch in arch_list:
- arch_dir = os.path.join(self.deploy_dir, arch)
- if not os.path.isdir(arch_dir):
- continue
-
- cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
-
- cmd += "%s -fcn Packages > Packages.gz;" % gzip
-
- with open(os.path.join(arch_dir, "Release"), "w+") as release:
- release.write("Label: %s\n" % arch)
-
- cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
-
- index_cmds.append(cmd)
-
- deb_dirs_found = True
-
- if not deb_dirs_found:
- bb.note("There are no packages in %s" % self.deploy_dir)
- return
-
- oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- raise NotImplementedError('Package feed signing not implemented for dpkg')
-
-
-
-class PkgsList(object, metaclass=ABCMeta):
- def __init__(self, d, rootfs_dir):
- self.d = d
- self.rootfs_dir = rootfs_dir
-
- @abstractmethod
- def list_pkgs(self):
- pass
-
-class RpmPkgsList(PkgsList):
- def list_pkgs(self):
- return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
-
-class OpkgPkgsList(PkgsList):
- def __init__(self, d, rootfs_dir, config_file):
- super(OpkgPkgsList, self).__init__(d, rootfs_dir)
-
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- def list_pkgs(self, format=None):
- cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
-
- # opkg returns success even when it printed some
- # "Collected errors:" report to stderr. Mixing stderr into
- # stdout then leads to random failures later on when
- # parsing the output. To avoid this we need to collect both
- # output streams separately and check for empty stderr.
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
- cmd_output, cmd_stderr = p.communicate()
- cmd_output = cmd_output.decode("utf-8")
- cmd_stderr = cmd_stderr.decode("utf-8")
- if p.returncode or cmd_stderr:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
-
- return opkg_query(cmd_output)
-
-
-class DpkgPkgsList(PkgsList):
-
- def list_pkgs(self):
- cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
- "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
- "-W"]
-
- cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n")
-
- try:
- cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get the installed packages list. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- return opkg_query(cmd_output)
-
-
-class PackageManager(object, metaclass=ABCMeta):
- """
- This is an abstract class. Do not instantiate this directly.
- """
-
- def __init__(self, d, target_rootfs):
- self.d = d
- self.target_rootfs = target_rootfs
- self.deploy_dir = None
- self.deploy_lock = None
- self._initialize_intercepts()
-
- def _initialize_intercepts(self):
- bb.note("Initializing intercept dir for %s" % self.target_rootfs)
- # As there might be more than one instance of PackageManager operating at the same time
- # we need to isolate the intercept_scripts directories from each other,
- # hence the ugly hash digest in dir name.
- self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
- (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
-
- postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
- if not postinst_intercepts:
- postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
- if not postinst_intercepts_path:
- postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
- postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
-
- bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
- bb.utils.remove(self.intercepts_dir, True)
- bb.utils.mkdirhier(self.intercepts_dir)
- for intercept in postinst_intercepts:
- bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
-
- @abstractmethod
- def _handle_intercept_failure(self, failed_script):
- pass
-
- def _postpone_to_first_boot(self, postinst_intercept_hook):
- with open(postinst_intercept_hook) as intercept:
- registered_pkgs = None
- for line in intercept.read().split("\n"):
- m = re.match(r"^##PKGS:(.*)", line)
- if m is not None:
- registered_pkgs = m.group(1).strip()
- break
-
- if registered_pkgs is not None:
- bb.note("If an image is being built, the postinstalls for the following packages "
- "will be postponed for first boot: %s" %
- registered_pkgs)
-
- # call the backend dependent handler
- self._handle_intercept_failure(registered_pkgs)
-
-
- def run_intercepts(self, populate_sdk=None):
- intercepts_dir = self.intercepts_dir
-
- bb.note("Running intercept scripts:")
- os.environ['D'] = self.target_rootfs
- os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
- for script in os.listdir(intercepts_dir):
- script_full = os.path.join(intercepts_dir, script)
-
- if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
- continue
-
- # we do not want to run any multilib variant of this
- if script.startswith("delay_to_first_boot"):
- self._postpone_to_first_boot(script_full)
- continue
-
- if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
- bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
- % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- continue
-
- bb.note("> Executing %s intercept ..." % script)
-
- try:
- output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
- if output: bb.note(output.decode("utf-8"))
- except subprocess.CalledProcessError as e:
- bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
- if populate_sdk == 'host':
- bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- elif populate_sdk == 'target':
- if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
- bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
- % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- else:
- bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- else:
- if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
- bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
- % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
- self._postpone_to_first_boot(script_full)
- else:
- bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
-
- @abstractmethod
- def update(self):
- """
- Update the package manager package database.
- """
- pass
-
- @abstractmethod
- def install(self, pkgs, attempt_only=False):
- """
- Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
- True, installation failures are ignored.
- """
- pass
-
- @abstractmethod
- def remove(self, pkgs, with_dependencies=True):
- """
- Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
- is False, then any dependencies are left in place.
- """
- pass
-
- @abstractmethod
- def write_index(self):
- """
- This function creates the index files
- """
- pass
-
- @abstractmethod
- def remove_packaging_data(self):
- pass
-
- @abstractmethod
- def list_installed(self):
- pass
-
- @abstractmethod
- def extract(self, pkg):
- """
- Returns the path to a tmpdir where the contents of a package reside.
- Deleting the tmpdir is the responsibility of the caller.
- """
- pass
-
- @abstractmethod
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- """
- Add remote package feeds into repository manager configuration. The parameters
- for the feeds are set by feed_uris, feed_base_paths and feed_archs.
- See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
- for their description.
- """
- pass
-
- def install_glob(self, globs, sdk=False):
- """
- Install all packages that match a glob.
- """
- # TODO don't have sdk here but have a property on the superclass
- # (and respect in install_complementary)
- if sdk:
- pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
- else:
- pkgdatadir = self.d.getVar("PKGDATA_DIR")
-
- try:
- bb.note("Installing globbed packages...")
- cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
- pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- self.install(pkgs.split(), attempt_only=True)
- except subprocess.CalledProcessError as e:
- # Return code 1 means no packages matched
- if e.returncode != 1:
- bb.fatal("Could not compute globbed packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def install_complementary(self, globs=None):
- """
- Install complementary packages based upon the list of currently installed
- packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
- these packages, if they don't exist then no error will occur. Note: every
- backend needs to call this function explicitly after the normal package
- installation
- """
- if globs is None:
- globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
- split_linguas = set()
-
- for translation in self.d.getVar('IMAGE_LINGUAS').split():
- split_linguas.add(translation)
- split_linguas.add(translation.split('-')[0])
-
- split_linguas = sorted(split_linguas)
-
- for lang in split_linguas:
- globs += " *-locale-%s" % lang
- for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
- globs += (" " + complementary_linguas) % lang
-
- if globs is None:
- return
-
- # we need to write the list of installed packages to a file because the
- # oe-pkgdata-util reads it from a file
- with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
- pkgs = self.list_installed()
-
- provided_pkgs = set()
- for pkg in pkgs.values():
- provided_pkgs |= set(pkg.get('provs', []))
-
- output = oe.utils.format_pkg_list(pkgs, "arch")
- installed_pkgs.write(output)
- installed_pkgs.flush()
-
- cmd = ["oe-pkgdata-util",
- "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
- globs]
- exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
- if exclude:
- cmd.extend(['--exclude=' + '|'.join(exclude.split())])
- try:
- bb.note('Running %s' % cmd)
- complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
- complementary_pkgs = set(complementary_pkgs.split())
- skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
- install_pkgs = sorted(complementary_pkgs - provided_pkgs)
- bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
- ' '.join(install_pkgs),
- ' '.join(skip_pkgs)))
- self.install(install_pkgs, attempt_only=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not compute complementary packages list. Command "
- "'%s' returned %d:\n%s" %
- (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- target_arch = self.d.getVar('TARGET_ARCH')
- localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
- if os.path.exists(localedir) and os.listdir(localedir):
- generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
- # And now delete the binary locales
- self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
-
- def deploy_dir_lock(self):
- if self.deploy_dir is None:
- raise RuntimeError("deploy_dir is not set!")
-
- lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
-
- self.deploy_lock = bb.utils.lockfile(lock_file_name)
-
- def deploy_dir_unlock(self):
- if self.deploy_lock is None:
- return
-
- bb.utils.unlockfile(self.deploy_lock)
-
- self.deploy_lock = None
-
- def construct_uris(self, uris, base_paths):
- """
- Construct URIs based on the pattern uri/base_path, where 'uri' and
- 'base_path' are drawn from the corresponding array arguments, yielding
- len(uris) x len(base_paths) elements in the returned array.
- """
- def _append(arr1, arr2, sep='/'):
- res = []
- narr1 = [a.rstrip(sep) for a in arr1]
- narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
- for a1 in narr1:
- if arr2:
- for a2 in narr2:
- res.append("%s%s%s" % (a1, sep, a2))
- else:
- res.append(a1)
- return res
- return _append(uris, base_paths)
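- # A minimal sketch of what construct_uris() returns (illustrative values,
- # not from any real feed configuration):
- #   construct_uris(["http://example.com/feed"], ["rpm/all", "rpm/core2-64"])
- #   -> ["http://example.com/feed/rpm/all", "http://example.com/feed/rpm/core2-64"]
- #   construct_uris(["http://example.com/feed"], [])
- #   -> ["http://example.com/feed"]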
-
-def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
- """
- Go through our do_package_write_X dependencies and hardlink the packages we depend
- upon into the repo directory. This prevents us seeing other packages that may
- have been built that we don't depend upon and also packages for architectures we don't
- support.
- """
- import errno
-
- taskdepdata = d.getVar("BB_TASKDEPDATA", False)
- mytaskname = d.getVar("BB_RUNTASK")
- pn = d.getVar("PN")
- seendirs = set()
- multilibs = {}
-
- bb.utils.remove(subrepo_dir, recurse=True)
- bb.utils.mkdirhier(subrepo_dir)
-
- # Detect bitbake -b usage
- nodeps = d.getVar("BB_LIMITEDDEPS") or False
- if nodeps or not filterbydependencies:
- oe.path.symlink(deploydir, subrepo_dir, True)
- return
-
- start = None
- for dep in taskdepdata:
- data = taskdepdata[dep]
- if data[1] == mytaskname and data[0] == pn:
- start = dep
- break
- if start is None:
- bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
- pkgdeps = set()
- start = [start]
- seen = set(start)
- # Support direct dependencies (do_rootfs -> do_package_write_X)
- # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
- while start:
- next = []
- for dep2 in start:
- for dep in taskdepdata[dep2][3]:
- if taskdepdata[dep][0] != pn:
- if "do_" + taskname in dep:
- pkgdeps.add(dep)
- elif dep not in seen:
- next.append(dep)
- seen.add(dep)
- start = next
-
- for dep in pkgdeps:
- c = taskdepdata[dep][0]
- manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
- if not manifest:
- bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
- if not os.path.exists(manifest):
- continue
- with open(manifest, "r") as f:
- for l in f:
- l = l.strip()
- deploydir = os.path.normpath(deploydir)
- if bb.data.inherits_class('packagefeed-stability', d):
- dest = l.replace(deploydir + "-prediff", "")
- else:
- dest = l.replace(deploydir, "")
- dest = subrepo_dir + dest
- if l.endswith("/"):
- if dest not in seendirs:
- bb.utils.mkdirhier(dest)
- seendirs.add(dest)
- continue
- # Try to hardlink the file, copy if that fails
- destdir = os.path.dirname(dest)
- if destdir not in seendirs:
- bb.utils.mkdirhier(destdir)
- seendirs.add(destdir)
- try:
- os.link(l, dest)
- except OSError as err:
- if err.errno == errno.EXDEV:
- bb.utils.copyfile(l, dest)
- else:
- raise
-
-class RpmPM(PackageManager):
- def __init__(self,
- d,
- target_rootfs,
- target_vendor,
- task_name='target',
- arch_var=None,
- os_var=None,
- rpm_repo_workdir="oe-rootfs-repo",
- filterbydependencies=True,
- needfeed=True):
- super(RpmPM, self).__init__(d, target_rootfs)
- self.target_vendor = target_vendor
- self.task_name = task_name
- if arch_var == None:
- self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
- else:
- self.archs = self.d.getVar(arch_var).replace("-","_")
- if task_name == "host":
- self.primary_arch = self.d.getVar('SDK_ARCH')
- else:
- self.primary_arch = self.d.getVar('MACHINE_ARCH')
-
- if needfeed:
- self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
- create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
-
- self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
- self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
- self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
- self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- def _configure_dnf(self):
- # libsolv handles 'noarch' internally; we don't need to specify it explicitly
- archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
- # This prevents accidental matching against libsolv's built-in policies
- if len(archs) <= 1:
- archs = archs + ["bogusarch"]
- # This architecture needs to be upfront so that packages using it are properly prioritized
- archs = ["sdk_provides_dummy_target"] + archs
- confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
- bb.utils.mkdirhier(confdir)
- open(confdir + "arch", 'w').write(":".join(archs))
- distro_codename = self.d.getVar('DISTRO_CODENAME')
- open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
-
- open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
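- # Illustrative outcome (arch names are examples only): for archs
- # "core2_64 x86_64 all" this writes etc/dnf/vars/arch as
- # "sdk_provides_dummy_target:x86_64:core2_64" and etc/dnf/vars/releasever
- # as the DISTRO_CODENAME value (or empty).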
-
-
- def _configure_rpm(self):
- # We need to configure rpm to use our primary package architecture as the installation architecture,
- # and to make it compatible with other package architectures that we use.
- # Otherwise it will refuse to proceed with package installation.
- platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
- rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
- bb.utils.mkdirhier(platformconfdir)
- open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
- with open(rpmrcconfdir + "rpmrc", 'w') as f:
- f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
- f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
-
- open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
- if self.d.getVar('RPM_PREFER_ELF_ARCH'):
- open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
-
- if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
- signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
- pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
- signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
- rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
- cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Importing GPG key failed. Command '%s' "
- "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
- def create_configs(self):
- self._configure_dnf()
- self._configure_rpm()
-
- def write_index(self):
- lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
- lf = bb.utils.lockfile(lockfilename, False)
- RpmIndexer(self.d, self.rpm_repo_dir).write_index()
- bb.utils.unlockfile(lf)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- from urllib.parse import urlparse
-
- if feed_uris == "":
- return
-
- gpg_opts = ''
- if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
- gpg_opts += 'repo_gpgcheck=1\n'
- gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
-
- if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
- gpg_opts += 'gpgcheck=0\n'
-
- bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
- remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- for uri in remote_uris:
- repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
- if feed_archs is not None:
- for arch in feed_archs.split():
- repo_uri = uri + "/" + arch
- repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
- repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
- else:
- repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
- repo_uri = uri
- open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
- "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
-
- def _prepare_pkg_transaction(self):
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
-
- def install(self, pkgs, attempt_only = False):
- if len(pkgs) == 0:
- return
- self._prepare_pkg_transaction()
-
- bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
- package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
- exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
-
- output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
- (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
- (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
- (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
- ["install"] +
- pkgs)
-
- failed_scriptlets_pkgnames = collections.OrderedDict()
- for line in output.splitlines():
- if line.startswith("Error in POSTIN scriptlet in rpm package"):
- failed_scriptlets_pkgnames[line.split()[-1]] = True
-
- if len(failed_scriptlets_pkgnames) > 0:
- failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
-
- def remove(self, pkgs, with_dependencies = True):
- if not pkgs:
- return
-
- self._prepare_pkg_transaction()
-
- if with_dependencies:
- self._invoke_dnf(["remove"] + pkgs)
- else:
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
-
- try:
- bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
- output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
-
- def upgrade(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["upgrade"])
-
- def autoremove(self):
- self._prepare_pkg_transaction()
- self._invoke_dnf(["autoremove"])
-
- def remove_packaging_data(self):
- self._invoke_dnf(["clean", "all"])
- for dir in self.packaging_data_dirs:
- bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
-
- def backup_packaging_data(self):
- # Save the packaging dirs for incremental rpm image generation
- if os.path.exists(self.saved_packaging_data):
- bb.utils.remove(self.saved_packaging_data, True)
- for i in self.packaging_data_dirs:
- source_dir = oe.path.join(self.target_rootfs, i)
- target_dir = oe.path.join(self.saved_packaging_data, i)
- if os.path.isdir(source_dir):
- shutil.copytree(source_dir, target_dir, symlinks=True)
- elif os.path.isfile(source_dir):
- shutil.copy2(source_dir, target_dir)
-
- def recovery_packaging_data(self):
- # Move the rpmlib back
- if os.path.exists(self.saved_packaging_data):
- for i in self.packaging_data_dirs:
- target_dir = oe.path.join(self.target_rootfs, i)
- if os.path.exists(target_dir):
- bb.utils.remove(target_dir, True)
- source_dir = oe.path.join(self.saved_packaging_data, i)
- if os.path.isdir(source_dir):
- shutil.copytree(source_dir, target_dir, symlinks=True)
- elif os.path.isfile(source_dir):
- shutil.copy2(source_dir, target_dir)
-
- def list_installed(self):
- output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
- print_output = False)
- packages = {}
- current_package = None
- current_deps = None
- current_state = "initial"
- for line in output.splitlines():
- if line.startswith("Package:"):
- package_info = line.split(" ")[1:]
- current_package = package_info[0]
- package_arch = package_info[1]
- package_version = package_info[2]
- package_rpm = package_info[3]
- packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
- current_deps = []
- elif line.startswith("Dependencies:"):
- current_state = "dependencies"
- elif line.startswith("Recommendations"):
- current_state = "recommendations"
- elif line.startswith("DependenciesEndHere:"):
- current_state = "initial"
- packages[current_package]["deps"] = current_deps
- elif len(line) > 0:
- if current_state == "dependencies":
- current_deps.append(line)
- elif current_state == "recommendations":
- current_deps.append("%s [REC]" % line)
-
- return packages
-
- def update(self):
- self._invoke_dnf(["makecache", "--refresh"])
-
- def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
- os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
-
- dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
- standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
- "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
- "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
- "--installroot=%s" % (self.target_rootfs),
- "--setopt=logdir=%s" % (self.d.getVar('T'))
- ]
- if hasattr(self, "rpm_repo_dir"):
- standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
- cmd = [dnf_cmd] + standard_dnf_args + dnf_args
- bb.note('Running %s' % ' '.join(cmd))
- try:
- output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
- if print_output:
- bb.debug(1, output)
- return output
- except subprocess.CalledProcessError as e:
- if print_output:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- else:
- (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
- "'%s' returned %d:" % (' '.join(cmd), e.returncode))
- return e.output.decode("utf-8")
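- # A representative dnf invocation assembled above (paths illustrative):
- #   dnf -v --rpmverbosity=info -y -c <rootfs>/etc/dnf/dnf.conf \
- #       --setopt=reposdir=<rootfs>/etc/yum.repos.d --installroot=<rootfs> \
- #       --setopt=logdir=<T> --repofrompath=oe-repo,<rpm_repo_dir> install <pkgs>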
-
- def dump_install_solution(self, pkgs):
- open(self.solution_manifest, 'w').write(" ".join(pkgs))
- return pkgs
-
- def load_old_install_solution(self):
- if not os.path.exists(self.solution_manifest):
- return []
- with open(self.solution_manifest, 'r') as fd:
- return fd.read().split()
-
- def _script_num_prefix(self, path):
- files = os.listdir(path)
- numbers = set()
- numbers.add(99)
- for f in files:
- numbers.add(int(f.split("-")[0]))
- return max(numbers) + 1
-
- def save_rpmpostinst(self, pkg):
- bb.note("Saving postinstall script of %s" % (pkg))
- cmd = bb.utils.which(os.getenv('PATH'), "rpm")
- args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
-
- try:
- output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Could not invoke rpm. Command "
- "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
-
- # may need to prepend #!/bin/sh to output
-
- target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
- bb.utils.mkdirhier(target_path)
- num = self._script_num_prefix(target_path)
- saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
- open(saved_script_name, 'w').write(output)
- os.chmod(saved_script_name, 0o755)
-
- def _handle_intercept_failure(self, registered_pkgs):
- rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
- bb.utils.mkdirhier(rpm_postinsts_dir)
-
- # Save the package postinstalls in /etc/rpm-postinsts
- for pkg in registered_pkgs.split():
- self.save_rpmpostinst(pkg)
-
- def extract(self, pkg):
- output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
- pkg_name = output.splitlines()[-1]
- if not pkg_name.endswith(".rpm"):
- bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
- pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
-
- cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
- rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'."
- "File %s doesn't exists" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
-
- try:
- cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- os.chdir(current_dir)
-
- return tmp_dir
-
-
-class OpkgDpkgPM(PackageManager):
- def __init__(self, d, target_rootfs):
- """
- This is an abstract class. Do not instantiate this directly.
- """
- super(OpkgDpkgPM, self).__init__(d, target_rootfs)
-
- def package_info(self, pkg, cmd):
- """
- Returns a dictionary with the package info.
-
- This method extracts the common parts for Opkg and Dpkg
- """
-
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to list available packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
- return opkg_query(output)
-
- def extract(self, pkg, pkg_info):
- """
- Returns the path to a tmpdir where the contents of a package reside.
-
- Deleting the tmpdir is the responsibility of the caller.
-
- This method extracts the common parts for Opkg and Dpkg
- """
-
- ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
- tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
- pkg_path = pkg_info[pkg]["filepath"]
-
- if not os.path.isfile(pkg_path):
- bb.fatal("Unable to extract package for '%s'."
- "File %s doesn't exists" % (pkg, pkg_path))
-
- tmp_dir = tempfile.mkdtemp()
- current_dir = os.getcwd()
- os.chdir(tmp_dir)
- data_tar = 'data.tar.xz'
-
- try:
- cmd = [ar_cmd, 'x', pkg_path]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- cmd = [tar_cmd, 'xf', data_tar]
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
- except OSError as e:
- bb.utils.remove(tmp_dir, recurse=True)
- bb.fatal("Unable to extract %s package. Command '%s' "
- "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
-
- bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
- bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
- bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
- os.chdir(current_dir)
-
- return tmp_dir
-
- def _handle_intercept_failure(self, registered_pkgs):
- self.mark_packages("unpacked", registered_pkgs.split())
-
-class OpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
- super(OpkgPM, self).__init__(d, target_rootfs)
-
- self.config_file = config_file
- self.pkg_archs = archs
- self.task_name = task_name
-
- self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
- self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
- self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
- self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
- self.opkg_args += self.d.getVar("OPKG_ARGS")
-
- if prepare_index:
- create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
-
- self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
-
- bb.utils.mkdirhier(self.opkg_dir)
-
- self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
- if not os.path.exists(self.d.expand('${T}/saved')):
- bb.utils.mkdirhier(self.d.expand('${T}/saved'))
-
- self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
- if self.from_feeds:
- self._create_custom_config()
- else:
- self._create_config()
-
- self.indexer = OpkgIndexer(self.d, self.deploy_dir)
-
- def mark_packages(self, status_tag, packages=None):
- """
- This function will change a package's status in the /var/lib/opkg/status
- file. If 'packages' is None, the status_tag will be applied to all
- packages.
- """
- status_file = os.path.join(self.opkg_dir, "status")
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
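- # Illustration of the rewrite above (made-up stanza): with status_tag
- # "installed", the stanza
- #   Package: libfoo
- #   Status: install ok unpacked
- # becomes
- #   Package: libfoo
- #   Status: install ok installed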
-
- def _create_custom_config(self):
- bb.note("Building from feeds activated!")
-
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
- feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
-
- if feed_match is not None:
- feed_name = feed_match.group(1)
- feed_uri = feed_match.group(2)
-
- bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
-
- config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
-
- """
- Allow using the package deploy directory contents as a quick devel-testing
- feed. This creates individual feed configs for each arch subdir of those
- specified as compatible for the current machine.
- NOTE: Development-helper feature, NOT a full-fledged feed.
- """
- if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
- for arch in self.pkg_archs.split():
- cfg_file_name = os.path.join(self.target_rootfs,
- self.d.getVar("sysconfdir"),
- "opkg",
- "local-%s-feed.conf" % arch)
-
- with open(cfg_file_name, "w+") as cfg_file:
- cfg_file.write("src/gz local-%s %s/%s" %
- (arch,
- self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
- arch))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore; we need to add
- # info_dir, lists_dir and status_file to the config file if OPKGLIBDIR
- # doesn't have the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
-
- def _create_config(self):
- with open(self.config_file, "w+") as config_file:
- priority = 1
- for arch in self.pkg_archs.split():
- config_file.write("arch %s %d\n" % (arch, priority))
- priority += 5
-
- config_file.write("src oe file:%s\n" % self.deploy_dir)
-
- for arch in self.pkg_archs.split():
- pkgs_dir = os.path.join(self.deploy_dir, arch)
- if os.path.isdir(pkgs_dir):
- config_file.write("src oe-%s file:%s\n" %
- (arch, pkgs_dir))
-
- if self.d.getVar('OPKGLIBDIR') != '/var/lib':
- # There is no command line option for this anymore; we need to add
- # info_dir, lists_dir and status_file to the config file if OPKGLIBDIR
- # doesn't have the default value of "/var/lib" as defined in opkg:
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
- # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
- config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
- config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
- config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
- % self.target_rootfs)
-
- os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
- archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
-
- with open(rootfs_config, "w+") as config_file:
- uri_iterator = 0
- for uri in feed_uris:
- if archs:
- for arch in archs:
- if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
- continue
- bb.note('Adding opkg feed url-%s-%d (%s)' %
- (arch, uri_iterator, uri))
- config_file.write("src/gz uri-%s-%d %s/%s\n" %
- (arch, uri_iterator, uri, arch))
- else:
- bb.note('Adding opkg feed url-%d (%s)' %
- (uri_iterator, uri))
- config_file.write("src/gz uri-%d %s\n" %
- (uri_iterator, uri))
-
- uri_iterator += 1
-
- def update(self):
- self.deploy_dir_lock()
-
- cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- self.deploy_dir_unlock()
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if not pkgs:
- return
-
- cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
- for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
- cmd += " --add-exclude %s" % exclude
- for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
- cmd += " --add-ignore-recommends %s" % bad_recommendation
- cmd += " install "
- cmd += " ".join(pkgs)
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- failed_pkgs = []
- for line in output.split('\n'):
- if line.endswith("configuration required on target."):
- bb.warn(line)
- failed_pkgs.append(line.split(".")[0])
- if failed_pkgs:
- failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- def remove(self, pkgs, with_dependencies=True):
- if not pkgs:
- return
-
- if with_dependencies:
- cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
- else:
- cmd = "%s %s --force-depends remove %s" % \
- (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
-
- try:
- bb.note(cmd)
- output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def remove_packaging_data(self):
- bb.utils.remove(self.opkg_dir, True)
- # Recreate the directory; it's needed by the PM lock
- bb.utils.mkdirhier(self.opkg_dir)
-
- def remove_lists(self):
- if not self.from_feeds:
- bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
-
- def list_installed(self):
- return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
-
- def dummy_install(self, pkgs):
- """
- This function dummy-installs pkgs and returns the output log.
- """
- if len(pkgs) == 0:
- return
-
- # Create a temp dir as the opkg root for the dummy installation
- temp_rootfs = self.d.expand('${T}/opkg')
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- if opkg_lib_dir[0] == "/":
- opkg_lib_dir = opkg_lib_dir[1:]
- temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
- bb.utils.mkdirhier(temp_opkg_dir)
-
- opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
- opkg_args += self.d.getVar("OPKG_ARGS")
-
- cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
- try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- # Dummy installation
- cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
- opkg_args,
- ' '.join(pkgs))
- try:
- output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to dummy install packages. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- bb.utils.remove(temp_rootfs, True)
-
- return output
-
- def backup_packaging_data(self):
- # Save the opkglib for incremental ipk image generation
- if os.path.exists(self.saved_opkg_dir):
- bb.utils.remove(self.saved_opkg_dir, True)
- shutil.copytree(self.opkg_dir,
- self.saved_opkg_dir,
- symlinks=True)
-
- def recover_packaging_data(self):
- # Move the opkglib back
- if os.path.exists(self.saved_opkg_dir):
- if os.path.exists(self.opkg_dir):
- bb.utils.remove(self.opkg_dir, True)
-
- bb.note('Recover packaging data')
- shutil.copytree(self.saved_opkg_dir,
- self.opkg_dir,
- symlinks=True)
-
- def package_info(self, pkg):
- """
- Returns a dictionary with the package info.
- """
- cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
- pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["arch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- def extract(self, pkg):
- """
- Returns the path to a tmpdir where the contents of a package reside.
-
- Deleting the tmpdir is the responsibility of the caller.
- """
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
-
- return tmp_dir
-
-class DpkgPM(OpkgDpkgPM):
- def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
- super(DpkgPM, self).__init__(d, target_rootfs)
- self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
-
- create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
-
- if apt_conf_dir is None:
- self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
- else:
- self.apt_conf_dir = apt_conf_dir
- self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
- self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
- self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
-
- self.apt_args = d.getVar("APT_ARGS")
-
- self.all_arch_list = archs.split()
- all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
- self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
-
- self._create_configs(archs, base_archs)
-
- self.indexer = DpkgIndexer(self.d, self.deploy_dir)
-
- def mark_packages(self, status_tag, packages=None):
- """
- This function will change a package's status in the /var/lib/dpkg/status
- file. If 'packages' is None, the status_tag will be applied to all
- packages.
- """
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
-
- with open(status_file, "r") as sf:
- with open(status_file + ".tmp", "w+") as tmp_sf:
- if packages is None:
- tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
- r"Package: \1\n\2Status: \3%s" % status_tag,
- sf.read()))
- else:
- if type(packages).__name__ != "list":
- raise TypeError("'packages' should be a list object")
-
- status = sf.read()
- for pkg in packages:
- status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
- r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
- status)
-
- tmp_sf.write(status)
-
- os.rename(status_file + ".tmp", status_file)
-
- def run_pre_post_installs(self, package_name=None):
- """
- Run the pre/post installs for package "package_name". If package_name is
- None, then run all pre/post install scriptlets.
- """
- info_dir = self.target_rootfs + "/var/lib/dpkg/info"
- ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
- control_scripts = [
- ControlScript(".preinst", "Preinstall", "install"),
- ControlScript(".postinst", "Postinstall", "configure")]
- status_file = self.target_rootfs + "/var/lib/dpkg/status"
- installed_pkgs = []
-
- with open(status_file, "r") as status:
- for line in status.read().split('\n'):
- m = re.match(r"^Package: (.*)", line)
- if m is not None:
- installed_pkgs.append(m.group(1))
-
- if package_name is not None and package_name not in installed_pkgs:
- return
-
- os.environ['D'] = self.target_rootfs
- os.environ['OFFLINE_ROOT'] = self.target_rootfs
- os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
- os.environ['INTERCEPT_DIR'] = self.intercepts_dir
- os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
-
- for pkg_name in installed_pkgs:
- for control_script in control_scripts:
- p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
- if os.path.exists(p_full):
- try:
- bb.note("Executing %s for package: %s ..." %
- (control_script.name.lower(), pkg_name))
- output = subprocess.check_output([p_full, control_script.argument],
- stderr=subprocess.STDOUT).decode("utf-8")
- bb.note(output)
- except subprocess.CalledProcessError as e:
- bb.warn("%s for package %s failed with %d:\n%s" %
- (control_script.name, pkg_name, e.returncode,
- e.output.decode("utf-8")))
- failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
-
- def update(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- self.deploy_dir_lock()
-
- cmd = "%s update" % self.apt_get_cmd
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to update the package index files. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- self.deploy_dir_unlock()
-
- def install(self, pkgs, attempt_only=False):
- if attempt_only and len(pkgs) == 0:
- return
-
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s install --force-yes --allow-unauthenticated --no-remove %s" % \
- (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
-
- try:
- bb.note("Installing the following packages: %s" % ' '.join(pkgs))
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
- "Command '%s' returned %d:\n%s" %
- (cmd, e.returncode, e.output.decode("utf-8")))
-
- # rename *.dpkg-new files/dirs
- for root, dirs, files in os.walk(self.target_rootfs):
- for dir in dirs:
- new_dir = re.sub(r"\.dpkg-new", "", dir)
- if dir != new_dir:
- os.rename(os.path.join(root, dir),
- os.path.join(root, new_dir))
-
- for file in files:
- new_file = re.sub(r"\.dpkg-new", "", file)
- if file != new_file:
- os.rename(os.path.join(root, file),
- os.path.join(root, new_file))
-
-
- def remove(self, pkgs, with_dependencies=True):
- if not pkgs:
- return
-
- if with_dependencies:
- os.environ['APT_CONFIG'] = self.apt_conf_file
- cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
- else:
- cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
- " -P --force-depends %s" % \
- (bb.utils.which(os.getenv('PATH'), "dpkg"),
- self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Unable to remove packages. Command '%s' "
- "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
-
- def write_index(self):
- self.deploy_dir_lock()
-
- result = self.indexer.write_index()
-
- self.deploy_dir_unlock()
-
- if result is not None:
- bb.fatal(result)
-
- def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
- if feed_uris == "":
- return
-
- sources_conf = os.path.join("%s/etc/apt/sources.list"
- % self.target_rootfs)
- arch_list = []
-
- if feed_archs is None:
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
- else:
- arch_list = feed_archs.split()
-
- feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
-
- with open(sources_conf, "w+") as sources_file:
- for uri in feed_uris:
- if arch_list:
- for arch in arch_list:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s/%s ./\n" %
- (uri, arch))
- else:
- bb.note('Adding dpkg channel at (%s)' % uri)
- sources_file.write("deb %s ./\n" % uri)
-
- def _create_configs(self, archs, base_archs):
- base_archs = re.sub(r"_", r"-", base_archs)
-
- if os.path.exists(self.apt_conf_dir):
- bb.utils.remove(self.apt_conf_dir, True)
-
- bb.utils.mkdirhier(self.apt_conf_dir)
- bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
- bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
-
- arch_list = []
- for arch in self.all_arch_list:
- if not os.path.exists(os.path.join(self.deploy_dir, arch)):
- continue
- arch_list.append(arch)
-
- with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
- priority = 801
- for arch in arch_list:
- prefs_file.write(
- "Package: *\n"
- "Pin: release l=%s\n"
- "Pin-Priority: %d\n\n" % (arch, priority))
-
- priority += 5
-
- pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
- for pkg in pkg_exclude.split():
- prefs_file.write(
- "Package: %s\n"
- "Pin: release *\n"
- "Pin-Priority: -1\n\n" % pkg)
-
- arch_list.reverse()
-
- with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
- for arch in arch_list:
- sources_file.write("deb file:%s/ ./\n" %
- os.path.join(self.deploy_dir, arch))
-
- base_arch_list = base_archs.split()
- multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
- for variant in multilib_variants.split():
- localdata = bb.data.createCopy(self.d)
- variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
- orig_arch = localdata.getVar("DPKG_ARCH")
- localdata.setVar("DEFAULTTUNE", variant_tune)
- variant_arch = localdata.getVar("DPKG_ARCH")
- if variant_arch not in base_arch_list:
- base_arch_list.append(variant_arch)
-
- with open(self.apt_conf_file, "w+") as apt_conf:
- with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
- for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(r" Architecture \".*\";$", line)
- architectures = ""
- if match_arch:
- for base_arch in base_arch_list:
- architectures += "\"%s\";" % base_arch
- apt_conf.write(" Architectures {%s};\n" % architectures);
- apt_conf.write(" Architecture \"%s\";\n" % base_archs)
- else:
- line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
- line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
- apt_conf.write(line + "\n")
-
- target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
-
- bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
-
- if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
- open(os.path.join(target_dpkg_dir, "status"), "w+").close()
- if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
- open(os.path.join(target_dpkg_dir, "available"), "w+").close()
-
- def remove_packaging_data(self):
- bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
- bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
-
- def fix_broken_dependencies(self):
- os.environ['APT_CONFIG'] = self.apt_conf_file
-
- cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)
-
- try:
- subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot fix broken dependencies. Command '%s' "
- "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
- def list_installed(self):
- return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
-
- def package_info(self, pkg):
- """
- Returns a dictionary with the package info.
- """
- cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
- pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
-
- pkg_arch = pkg_info[pkg]["pkgarch"]
- pkg_filename = pkg_info[pkg]["filename"]
- pkg_info[pkg]["filepath"] = \
- os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
-
- return pkg_info
-
- def extract(self, pkg):
- """
- Returns the path to a tmpdir where the contents of a package reside.
-
- Deleting the tmpdir is the responsibility of the caller.
- """
- pkg_info = self.package_info(pkg)
- if not pkg_info:
- bb.fatal("Unable to get information for package '%s' while "
- "trying to extract the package." % pkg)
-
- tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
-
- return tmp_dir
-
-def generate_index_files(d):
- classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
-
- indexer_map = {
- "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
- "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
- "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
- }
-
- result = None
-
- for pkg_class in classes:
- if not pkg_class in indexer_map:
- continue
-
- if os.path.exists(indexer_map[pkg_class][1]):
- result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
-
- if result is not None:
- bb.fatal(result)
diff --git a/meta/lib/oe/package_manager/__init__.py b/meta/lib/oe/package_manager/__init__.py
new file mode 100644
index 0000000000..80bc1a6bc6
--- /dev/null
+++ b/meta/lib/oe/package_manager/__init__.py
@@ -0,0 +1,556 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import re
+import collections
+import bb
+import tempfile
+import oe.utils
+import oe.path
+import string
+from oe.gpg_sign import get_signer
+import hashlib
+import fnmatch
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+ index_cmd = arg
+
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ if result:
+ bb.note(result)
+
+def opkg_query(cmd_output):
+ """
+ This method parses the output from the package manager and returns
+ a dictionary with the information of the packages. This is used
+ when the packages are in deb or ipk format.
+ """
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
+ output = dict()
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+ for line in cmd_output.splitlines()+['']:
+ line = line.rstrip()
+ if ':' in line:
+ if line.startswith("Package: "):
+ pkg = line.split(": ")[1]
+ elif line.startswith("Architecture: "):
+ arch = line.split(": ")[1]
+ elif line.startswith("Version: "):
+ ver = line.split(": ")[1]
+ elif line.startswith("File: ") or line.startswith("Filename:"):
+ filename = line.split(": ")[1]
+ if "/" in filename:
+ filename = os.path.basename(filename)
+ elif line.startswith("Depends: "):
+ depends = verregex.sub('', line.split(": ")[1])
+ for depend in depends.split(", "):
+ dep.append(depend)
+ elif line.startswith("Recommends: "):
+ recommends = verregex.sub('', line.split(": ")[1])
+ for recommend in recommends.split(", "):
+ dep.append("%s [REC]" % recommend)
+ elif line.startswith("PackageArch: "):
+ pkgarch = line.split(": ")[1]
+ elif line.startswith("Provides: "):
+ provides = verregex.sub('', line.split(": ")[1])
+ for provide in provides.split(", "):
+ prov.append(provide)
+
+ # When there is a blank line, save the package information
+ elif not line:
+ # IPK doesn't include the filename
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ if pkg:
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ prov = []
+ pkgarch = ""
+
+ return output
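+# A minimal illustration of opkg_query() (field values are made up):
+#   opkg_query("Package: libfoo\nArchitecture: core2-64\nVersion: 1.0-r0\n")
+#   -> {"libfoo": {"arch": "core2-64", "ver": "1.0-r0",
+#                  "filename": "libfoo_1.0-r0_core2-64.ipk",
+#                  "deps": [], "pkgarch": "", "provs": []}}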
+
+def failed_postinsts_abort(pkgs, log_path):
+ bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget:${PN} ().
+Deferring to first boot via 'exit 1' is no longer supported.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+ # Pretty sure we don't need this for locale archive generation but
+ # keeping it to be safe...
+ locale_arch_options = { \
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "powerpc64le": ["--uint32-align=4", "--little-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "riscv64": ["--uint32-align=4", "--little-endian"],
+ "riscv32": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in sorted(os.listdir(localedir)):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
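+# Per locale subdirectory, the loop above effectively runs (shown for e.g.
+# an aarch64 target; paths are illustrative):
+#   LOCALEARCHIVE=<localedir>/locale-archive cross-localedef --verbose \
+#       --uint32-align=4 --little-endian --add-to-archive <localedir>/<name>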
+
+class Indexer(object, metaclass=ABCMeta):
+ def __init__(self, d, deploy_dir):
+ self.d = d
+ self.deploy_dir = deploy_dir
+
+ @abstractmethod
+ def write_index(self):
+ pass
+
+class PkgsList(object, metaclass=ABCMeta):
+ def __init__(self, d, rootfs_dir):
+ self.d = d
+ self.rootfs_dir = rootfs_dir
+
+ @abstractmethod
+ def list_pkgs(self):
+ pass
+
+class PackageManager(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ def __init__(self, d, target_rootfs):
+ self.d = d
+ self.target_rootfs = target_rootfs
+ self.deploy_dir = None
+ self.deploy_lock = None
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+ # hence the ugly hash digest in dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+ (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
+
+ postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+ if not postinst_intercepts:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+ if not postinst_intercepts_path:
+ postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+ bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
+ bb.utils.remove(self.intercepts_dir, True)
+ bb.utils.mkdirhier(self.intercepts_dir)
+ for intercept in postinst_intercepts:
+ shutil.copy(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match(r"^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
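+ # The deferred hooks scanned above carry their package list in a marker
+ # line, e.g. (illustrative): "##PKGS: pkg-a pkg-b pkg-c"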
+
+
+ def run_intercepts(self, populate_sdk=None):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ # we do not want to run any multilib variant of this
+ if script.startswith("delay_to_first_boot"):
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ if populate_sdk == 'host':
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ elif populate_sdk == 'target':
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ else:
+ if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ self._postpone_to_first_boot(script_full)
+ else:
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+
+ @abstractmethod
+ def update(self):
+ """
+ Update the package manager package database.
+ """
+ pass
+
+ @abstractmethod
+ def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
+ pass
+
+ @abstractmethod
+ def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
+ pass
+
+ @abstractmethod
+ def write_index(self):
+ """
+ This function creates the index files
+ """
+ pass
+
+ @abstractmethod
+ def remove_packaging_data(self):
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ pass
+
+ @abstractmethod
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir where the contents of a package reside.
+ Deleting the tmpdir is the responsibility of the caller.
+ """
+ pass
+
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR_SDK")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ # Popen never raises CalledProcessError, so check the exit status
+ # explicitly; a return code of 1 means no packages matched
+ if proc.returncode not in (0, 1):
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), proc.returncode, stdout.decode("utf-8")))
+ pkgs = stdout.decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+
+ def install_complementary(self, globs=None):
+ """
+ Install complementary packages based upon the list of currently installed
+ packages, e.g. locales, *-dev, *-dbg, etc. Note: every backend needs to
+ call this function explicitly after the normal package installation.
+ """
+ if globs is None:
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
+ split_linguas = set()
+
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
+ split_linguas.add(translation)
+ split_linguas.add(translation.split('-')[0])
+
+ split_linguas = sorted(split_linguas)
+
+ for lang in split_linguas:
+ globs += " *-locale-%s" % lang
+ for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
+ globs += (" " + complementary_linguas) % lang
+
+ if globs is None:
+ return
+
+ # we need to write the list of installed packages to a file because
+ # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+
+ provided_pkgs = set()
+ for pkg in pkgs.values():
+ provided_pkgs |= set(pkg.get('provs', []))
+
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ bb.note('Running %s' % cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ if stderr: bb.note(stderr.decode("utf-8"))
+ # Popen never raises CalledProcessError, so check the exit status
+ # explicitly
+ if proc.returncode != 0:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), proc.returncode, stdout.decode("utf-8")))
+ complementary_pkgs = set(stdout.decode("utf-8").split())
+ skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
+ install_pkgs = sorted(complementary_pkgs - provided_pkgs)
+ bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
+ ' '.join(install_pkgs),
+ ' '.join(skip_pkgs)))
+ self.install(install_pkgs)
+
+ if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
+ target_arch = self.d.getVar('TARGET_ARCH')
+ localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+ if os.path.exists(localedir) and os.listdir(localedir):
+ generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+ # And now delete the binary locales
+ self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+
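To make the glob construction above concrete, here is a small standalone sketch (not part of the patch) of how IMAGE_LINGUAS expands into extra locale globs; the variable values are hypothetical:

    image_linguas = "en-us pt-br"        # hypothetical IMAGE_LINGUAS
    complementary = "*-locale-%s-doc"    # hypothetical IMAGE_LINGUAS_COMPLEMENTARY entry

    split_linguas = set()
    for translation in image_linguas.split():
        split_linguas.add(translation)                # e.g. "en-us"
        split_linguas.add(translation.split('-')[0])  # e.g. "en"

    globs = ""
    for lang in sorted(split_linguas):
        globs += " *-locale-%s" % lang
        globs += (" " + complementary) % lang

    print(globs.strip())
    # *-locale-en *-locale-en-doc *-locale-en-us *-locale-en-us-doc
    # *-locale-pt *-locale-pt-doc *-locale-pt-br *-locale-pt-br-doc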
+ def deploy_dir_lock(self):
+ if self.deploy_dir is None:
+ raise RuntimeError("deploy_dir is not set!")
+
+ lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+ self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+ def deploy_dir_unlock(self):
+ if self.deploy_lock is None:
+ return
+
+ bb.utils.unlockfile(self.deploy_lock)
+
+ self.deploy_lock = None
+
+ def construct_uris(self, uris, base_paths):
+ """
+ Construct URIs of the form uri/base_path, pairing every element of the
+ 'uris' argument with every element of 'base_paths', which yields
+ len(uris) x len(base_paths) elements in the returned list.
+ """
+ def _append(arr1, arr2, sep='/'):
+ res = []
+ narr1 = [a.rstrip(sep) for a in arr1]
+ narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
+ for a1 in narr1:
+ if arr2:
+ for a2 in narr2:
+ res.append("%s%s%s" % (a1, sep, a2))
+ else:
+ res.append(a1)
+ return res
+ return _append(uris, base_paths)
+
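As a quick illustration of the cross-product behaviour described in the docstring, here is a simplified standalone re-implementation (not part of the patch); the feed URIs and base paths are made up:

    uris = ["http://feed.example.com", "file:///opt/feeds/"]
    base_paths = ["ipk", "/deb/"]

    def construct_uris(uris, base_paths):
        # Normalise separators, then pair every URI with every base path
        narr1 = [u.rstrip('/') for u in uris]
        narr2 = [p.strip('/') for p in base_paths]
        if not base_paths:
            return narr1
        return ["%s/%s" % (u, p) for u in narr1 for p in narr2]

    print(construct_uris(uris, base_paths))
    # ['http://feed.example.com/ipk', 'http://feed.example.com/deb',
    #  'file:///opt/feeds/ipk', 'file:///opt/feeds/deb']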
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+ upon into the repo directory. This prevents us from seeing other packages that may
+ have been built but that we don't depend upon, as well as packages for architectures
+ we don't support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ bb.utils.remove(subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourselves in BB_TASKDEPDATA?")
+ pkgdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> do_package_write_X)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
+ while start:
+ next_deps = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ pkgdeps.add(dep)
+ elif dep not in seen:
+ next_deps.append(dep)
+ seen.add(dep)
+ start = next_deps
+
+ for dep in pkgdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ deploydir = os.path.normpath(deploydir)
+ if bb.data.inherits_class('packagefeed-stability', d):
+ dest = l.replace(deploydir + "-prediff", "")
+ else:
+ dest = l.replace(deploydir, "")
+ dest = subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
+
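The hardlink-with-copy-fallback step above is worth isolating. The sketch below (not part of the patch) shows the same EXDEV handling, with plain shutil standing in for bb.utils.copyfile:

    import errno
    import os
    import shutil

    def link_or_copy(src, dest):
        # Hardlinks are cheap, but os.link() fails with EXDEV when src and
        # dest live on different filesystems; fall back to a real copy then.
        try:
            os.link(src, dest)
        except OSError as err:
            if err.errno == errno.EXDEV:
                shutil.copyfile(src, dest)
            else:
                raise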
+def generate_index_files(d):
+ from oe.package_manager.rpm import RpmSubdirIndexer
+ from oe.package_manager.ipk import OpkgIndexer
+ from oe.package_manager.deb import DpkgIndexer
+
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
+
+ indexer_map = {
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
+ }
+
+ result = None
+
+ for pkg_class in classes:
+ if pkg_class not in indexer_map:
+ continue
+
+ if os.path.exists(indexer_map[pkg_class][1]):
+ result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+ if result is not None:
+ bb.fatal(result)
diff --git a/meta/lib/oe/package_manager/deb/__init__.py b/meta/lib/oe/package_manager/deb/__init__.py
new file mode 100644
index 0000000000..9f112ae25b
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/__init__.py
@@ -0,0 +1,503 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import subprocess
+from oe.package_manager import *
+
+class DpkgIndexer(Indexer):
+ def _create_configs(self):
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"),
+ "w") as prefs_file:
+ pass
+ with open(os.path.join(self.apt_conf_dir, "sources.list"),
+ "w+") as sources_file:
+ pass
+
+ with open(self.apt_conf_file, "w") as apt_conf:
+ with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+ "apt", "apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ def write_index(self):
+ self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+ "apt-ftparchive")
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self._create_configs()
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
+ if pkg_archs is not None:
+ arch_list = pkg_archs.split()
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
+ if sdk_pkg_archs is not None:
+ for a in sdk_pkg_archs.split():
+ if a not in pkg_archs:
+ arch_list.append(a)
+
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+ apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+ gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+ index_cmds = []
+ deb_dirs_found = False
+ for arch in arch_list:
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
+
+ with open(os.path.join(arch_dir, "Release"), "w+") as release:
+ release.write("Label: %s\n" % arch)
+
+ cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+ index_cmds.append(cmd)
+
+ deb_dirs_found = True
+
+ if not deb_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ raise NotImplementedError('Package feed signing not implemented for dpkg')
+
+class PMPkgsList(PkgsList):
+
+ def list_pkgs(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+ "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+ "-W"]
+
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n")
+
+ try:
+ cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ return opkg_query(cmd_output)
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method holds the logic common to Opkg and Dpkg.
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir containing the extracted contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+
+ This method holds the logic common to Opkg and Dpkg.
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+ bb.fatal("Unable to extract package for '%s'. "
+ "File %s does not exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
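For reference, the two external commands used by extract() can be reproduced in isolation. The sketch below (not part of the patch) unpacks a hypothetical .deb/.ipk into a temporary directory, using cwd= instead of the os.chdir() dance above:

    import subprocess
    import tempfile

    def unpack_deb(pkg_path):
        # A .deb/.ipk is an ar archive whose data.tar.xz member holds the
        # actual file tree; pkg_path should be an absolute path here.
        tmp_dir = tempfile.mkdtemp()
        subprocess.check_output(["ar", "x", pkg_path], cwd=tmp_dir,
                                stderr=subprocess.STDOUT)
        subprocess.check_output(["tar", "xf", "data.tar.xz"], cwd=tmp_dir,
                                stderr=subprocess.STDOUT)
        return tmp_dir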
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class DpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
+ if apt_conf_dir is None:
+ self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+ else:
+ self.apt_conf_dir = apt_conf_dir
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+ self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
+
+ self.apt_args = d.getVar("APT_ARGS")
+
+ self.all_arch_list = archs.split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+ self._create_configs(archs, base_archs)
+
+ self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/dpkg/status
+ file. If 'packages' is None, the new status will be applied to all
+ packages.
+ """
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if not isinstance(packages, list):
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
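The status-rewriting regex in mark_packages() is dense, so here is a standalone sketch (not part of the patch) that applies it to a hypothetical status stanza:

    import re

    status = ("Package: busybox\n"
              "Architecture: armv7a\n"
              "Status: install ok unpacked\n")

    # Same pattern as mark_packages(), with status_tag = "installed"
    print(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                 r"Package: \1\n\2Status: \3installed",
                 status))
    # The Status line becomes "Status: install ok installed"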
+ def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
+ info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+ installed_pkgs = []
+
+ with open(status_file, "r") as status:
+ for line in status.read().split('\n'):
+ m = re.match(r"^Package: (.*)", line)
+ if m is not None:
+ installed_pkgs.append(m.group(1))
+
+ if package_name is not None and package_name not in installed_pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ for pkg_name in installed_pkgs:
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
+ if os.path.exists(p_full):
+ try:
+ bb.note("Executing %s for package: %s ..." %
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def update(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ self.deploy_dir_lock()
+
+ cmd = "%s update" % self.apt_get_cmd
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if attempt_only and len(pkgs) == 0:
+ return
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s install --allow-downgrades --allow-remove-essential --allow-change-held-packages --allow-unauthenticated --no-remove %s" % \
+ (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # rename *.dpkg-new files/dirs
+ for root, dirs, files in os.walk(self.target_rootfs):
+ for dir in dirs:
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
+ if dir != new_dir:
+ bb.utils.rename(os.path.join(root, dir),
+ os.path.join(root, new_dir))
+
+ for file in files:
+ new_file = re.sub(r"\.dpkg-new", "", file)
+ if file != new_file:
+ bb.utils.rename(os.path.join(root, file),
+ os.path.join(root, new_file))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+
+ if with_dependencies:
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+ cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+ else:
+ cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+ " -P --force-depends %s" % \
+ (bb.utils.which(os.getenv('PATH'), "dpkg"),
+ self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+ sources_conf = os.path.join("%s/etc/apt/sources.list"
+ % self.target_rootfs)
+ if not os.path.exists(os.path.dirname(sources_conf)):
+ return
+
+ arch_list = []
+
+ if feed_archs is None:
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+ else:
+ arch_list = feed_archs.split()
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+
+ with open(sources_conf, "w+") as sources_file:
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s/%s ./\n" %
+ (uri, arch))
+ else:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb [trusted=yes] %s ./\n" % uri)
+
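The resulting sources.list entries are easiest to see with concrete values; this sketch (not part of the patch) uses a made-up feed URI and arch list:

    feed_uris = ["http://feeds.example.com/release"]
    arch_list = ["all", "armv7a"]

    for uri in feed_uris:
        for arch in arch_list:
            print("deb [trusted=yes] %s/%s ./" % (uri, arch))
    # deb [trusted=yes] http://feeds.example.com/release/all ./
    # deb [trusted=yes] http://feeds.example.com/release/armv7a ./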
+ def _create_configs(self, archs, base_archs):
+ base_archs = re.sub(r"_", r"-", base_archs)
+
+ if os.path.exists(self.apt_conf_dir):
+ bb.utils.remove(self.apt_conf_dir, True)
+
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
+
+ arch_list = []
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+ priority = 801
+ for arch in arch_list:
+ prefs_file.write(
+ "Package: *\n"
+ "Pin: release l=%s\n"
+ "Pin-Priority: %d\n\n" % (arch, priority))
+
+ priority += 5
+
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
+ for pkg in pkg_exclude.split():
+ prefs_file.write(
+ "Package: %s\n"
+ "Pin: release *\n"
+ "Pin-Priority: -1\n\n" % pkg)
+
+ arch_list.reverse()
+
+ with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+ for arch in arch_list:
+ sources_file.write("deb [trusted=yes] file:%s/ ./\n" %
+ os.path.join(self.deploy_dir, arch))
+
+ base_arch_list = base_archs.split()
+ multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
+ for variant in multilib_variants.split():
+ localdata = bb.data.createCopy(self.d)
+ variant_tune = localdata.getVar("DEFAULTTUNE:virtclass-multilib-" + variant, False)
+ orig_arch = localdata.getVar("DPKG_ARCH")
+ localdata.setVar("DEFAULTTUNE", variant_tune)
+ variant_arch = localdata.getVar("DPKG_ARCH")
+ if variant_arch not in base_arch_list:
+ base_arch_list.append(variant_arch)
+
+ with open(self.apt_conf_file, "w+") as apt_conf:
+ with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ match_arch = re.match(r" Architecture \".*\";$", line)
+ architectures = ""
+ if match_arch:
+ for base_arch in base_arch_list:
+ architectures += "\"%s\";" % base_arch
+ apt_conf.write(" Architectures {%s};\n" % architectures)
+ apt_conf.write(" Architecture \"%s\";\n" % base_archs)
+ else:
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+ if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+ open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+ if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+ open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
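The preferences file written by _create_configs() pins each feed arch at an increasing priority and pins PACKAGE_EXCLUDE entries so they can never be installed. A sketch with hypothetical values (not part of the patch):

    arch_list = ["all", "armv7a"]    # hypothetical feed archs
    pkg_exclude = "bad-pkg"          # hypothetical PACKAGE_EXCLUDE

    priority = 801
    for arch in arch_list:
        print("Package: *\nPin: release l=%s\nPin-Priority: %d\n" % (arch, priority))
        priority += 5
    for pkg in pkg_exclude.split():
        # A Pin-Priority of -1 prevents apt from ever installing the package
        print("Package: %s\nPin: release *\nPin-Priority: -1\n" % pkg)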
+ def remove_packaging_data(self):
+ bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True)
+ bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+ def fix_broken_dependencies(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot fix broken dependencies. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
+ pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["pkgarch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir containing the extracted contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
diff --git a/meta/lib/oe/package_manager/deb/manifest.py b/meta/lib/oe/package_manager/deb/manifest.py
new file mode 100644
index 0000000000..d8eab24a06
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/manifest.py
@@ -0,0 +1,26 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ def create_initial(self):
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ pkg_list = self.d.getVar(var)
+
+ if pkg_list is None:
+ continue
+
+ for pkg in pkg_list.split():
+ manifest.write("%s,%s\n" %
+ (self.var_maps[self.manifest_type][var], pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
diff --git a/meta/lib/oe/package_manager/deb/rootfs.py b/meta/lib/oe/package_manager/deb/rootfs.py
new file mode 100644
index 0000000000..8fbaca11d6
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/rootfs.py
@@ -0,0 +1,210 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.deb.manifest import PkgManifest
+from oe.package_manager.deb import DpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+ # Only one of m_pkg, m_status or m_depends is not None at a time
+ # If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ # Get the package name
+ pkg_name = m_pkg.group(1)
+ # Make sure we reset the other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ # New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+ # New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ # Now check if we can process the package depends and postinst
+ if pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ # Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
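The recursive resolver above is a plain depth-first topological sort. This standalone sketch (not part of the patch) runs the same algorithm on a hypothetical postinst dependency graph:

    def dep_resolve(graph, node, resolved, seen):
        # Depth-first: resolve all edges of a node before the node itself,
        # raising on back-edges (circular postinst dependencies)
        seen.append(node)
        for edge in graph[node]:
            if edge not in resolved:
                if edge in seen:
                    raise RuntimeError("circular dependency: %s <-> %s" % (node, edge))
                dep_resolve(graph, edge, resolved, seen)
        resolved.append(node)

    pkgs = {"app": ["libfoo"], "libfoo": [], "__root__": ["app", "libfoo"]}
    order = []
    dep_resolve(pkgs, "__root__", order, [])
    order.remove("__root__")
    print(order)   # ['libfoo', 'app'] -- dependencies run first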
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '^E:'
+ self.log_check_expected_regexes = \
+ [
+ "^E: Unmet dependencies."
+ ]
+
+ bb.utils.remove(self.image_rootfs, True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
+
+ alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+ bb.utils.mkdirhier(alt_dir)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Don't support incremental, so skip that
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+ self.pm.fix_broken_dependencies()
+
+ if self.progress_reporter:
+ # Don't support attemptonly, so skip that
+ self.progress_reporter.next_stage()
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+ self.pm.fix_broken_dependencies()
+
+ self.pm.mark_packages("installed")
+
+ self.pm.run_pre_post_installs()
+
+ execute_pre_post_process(self.d, deb_post_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = self.image_rootfs + "/var/lib/dpkg/status"
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ pass
diff --git a/meta/lib/oe/package_manager/deb/sdk.py b/meta/lib/oe/package_manager/deb/sdk.py
new file mode 100644
index 0000000000..f4b0b6510a
--- /dev/null
+++ b/meta/lib/oe/package_manager/deb/sdk.py
@@ -0,0 +1,100 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.deb import DpkgPM
+from oe.package_manager.deb.manifest import PkgManifest
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ deb_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ deb_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir,
+ deb_repo_workdir=deb_repo_workdir)
+
+ def _copy_apt_dir_to(self, dst_dir):
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
+
+ self.remove(dst_dir, True)
+
+ shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.write_index()
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_pre_post_installs()
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_pre_post_installs()
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+ "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ "var", "lib", "dpkg")
+ self.mkdirhier(native_dpkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
diff --git a/meta/lib/oe/package_manager/ipk/__init__.py b/meta/lib/oe/package_manager/ipk/__init__.py
new file mode 100644
index 0000000000..4cd3963111
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/__init__.py
@@ -0,0 +1,503 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class OpkgIndexer(Indexer):
+ def write_index(self):
+ arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+ "SDK_PACKAGE_ARCHS",
+ ]
+
+ opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+ open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+ index_cmds = set()
+ index_sign_files = set()
+ for arch_var in arch_vars:
+ archs = self.d.getVar(arch_var)
+ if archs is None:
+ continue
+
+ for arch in archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+ if not os.path.isdir(pkgs_dir):
+ continue
+
+ if not os.path.exists(pkgs_file):
+ open(pkgs_file, "w").close()
+
+ index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+
+ index_sign_files.add(pkgs_file)
+
+ if len(index_cmds) == 0:
+ bb.note("There are no packages in %s!" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
+
+ if signer:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class PMPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir):
+ super(PMPkgsList, self).__init__(d, rootfs_dir)
+ config_file = d.getVar("IPKGCONF_TARGET")
+
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ def list_pkgs(self, format=None):
+ cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
+
+ # opkg returns success even when it printed some
+ # "Collected errors:" report to stderr. Mixing stderr into
+ # stdout then leads to random failures later on when
+ # parsing the output. To avoid this we need to collect both
+ # output streams separately and check for empty stderr.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ cmd_output, cmd_stderr = p.communicate()
+ cmd_output = cmd_output.decode("utf-8")
+ cmd_stderr = cmd_stderr.decode("utf-8")
+ if p.returncode or cmd_stderr:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
+
+ return opkg_query(cmd_output)
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method holds the logic common to Opkg and Dpkg.
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+ Returns the path to a tmpdir containing the extracted contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+
+ This method holds the logic common to Opkg and Dpkg.
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+ bb.fatal("Unable to extract package for '%s'. "
+ "File %s does not exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ data_tar = 'data.tar.xz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class OpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
+ super(OpkgPM, self).__init__(d, target_rootfs)
+
+ self.config_file = config_file
+ self.pkg_archs = archs
+ self.task_name = task_name
+
+ self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
+ self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ if prepare_index:
+ create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
+ self.opkg_dir = oe.path.join(target_rootfs, self.d.getVar('OPKGLIBDIR'), "opkg")
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
+ if self.from_feeds:
+ self._create_custom_config()
+ else:
+ self._create_config()
+
+ self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+ This function will change a package's status in the /var/lib/opkg/status
+ file. If 'packages' is None, the new status will be applied to all
+ packages.
+ """
+ status_file = os.path.join(self.opkg_dir, "status")
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if not isinstance(packages, list):
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ bb.utils.rename(status_file + ".tmp", status_file)
+
+ def _create_custom_config(self):
+ bb.note("Building from feeds activated!")
+
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+ if feed_match is not None:
+ feed_name = feed_match.group(1)
+ feed_uri = feed_match.group(2)
+
+ bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+ config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+ """
+ Allow to use package deploy directory contents as quick devel-testing
+ feed. This creates individual feed configs for each arch subdir of those
+ specified as compatible for the current machine.
+ NOTE: Development-helper feature, NOT a full-fledged feed.
+ """
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
+ for arch in self.pkg_archs.split():
+ cfg_file_name = os.path.join(self.target_rootfs,
+ self.d.getVar("sysconfdir"),
+ "opkg",
+ "local-%s-feed.conf" % arch)
+
+ with open(cfg_file_name, "w+") as cfg_file:
+ cfg_file.write("src/gz local-%s %s/%s" %
+ (arch,
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
+ arch))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def _create_config(self):
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+ for arch in self.pkg_archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ if os.path.isdir(pkgs_dir):
+ config_file.write("src oe-%s file:%s\n" %
+ (arch, pkgs_dir))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
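For a concrete picture of the generated opkg configuration, here is a sketch (not part of the patch) with made-up archs and deploy directory:

    pkg_archs = "all armv7a cortexa8hf-neon"   # hypothetical arch list
    deploy_dir = "/build/oe-rootfs-repo"       # hypothetical deploy dir

    priority = 1
    for arch in pkg_archs.split():
        print("arch %s %d" % (arch, priority))
        priority += 5
    print("src oe file:%s" % deploy_dir)
    # arch all 1
    # arch armv7a 6
    # arch cortexa8hf-neon 11
    # src oe file:/build/oe-rootfs-repo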
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+ rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+ % self.target_rootfs)
+
+ os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
+
+ with open(rootfs_config, "w+") as config_file:
+ uri_iterator = 0
+ for uri in feed_uris:
+ if archs:
+ for arch in archs:
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
+ continue
+ bb.note('Adding opkg feed url-%s-%d (%s)' %
+ (arch, uri_iterator, uri))
+ config_file.write("src/gz uri-%s-%d %s/%s\n" %
+ (arch, uri_iterator, uri, arch))
+ else:
+ bb.note('Adding opkg feed url-%d (%s)' %
+ (uri_iterator, uri))
+ config_file.write("src/gz uri-%d %s\n" %
+ (uri_iterator, uri))
+
+ uri_iterator += 1
+
+ def update(self):
+ self.deploy_dir_lock()
+
+ cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ self.deploy_dir_unlock()
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if not pkgs:
+ return
+
+ cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+ for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+ cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
+ cmd += " install "
+ cmd += " ".join(pkgs)
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if not pkgs:
+ return
+
+ if with_dependencies:
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ else:
+ cmd = "%s %s --force-depends remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ cachedir = oe.path.join(self.target_rootfs, self.d.getVar("localstatedir"), "cache", "opkg")
+ bb.utils.remove(self.opkg_dir, True)
+ bb.utils.remove(cachedir, True)
+
+ def remove_lists(self):
+ if not self.from_feeds:
+ bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
+
+ def list_installed(self):
+ return PMPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def dummy_install(self, pkgs):
+ """
+ This function dummy-installs pkgs and returns the output log.
+ """
+ if len(pkgs) == 0:
+ return
+
+ # Create a temp dir as the opkg root for the dummy installation
+ temp_rootfs = self.d.expand('${T}/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
+ bb.utils.mkdirhier(temp_opkg_dir)
+
+ opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+ opkg_args += self.d.getVar("OPKG_ARGS")
+
+ cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # Dummy installation
+ cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+ opkg_args,
+ ' '.join(pkgs))
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to dummy install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ bb.utils.remove(temp_rootfs, True)
+
+ return output
+
+ def backup_packaging_data(self):
+ # Save the opkglib for incremental ipk image generation
+ if os.path.exists(self.saved_opkg_dir):
+ bb.utils.remove(self.saved_opkg_dir, True)
+ shutil.copytree(self.opkg_dir,
+ self.saved_opkg_dir,
+ symlinks=True)
+
+ def recover_packaging_data(self):
+ # Move the opkglib back
+ if os.path.exists(self.saved_opkg_dir):
+ if os.path.exists(self.opkg_dir):
+ bb.utils.remove(self.opkg_dir, True)
+
+ bb.note('Recover packaging data')
+ shutil.copytree(self.saved_opkg_dir,
+ self.opkg_dir,
+ symlinks=True)
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
+ pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["arch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+ Returns the path to a tmpdir containing the extracted contents of a package.
+
+ Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
diff --git a/meta/lib/oe/package_manager/ipk/manifest.py b/meta/lib/oe/package_manager/ipk/manifest.py
new file mode 100644
index 0000000000..ae451c5c70
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/manifest.py
@@ -0,0 +1,74 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+import re
+
+class PkgManifest(Manifest):
+ """
+ Returns a dictionary object with must-install (mip) and multilib (mlp) packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
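A short sketch (not part of the patch) of the bucketing done by _split_multilib(), using hypothetical variants and packages:

    ml_variants = ["lib32"]                  # hypothetical MULTILIB_VARIANTS
    pkg_list = "busybox lib32-glibc dropbear"

    pkgs = {}
    for pkg in pkg_list.split():
        pkg_type = "mip"                     # PKG_TYPE_MUST_INSTALL
        for variant in ml_variants:
            if pkg.startswith(variant + '-'):
                pkg_type = "mlp"             # PKG_TYPE_MULTILIB
        pkgs.setdefault(pkg_type, []).append(pkg)

    print(pkgs)   # {'mip': ['busybox', 'dropbear'], 'mlp': ['lib32-glibc']}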
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ if not os.path.exists(self.initial_manifest):
+ self.create_initial()
+
+ initial_manifest = self.parse_initial_manifest()
+ pkgs_to_install = list()
+ for pkg_type in initial_manifest:
+ pkgs_to_install += initial_manifest[pkg_type]
+ if len(pkgs_to_install) == 0:
+ return
+
+ output = pm.dummy_install(pkgs_to_install).decode('utf-8')
+
+ with open(self.full_manifest, 'w+') as manifest:
+ pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+ for line in set(output.split('\n')):
+ m = pkg_re.match(line)
+ if m:
+ manifest.write(m.group(1) + '\n')
+
+ return
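The regex used by create_full() to harvest package names from the dummy-install log can be exercised in isolation; the transcript below is invented:

    import re

    output = ("Installing busybox (1.31.1) on root\n"
              "Installing libc6 (2.31) on root\n"
              "Configuring busybox.\n")

    pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
    for line in set(output.split('\n')):
        m = pkg_re.match(line)
        if m:
            print(m.group(1))
    # busybox and libc6 (order not guaranteed, since set() is unordered)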
diff --git a/meta/lib/oe/package_manager/ipk/rootfs.py b/meta/lib/oe/package_manager/ipk/rootfs.py
new file mode 100644
index 0000000000..10a831994e
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/rootfs.py
@@ -0,0 +1,350 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import filecmp
+import shutil
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.package_manager.ipk import OpkgPM
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+ m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ for line in data.split('\n'):
+ m_pkg = re.match(r"^Package: (.*)", line)
+ m_status = re.match(r"^Status:.*unpacked", line)
+ m_depends = re.match(r"^Depends: (.*)", line)
+
+ # Only one of m_pkg, m_status or m_depends is not None at a time
+ # If m_pkg is not None, we started a new package
+ if m_pkg is not None:
+ # Get the package name
+ pkg_name = m_pkg.group(1)
+ # Make sure we reset the other variables
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ # New status matched
+ pkg_status_match = True
+ elif m_depends is not None:
+ # New depends matched
+ pkg_depends = m_depends.group(1)
+ else:
+ pass
+
+ # Now check if we can process the package depends and postinst
+ if pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+ else:
+ # Not enough information
+ pass
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
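+ # A status-file stanza that matches here looks roughly like:
+ #   Package: bash
+ #   Status: install ok unpacked
+ #   Depends: libc6 (>= 2.31), update-alternatives-opkg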
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
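+ # _dep_resolve() is a depth-first post-order walk, so each package ends up
+ # after its dependencies; e.g. for {a: [b], b: [c], c: []} the resolved
+ # order is [c, b, a].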
+
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ return
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class PkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '(exit 1|Collected errors)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
+
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
+ if self._remove_old_rootfs():
+ bb.utils.remove(self.image_rootfs, True)
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ else:
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ self.pm.recover_packaging_data()
+
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ '''
+ Compare two files that share the same key to see if they are equal.
+ If they are not equal, they are duplicates coming from different
+ packages.
+ '''
+ def _file_equal(self, key, f1, f2):
+ if filecmp.cmp(f1, f2):
+ return True
+ # Not equal
+ return False
+
+ """
+ This function was reused from the old implementation.
+ See commit: "image.bbclass: Added variables for multilib support." by
+ Lianhao Lu.
+ """
+ def _multilib_sanity_test(self, dirs):
+
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
+ if allow_replace is None:
+ allow_replace = ""
+
+ allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
+ error_prompt = "Multilib check error:"
+
+ files = {}
+ for dir in dirs:
+ for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item = os.path.join(root, file)
+ key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+ valid = True
+ if key in files:
+ # Check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid = True
+ else:
+ if os.path.exists(files[key]) and \
+ os.path.exists(item) and \
+ not self._file_equal(key, files[key], item):
+ valid = False
+ bb.fatal("%s duplicate files %s %s is not the same\n" %
+ (error_prompt, item, files[key]))
+
+ # Passed the check, add it to the list
+ if valid:
+ files[key] = item
+
+ def _multilib_test_install(self, pkgs):
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
+ bb.utils.mkdirhier(ml_temp)
+
+ dirs = [self.image_rootfs]
+
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
+ ml_target_rootfs = os.path.join(ml_temp, variant)
+
+ bb.utils.remove(ml_target_rootfs, True)
+
+ ml_opkg_conf = os.path.join(ml_temp,
+ variant + "-" + os.path.basename(self.opkg_conf))
+
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
+
+ ml_pm.update()
+ ml_pm.install(pkgs)
+
+ dirs.append(ml_target_rootfs)
+
+ self._multilib_sanity_test(dirs)
+
+ '''
+ When ipk incremental image generation is enabled, remove the unneeded
+ packages by comparing the old full manifest from the previous image
+ creation session with the new full manifest of the current one.
+ '''
+ def _remove_extra_packages(self, pkgs_initial_install):
+ if self.inc_opkg_image_gen == "1":
+ # Parse full manifest in previous existing image creation session
+ old_full_manifest = self.manifest.parse_full_manifest()
+
+ # Create full manifest for the current image session, the old one
+ # will be replaced by the new one.
+ self.manifest.create_full(self.pm)
+
+ # Parse full manifest in current image creation session
+ new_full_manifest = self.manifest.parse_full_manifest()
+
+ pkg_to_remove = list()
+ for pkg in old_full_manifest:
+ if pkg not in new_full_manifest:
+ pkg_to_remove.append(pkg)
+
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ '''
+ Compare with the previous image creation session; if certain conditions
+ are triggered, the previous old image should be removed. The conditions
+ are met when any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS or
+ BAD_RECOMMENDATIONS has changed.
+ '''
+ def _remove_old_rootfs(self):
+ if self.inc_opkg_image_gen != "1":
+ return True
+
+ vars_list_file = self.d.expand('${T}/vars_list')
+
+ old_vars_list = ""
+ if os.path.exists(vars_list_file):
+ old_vars_list = open(vars_list_file, 'r+').read()
+
+ new_vars_list = '%s:%s:%s\n' % \
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
+ open(vars_list_file, 'w+').write(new_vars_list)
+
+ if old_vars_list != new_vars_list:
+ return True
+
+ return False
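+ # The saved ${T}/vars_list is a single
+ # "BAD_RECOMMENDATIONS:NO_RECOMMENDATIONS:PACKAGE_EXCLUDE" line; any
+ # difference between sessions invalidates the previous rootfs.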
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Steps are a bit different in order, skip next
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_opkg_image_gen == "1":
+ self._remove_extra_packages(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ # For multilib, we perform a sanity test before final install
+ # If sanity test fails, it will automatically do a bb.fatal()
+ # and the installation will stop
+ if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+ self._multilib_test_install(pkgs_to_install[pkg_type])
+
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
+
+ execute_pre_post_process(self.d, opkg_post_process_cmds)
+
+ if self.inc_opkg_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+ return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = os.path.join(self.image_rootfs,
+ self.d.getVar('OPKGLIBDIR').strip('/'),
+ "opkg", "status")
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm.remove_lists()
diff --git a/meta/lib/oe/package_manager/ipk/sdk.py b/meta/lib/oe/package_manager/ipk/sdk.py
new file mode 100644
index 0000000000..e2ca415c8e
--- /dev/null
+++ b/meta/lib/oe/package_manager/ipk/sdk.py
@@ -0,0 +1,102 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+import shutil
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.package_manager.ipk.manifest import PkgManifest
+from oe.manifest import Manifest
+from oe.package_manager.ipk import OpkgPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ # In sdk_list_installed_packages the call to opkg is hardcoded to
+ # always use IPKGCONF_TARGET and there's no exposed API to change this
+ # so simply override IPKGCONF_TARGET to use this separate config file.
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK_TARGET")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ ipk_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ ipk_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ ipk_repo_workdir=ipk_repo_workdir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
+ pm.write_index()
+
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+ host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+ self.mkdirhier(target_sysconfdir)
+ shutil.copy(self.target_conf, target_sysconfdir)
+ os.chmod(os.path.join(target_sysconfdir,
+ os.path.basename(self.target_conf)), 0o644)
+
+ self.mkdirhier(host_sysconfdir)
+ shutil.copy(self.host_conf, host_sysconfdir)
+ os.chmod(os.path.join(host_sysconfdir,
+ os.path.basename(self.host_conf)), 0o644)
+
+ native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib", "opkg")
+ self.mkdirhier(native_opkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+ self.movefile(f, native_opkg_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
diff --git a/meta/lib/oe/package_manager/rpm/__init__.py b/meta/lib/oe/package_manager/rpm/__init__.py
new file mode 100644
index 0000000000..b392581069
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/__init__.py
@@ -0,0 +1,410 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import shutil
+import subprocess
+from oe.package_manager import *
+
+class RpmIndexer(Indexer):
+ def write_index(self):
+ self.do_write_index(self.deploy_dir)
+
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
+ if result:
+ bb.fatal(result)
+
+ # Sign repomd
+ if signer:
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
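+ # This is roughly equivalent to running "createrepo_c --update -q <dir>"
+ # and then, when feed signing is enabled, detach-signing
+ # repodata/repomd.xml with the configured key.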
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ # Remove the existing repodata to ensure that we re-generate it no matter what
+ bb.utils.remove(os.path.join(self.deploy_dir, "repodata"), recurse=True)
+
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
+
+
+class PMPkgsList(PkgsList):
+ def list_pkgs(self):
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
+
+class RpmPM(PackageManager):
+ def __init__(self,
+ d,
+ target_rootfs,
+ target_vendor,
+ task_name='target',
+ arch_var=None,
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True,
+ needfeed=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
+ self.target_vendor = target_vendor
+ self.task_name = task_name
+ if arch_var == None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
+ self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+ self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ def _configure_dnf(self):
+ # libsolv handles 'noarch' internally; we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ # This architecture needs to be upfront so that packages using it are properly prioritized
+ archs = ["sdk_provides_dummy_target"] + archs
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ open(confdir + "arch", 'w').write(":".join(archs))
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+
+ open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
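+ # The resulting etc/dnf/vars/arch is a colon-separated priority list, e.g.
+ # "sdk_provides_dummy_target:core2_64:x86_64" (illustrative arch names).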
+
+
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+ # Otherwise it will refuse to proceed with package installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ with open(rpmrcconfdir + "rpmrc", 'w') as f:
+ f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+ f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
+
+ open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
+
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
+
+ if feed_uris == "":
+ return
+
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ else:
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+
+ def install(self, pkgs, attempt_only = False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
+
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
+
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
+
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Error: Systemctl"):
+ bb.error(line)
+
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
+
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+
+ def remove(self, pkgs, with_dependencies = True):
+ if not pkgs:
+ return
+
+ self._prepare_pkg_transaction()
+
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
+
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
+
+ def upgrade(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
+
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
+
+ def remove_packaging_data(self):
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
+
+ def backup_packaging_data(self):
+ # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def recovery_packaging_data(self):
+ # Move the rpmlib back
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ if os.path.isdir(source_dir):
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+ elif os.path.isfile(source_dir):
+ shutil.copy2(source_dir, target_dir)
+
+ def list_installed(self):
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
+
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
+ try:
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.debug(1, output)
+ return output
+ except subprocess.CalledProcessError as e:
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
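+ # A typical assembled command line looks like:
+ #   dnf -v --rpmverbosity=info -y -c <rootfs>/etc/dnf/dnf.conf
+ #     --setopt=reposdir=<rootfs>/etc/yum.repos.d --installroot=<rootfs>
+ #     --setopt=logdir=${T} --repofrompath=oe-repo,<repo_dir> <dnf_args>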
+
+ def dump_install_solution(self, pkgs):
+ open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ return pkgs
+
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+ with open(self.solution_manifest, 'r') as fd:
+ return fd.read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
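+ # Seeding with 99 means the first saved script gets prefix 100, so scripts
+ # saved here sort after any already present, e.g. "100-bash", "101-sed".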
+
+ def save_rpmpostinst(self, pkg):
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
+
+ try:
+ output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
+
+ # may need to prepend #!/bin/sh to output
+
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ open(saved_script_name, 'w').write(output)
+ os.chmod(saved_script_name, 0o755)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
+
+ def extract(self, pkg):
+ output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
+
+ cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
+ rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
+
+ if not os.path.isfile(pkg_path):
+ bb.fatal("Unable to extract package for '%s'."
+ "File %s doesn't exists" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+
+ try:
+ cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ os.chdir(current_dir)
+
+ return tmp_dir
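+ # The extraction above is the classic pipeline, run inside a temp directory:
+ #   rpm2cpio <pkg>.rpm | cpio -idmv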
diff --git a/meta/lib/oe/package_manager/rpm/manifest.py b/meta/lib/oe/package_manager/rpm/manifest.py
new file mode 100644
index 0000000000..e6604b301f
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/manifest.py
@@ -0,0 +1,54 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.manifest import Manifest
+
+class PkgManifest(Manifest):
+ """
+ Returns a dictionary object with must-install (mip) and multilib (mlp) packages.
+ """
+ def _split_multilib(self, pkg_list):
+ pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+ if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = pkg_list
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
diff --git a/meta/lib/oe/package_manager/rpm/rootfs.py b/meta/lib/oe/package_manager/rpm/rootfs.py
new file mode 100644
index 0000000000..00d07cd9cc
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/rootfs.py
@@ -0,0 +1,148 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+from oe.rootfs import Rootfs
+from oe.manifest import Manifest
+from oe.utils import execute_pre_post_process
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgRootfs(Rootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(PkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+ r'|exit 1|ERROR: |Error: |Error |ERROR '\
+ r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
+
+ self.manifest = PkgManifest(d, manifest_dir)
+
+ self.pm = RpmPM(d,
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
+ )
+
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
+ if self.inc_rpm_image_gen != "1":
+ bb.utils.remove(self.image_rootfs, True)
+ else:
+ self.pm.recovery_packaging_data()
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+
+ self.pm.create_configs()
+
+ '''
+ When rpm incremental image generation is enabled, remove the unneeded
+ packages by comparing the new install solution manifest against the
+ old installed manifest.
+ '''
+ def _create_incremental(self, pkgs_initial_install):
+ if self.inc_rpm_image_gen == "1":
+
+ pkgs_to_install = list()
+ for pkg_type in pkgs_initial_install:
+ pkgs_to_install += pkgs_initial_install[pkg_type]
+
+ installed_manifest = self.pm.load_old_install_solution()
+ solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+ pkg_to_remove = list()
+ for pkg in installed_manifest:
+ if pkg not in solution_manifest:
+ pkg_to_remove.append(pkg)
+
+ self.pm.update()
+
+ bb.note('incremental update -- upgrade packages in place')
+ self.pm.upgrade()
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ self.pm.autoremove()
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_rpm_image_gen == "1":
+ self._create_incremental(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs_attempt, True)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+
+ execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+ if self.inc_rpm_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+ def _get_delayed_postinsts(self):
+ postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+ if os.path.isdir(postinst_dir):
+ files = os.listdir(postinst_dir)
+ for f in files:
+ bb.note('Delayed package scriptlet: %s' % f)
+ return files
+
+ return None
+
+ def _save_postinsts(self):
+ # this is just a stub. For RPM, the failed postinstalls are
+ # already saved in /etc/rpm-postinsts
+ pass
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+ self.pm._invoke_dnf(["clean", "all"])
diff --git a/meta/lib/oe/package_manager/rpm/sdk.py b/meta/lib/oe/package_manager/rpm/sdk.py
new file mode 100644
index 0000000000..c5f232431f
--- /dev/null
+++ b/meta/lib/oe/package_manager/rpm/sdk.py
@@ -0,0 +1,114 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import glob
+from oe.utils import execute_pre_post_process
+from oe.sdk import Sdk
+from oe.manifest import Manifest
+from oe.package_manager.rpm.manifest import PkgManifest
+from oe.package_manager.rpm import RpmPM
+
+class PkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
+ super(PkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = PkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = RpmPM(d,
+ self.sdk_target_sysroot,
+ self.d.getVar('TARGET_VENDOR'),
+ 'target',
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ self.host_pm = RpmPM(d,
+ self.sdk_host_sysroot,
+ self.d.getVar('SDK_VENDOR'),
+ 'host',
+ "SDK_PACKAGE_ARCHS",
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.create_configs()
+ pm.write_index()
+ pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ pm.install(pkgs)
+
+ pm.install(pkgs_attempt, True)
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts(populate_sdk='target')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts(populate_sdk='host')
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ # Move host RPM library data
+ native_rpm_state_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib",
+ "rpm"
+ )
+ self.mkdirhier(native_rpm_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output,
+ "var",
+ "lib",
+ "rpm",
+ "*")):
+ self.movefile(f, native_rpm_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+ # Move host sysconfig data
+ native_sysconf_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('sysconfdir').strip('/'),
+ )
+ self.mkdirhier(native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
+ self.movefile(f, native_sysconf_dir)
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
index a82085a792..212f048bc6 100644
--- a/meta/lib/oe/packagedata.py
+++ b/meta/lib/oe/packagedata.py
@@ -19,7 +19,7 @@ def read_pkgdatafile(fn):
import re
with open(fn, 'r') as f:
lines = f.readlines()
- r = re.compile("([^:]+):\s*(.*)")
+ r = re.compile(r"(^.+?):\s+(.*)")
for l in lines:
m = r.match(l)
if m:
@@ -45,18 +45,30 @@ def read_pkgdata(pn, d):
return read_pkgdatafile(fn)
#
-# Collapse FOO_pkg variables into FOO
+# Collapse FOO:pkg variables into FOO
#
def read_subpkgdata_dict(pkg, d):
ret = {}
subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
for var in subd:
- newvar = var.replace("_" + pkg, "")
- if newvar == var and var + "_" + pkg in subd:
+ newvar = var.replace(":" + pkg, "")
+ if newvar == var and var + ":" + pkg in subd:
continue
ret[newvar] = subd[var]
return ret
+def read_subpkgdata_extended(pkg, d):
+ import json
+ import bb.compress.zstd
+
+ fn = d.expand("${PKGDATA_DIR}/extended/%s.json.zstd" % pkg)
+ try:
+ num_threads = int(d.getVar("BB_NUMBER_THREADS"))
+ with bb.compress.zstd.open(fn, "rt", encoding="utf-8", num_threads=num_threads) as f:
+ return json.load(f)
+ except FileNotFoundError:
+ return None
+
def _pkgmap(d):
"""Return a dictionary mapping package to recipe name."""
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
index 7ca2e28b1f..9034fcae03 100644
--- a/meta/lib/oe/patch.py
+++ b/meta/lib/oe/patch.py
@@ -4,6 +4,7 @@
import oe.path
import oe.types
+import subprocess
class NotFoundError(bb.BBHandledException):
def __init__(self, path):
@@ -25,7 +26,6 @@ class CmdError(bb.BBHandledException):
def runcmd(args, dir = None):
import pipes
- import subprocess
if dir:
olddir = os.path.abspath(os.curdir)
@@ -38,20 +38,25 @@ def runcmd(args, dir = None):
args = [ pipes.quote(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
- (exitstatus, output) = subprocess.getstatusoutput(cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ stdout, stderr = proc.communicate()
+ stdout = stdout.decode('utf-8')
+ stderr = stderr.decode('utf-8')
+ exitstatus = proc.returncode
if exitstatus != 0:
- raise CmdError(cmd, exitstatus >> 8, output)
- if " fuzz " in output:
+ raise CmdError(cmd, exitstatus >> 8, "stdout: %s\nstderr: %s" % (stdout, stderr))
+ if " fuzz " in stdout and "Hunk " in stdout:
# Drop patch fuzz info with header and footer to log file so
# insane.bbclass can handle to throw error/warning
- bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(output))
+ bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(stdout))
- return output
+ return stdout
finally:
if dir:
os.chdir(olddir)
+
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
@@ -294,6 +299,24 @@ class GitApplyTree(PatchTree):
PatchTree.__init__(self, dir, d)
self.commituser = d.getVar('PATCH_GIT_USER_NAME')
self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+ if not self._isInitialized():
+ self._initRepo()
+
+ def _isInitialized(self):
+ cmd = "git rev-parse --show-toplevel"
+ try:
+ output = runcmd(cmd.split(), self.dir).strip()
+ except CmdError as err:
+ ## runcmd returned non-zero which most likely means 128
+ ## Not a git directory
+ return False
+ ## Make sure repo is in builddir to not break top-level git repos
+ return os.path.samefile(output, self.dir)
+
+ def _initRepo(self):
+ runcmd("git init".split(), self.dir)
+ runcmd("git add .".split(), self.dir)
+ runcmd("git commit -a --allow-empty -m bitbake_patching_started".split(), self.dir)
@staticmethod
def extractPatchHeader(patchfile):
@@ -512,7 +535,7 @@ class GitApplyTree(PatchTree):
try:
shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
- shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ shellcmd += ["am", "-3", "--keep-cr", "--no-scissors", "-p%s" % patch['strippath']]
return _applypatchhelper(shellcmd, patch, force, reverse, run)
except CmdError:
# Need to abort the git am, or we'll still be within it at the end
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
index 082972457b..c8d8ad05b9 100644
--- a/meta/lib/oe/path.py
+++ b/meta/lib/oe/path.py
@@ -320,3 +320,24 @@ def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=F
return files
+def canonicalize(paths, sep=','):
+ """Given a string with paths (separated by commas by default), expand
+ each path using os.path.realpath() and return the resulting paths as a
+ string (separated using the same separator as the original string).
+ """
+ # Ignore paths containing "$" as they are assumed to be unexpanded bitbake
+ # variables. Normally they would be ignored, e.g., when passing the paths
+ # through the shell they would expand to empty strings. However, when they
+ # are passed through os.path.realpath(), it will cause them to be prefixed
+ # with the absolute path to the current directory and thus not be empty
+ # anymore.
+ #
+ # Also maintain trailing slashes, as the paths may actually be used as
+ # prefixes in string compares later on, where the slashes are important.
+ canonical_paths = []
+ for path in (paths or '').split(sep):
+ if '$' not in path:
+ trailing_slash = path.endswith('/') and '/' or ''
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+
+ return sep.join(canonical_paths)
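+# Example (assuming no symlinks are involved):
+#   canonicalize("/usr/./lib,/tmp/a/../b/") -> "/usr/lib,/tmp/b/"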
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
index 2d3c9c7e50..339f7aebca 100644
--- a/meta/lib/oe/prservice.py
+++ b/meta/lib/oe/prservice.py
@@ -3,19 +3,14 @@
#
def prserv_make_conn(d, check = False):
- # Otherwise this fails when called from recipes which e.g. inherit python3native (which sets _PYTHON_SYSCONFIGDATA_NAME) with:
- # No module named '_sysconfigdata'
- if '_PYTHON_SYSCONFIGDATA_NAME' in os.environ:
- del os.environ['_PYTHON_SYSCONFIGDATA_NAME']
import prserv.serv
host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
try:
conn = None
- conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ conn = prserv.serv.connect(host_params[0], int(host_params[1]))
if check:
if not conn.ping():
raise Exception('service not available')
- d.setVar("__PRSERV_CONN",conn)
except Exception as exc:
bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
@@ -26,31 +21,29 @@ def prserv_dump_db(d):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#dump db
opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
- return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ data = conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+ conn.close()
+ return data
def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
if not d.getVar('PRSERV_HOST'):
bb.error("Not using network based PR service")
return None
- conn = d.getVar("__PRSERV_CONN")
+ conn = prserv_make_conn(d)
if conn is None:
- conn = prserv_make_conn(d)
- if conn is None:
- bb.error("Making connection failed to remote PR service")
- return None
+ bb.error("Making connection failed to remote PR service")
+ return None
#get the entry values
imported = []
prefix = "PRAUTO$"
@@ -74,6 +67,7 @@ def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksu
bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
else:
imported.append((version,pkgarch,checksum,value))
+ conn.close()
return imported
def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
@@ -129,4 +123,5 @@ def prserv_check_avail(d):
except TypeError:
bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
else:
- prserv_make_conn(d, True)
+ conn = prserv_make_conn(d, True)
+ conn.close()
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
index ea831b930a..89acd3ead0 100644
--- a/meta/lib/oe/qa.py
+++ b/meta/lib/oe/qa.py
@@ -48,6 +48,9 @@ class ELFFile:
return self
def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
if self.data:
self.data.close()
@@ -128,6 +131,9 @@ class ELFFile:
"""
return self.getShort(ELFFile.E_MACHINE)
+ def set_objdump(self, cmd, output):
+ self.objdump_output[cmd] = output
+
def run_objdump(self, cmd, d):
import bb.process
import sys
@@ -156,6 +162,7 @@ def elf_machine_to_string(machine):
"""
try:
return {
+ 0x00: "Unset",
0x02: "SPARC",
0x03: "x86",
0x08: "MIPS",
@@ -170,6 +177,40 @@ def elf_machine_to_string(machine):
except:
return "Unknown (%s)" % repr(machine)
+def write_error(type, error, d):
+ logfile = d.getVar('QA_LOGFILE')
+ if logfile:
+ p = d.getVar('P')
+ with open(logfile, "a+") as f:
+ f.write("%s: %s [%s]\n" % (p, error, type))
+
+def handle_error(error_class, error_msg, d):
+ if error_class in (d.getVar("ERROR_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.error("QA Issue: %s [%s]" % (error_msg, error_class))
+ d.setVar("QA_ERRORS_FOUND", "True")
+ return False
+ elif error_class in (d.getVar("WARN_QA") or "").split():
+ write_error(error_class, error_msg, d)
+ bb.warn("QA Issue: %s [%s]" % (error_msg, error_class))
+ else:
+ bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
+ return True
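+# Sketch of a typical call site: handle_error("ldflags", msg, d) returns
+# False when "ldflags" is listed in ERROR_QA, and True otherwise, so callers
+# can accumulate an overall pass/fail status.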
+
+def add_message(messages, section, new_msg):
+ if section not in messages:
+ messages[section] = new_msg
+ else:
+ messages[section] = messages[section] + "\n" + new_msg
+
+def exit_with_message_if_errors(message, d):
+ qa_fatal_errors = bb.utils.to_boolean(d.getVar("QA_ERRORS_FOUND"), False)
+ if qa_fatal_errors:
+ bb.fatal(message)
+
+def exit_if_errors(d):
+ exit_with_message_if_errors("Fatal QA errors were found, failing task.", d)
+
if __name__ == "__main__":
import sys
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
index 29473a9882..872ff97b89 100644
--- a/meta/lib/oe/recipeutils.py
+++ b/meta/lib/oe/recipeutils.py
@@ -24,7 +24,7 @@ from collections import OrderedDict, defaultdict
from bb.utils import vercmp_string
# Help us to find places to insert values
-recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()', 'BBCLASSEXTEND']
# Variables that sometimes are a bit long but shouldn't be wrapped
nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
@@ -47,7 +47,7 @@ def simplify_history(history, d):
continue
has_set = True
elif event['op'] in ('append', 'prepend', 'postdot', 'predot'):
- # Reminder: "append" and "prepend" mean += and =+ respectively, NOT _append / _prepend
+ # Reminder: "append" and "prepend" mean += and =+ respectively, NOT :append / :prepend
if has_set:
continue
ret_history.insert(0, event)
@@ -342,7 +342,7 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None
def override_applicable(hevent):
op = hevent['op']
if '[' in op:
- opoverrides = op.split('[')[1].split(']')[0].split('_')
+ opoverrides = op.split('[')[1].split(']')[0].split(':')
for opoverride in opoverrides:
if not opoverride in overrides:
return False
@@ -368,13 +368,13 @@ def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None
recipe_set = True
if not recipe_set:
for event in history:
- if event['op'].startswith('_remove'):
+ if event['op'].startswith(':remove'):
continue
if not override_applicable(event):
continue
newvalue = value.replace(event['detail'], '')
- if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith('_'):
- op = event['op'].replace('[', '_').replace(']', '')
+ if newvalue == value and os.path.abspath(event['file']) == fn and event['op'].startswith(':'):
+ op = event['op'].replace('[', ':').replace(']', '')
extravals[var + op] = None
value = newvalue
vals[var] = ('+=', value)
@@ -409,12 +409,12 @@ def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=F
fetch.download()
for pth in fetch.localpaths():
if pth not in localpaths:
- localpaths.append(pth)
+ localpaths.append(os.path.abspath(pth))
uri_values.append(srcuri)
fetch_urls(d)
if all_variants:
- # Get files for other variants e.g. in the case of a SRC_URI_append
+ # Get files for other variants e.g. in the case of a SRC_URI:append
localdata = bb.data.createCopy(d)
variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
if variants:
@@ -563,6 +563,23 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
pn = d.getVar('PN')
+ # Parse BBFILES_DYNAMIC and append to BBFILES
+ bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split()
+ collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split()
+ invalid = []
+ for entry in bbfiles_dynamic:
+ parts = entry.split(":", 1)
+ if len(parts) != 2:
+ invalid.append(entry)
+ continue
+ l, f = parts
+ invert = l[0] == "!"
+ if invert:
+ l = l[1:]
+ if (l in collections and not invert) or (l not in collections and invert):
+ confdata.appendVar("BBFILES", " " + f)
+ if invalid:
+ return None
bbfilespecs = (confdata.getVar('BBFILES') or '').split()
if destdir == destlayerdir:
for bbfilespec in bbfilespecs:
@@ -736,12 +753,12 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
destsubdir = rd.getVar('PN')
if srcfiles:
- bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+ bbappendlines.append(('FILESEXTRAPATHS:prepend', ':=', '${THISDIR}/${PN}:'))
appendoverride = ''
if machine:
bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
- appendoverride = '_%s' % machine
+ appendoverride = ':%s' % machine
copyfiles = {}
if srcfiles:
instfunclines = []
@@ -755,7 +772,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
# FIXME do we care if the entry is added by another bbappend that might go away?
if not srcurientry in rd.getVar('SRC_URI').split():
if machine:
- appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ appendline('SRC_URI:append%s' % appendoverride, '=', ' ' + srcurientry)
else:
appendline('SRC_URI', '+=', srcurientry)
copyfiles[newfile] = srcfile
@@ -769,7 +786,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
instfunclines.append(instdirline)
instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
if instfunclines:
- bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+ bbappendlines.append(('do_install:append%s()' % appendoverride, '', instfunclines))
if redirect_output:
bb.note('Writing append file %s (dry-run)' % appendpath)
@@ -787,15 +804,15 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
extvars = {'destsubdir': destsubdir}
def appendfile_varfunc(varname, origvalue, op, newlines):
- if varname == 'FILESEXTRAPATHS_prepend':
+ if varname == 'FILESEXTRAPATHS:prepend':
if origvalue.startswith('${THISDIR}/'):
- popline('FILESEXTRAPATHS_prepend')
+ popline('FILESEXTRAPATHS:prepend')
extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
elif varname == 'PACKAGE_ARCH':
if machine:
popline('PACKAGE_ARCH')
return (machine, None, 4, False)
- elif varname.startswith('do_install_append'):
+ elif varname.startswith('do_install:append'):
func = popline(varname)
if func:
instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
@@ -807,7 +824,7 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
splitval = split_var_value(origvalue, assignment=False)
changed = False
removevar = varname
- if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ if varname in ['SRC_URI', 'SRC_URI:append%s' % appendoverride]:
removevar = 'SRC_URI'
line = popline(varname)
if line:
@@ -836,11 +853,11 @@ def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False,
newvalue = splitval
if len(newvalue) == 1:
# Ensure it's written out as one line
- if '_append' in varname:
+ if ':append' in varname:
newvalue = ' ' + newvalue[0]
else:
newvalue = newvalue[0]
- if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ if not newvalue and (op in ['+=', '.='] or ':append' in varname):
# There's no point appending nothing
newvalue = None
if varname.endswith('()'):
@@ -1012,6 +1029,7 @@ def get_recipe_upstream_version(rd):
else:
ud = bb.fetch2.FetchData(src_uri, rd)
if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ bb.fetch2.get_srcrev(rd)
revision = ud.method.latest_revision(ud, rd, 'default')
upversion = pv
if revision != rd.getVar("SRCREV"):
diff --git a/meta/lib/oe/reproducible.py b/meta/lib/oe/reproducible.py
new file mode 100644
index 0000000000..35b8be6d08
--- /dev/null
+++ b/meta/lib/oe/reproducible.py
@@ -0,0 +1,192 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import subprocess
+import bb
+
+# For reproducible builds, this code sets the default SOURCE_DATE_EPOCH in each
+# component's build environment. The value is the number of seconds since the
+# Unix epoch.
+#
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
+#
+# The default value of SOURCE_DATE_EPOCH comes from the function
+# get_source_date_epoch_value, which reads from the SDE_FILE or, if the file
+# is not available, falls back to SOURCE_DATE_EPOCH_FALLBACK.
+#
+# The SDE_FILE is normally constructed by the function
+# create_source_date_epoch_stamp, which is typically added as a postfunc to
+# the do_unpack task. If a recipe does NOT have do_unpack, the function should
+# be added to a task that runs after the source is available and before the
+# do_deploy_source_date_epoch task is executed.
+#
+# If a recipe wishes to override the default behavior, it should set its own
+# SOURCE_DATE_EPOCH or override the do_deploy_source_date_epoch_stamp task
+# with recipe-specific functionality to write the appropriate
+# SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# SOURCE_DATE_EPOCH is intended to be a reproducible value. This value should
+# be reproducible for anyone who builds the same revision from the same
+# sources.
+#
+# There are five ways the create_source_date_epoch_stamp function determines what
+# becomes SOURCE_DATE_EPOCH:
+#
+# 1. Use the value from the __source_date_epoch.txt file if this file exists.
+#    This file was most likely created in a previous build by one of the
+#    following methods 2-4.
+# Alternatively, it can be provided by a recipe via SRC_URI.
+#
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+# Git does not preserve file timestamps on checkout.
+#
+# 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
+# This works for well-kept repositories distributed via tarball.
+#
+# 4. Use the modification time of the youngest file in the source tree, if
+# there is one.
+# This will be the newest file from the distribution tarball, if any.
+#
+# 5. Fall back to a fixed timestamp (SOURCE_DATE_EPOCH_FALLBACK).
+#
+# Once the value is determined, it is stored in the recipe's SDE_FILE.
+
+def get_source_date_epoch_from_known_files(d, sourcedir):
+ source_date_epoch = None
+ newest_file = None
+ known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
+ for file in known_files:
+ filepath = os.path.join(sourcedir, file)
+ if os.path.isfile(filepath):
+ mtime = int(os.lstat(filepath).st_mtime)
+ # More than one "known" file may be present; if so, use the youngest one
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filepath
+ if newest_file:
+ bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
+ return source_date_epoch
+
+def find_git_folder(d, sourcedir):
+ # First guess: WORKDIR/git
+ # This is the default git fetcher unpack path
+ workdir = d.getVar('WORKDIR')
+ gitpath = os.path.join(workdir, "git/.git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Second guess: ${S}
+ gitpath = os.path.join(sourcedir, ".git")
+ if os.path.isdir(gitpath):
+ return gitpath
+
+ # Perhaps there was a subpath or destsuffix specified.
+ # Go looking in the WORKDIR
+ exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+ "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+ for root, dirs, files in os.walk(workdir, topdown=True):
+ dirs[:] = [d for d in dirs if d not in exclude]
+ if '.git' in dirs:
+ return os.path.join(root, ".git")
+
+ bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
+ return None
+
+def get_source_date_epoch_from_git(d, sourcedir):
+ if not "git://" in d.getVar('SRC_URI') and not "gitsm://" in d.getVar('SRC_URI'):
+ return None
+
+ gitpath = find_git_folder(d, sourcedir)
+ if not gitpath:
+ return None
+
+ # Check that the repository has a valid HEAD; it may not if subdir is used
+ # in SRC_URI
+ p = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ if p.returncode != 0:
+ bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, p.stdout.decode('utf-8')))
+ return None
+
+ bb.debug(1, "git repository: %s" % gitpath)
+ p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE)
+ return int(p.stdout.decode('utf-8'))
+
+def get_source_date_epoch_from_youngest_file(d, sourcedir):
+ if sourcedir == d.getVar('WORKDIR'):
+ # These sources are almost certainly not from a tarball
+ return None
+
+ # Do it the hard way: check all files and find the youngest one...
+ source_date_epoch = None
+ newest_file = None
+ for root, dirs, files in os.walk(sourcedir, topdown=True):
+ files = [f for f in files if not f[0] == '.']
+
+ for fname in files:
+ filename = os.path.join(root, fname)
+ try:
+ mtime = int(os.lstat(filename).st_mtime)
+ except OSError:
+ mtime = 0
+ if not source_date_epoch or mtime > source_date_epoch:
+ source_date_epoch = mtime
+ newest_file = filename
+
+ if newest_file:
+ bb.debug(1, "Newest file found: %s" % newest_file)
+ return source_date_epoch
+
+def fixed_source_date_epoch(d):
+ bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
+ source_date_epoch = d.getVar('SOURCE_DATE_EPOCH_FALLBACK')
+ if source_date_epoch:
+ bb.debug(1, "Using SOURCE_DATE_EPOCH_FALLBACK")
+ return int(source_date_epoch)
+ return 0
+
+def get_source_date_epoch(d, sourcedir):
+ return (
+ get_source_date_epoch_from_git(d, sourcedir) or
+ get_source_date_epoch_from_known_files(d, sourcedir) or
+ get_source_date_epoch_from_youngest_file(d, sourcedir) or
+ fixed_source_date_epoch(d) # Last resort
+ )
+
+def epochfile_read(epochfile, d):
+ cached, efile = d.getVar('__CACHED_SOURCE_DATE_EPOCH') or (None, None)
+ if cached and efile == epochfile:
+ return cached
+
+ if cached and epochfile != efile:
+ bb.debug(1, "Epoch file changed from %s to %s" % (efile, epochfile))
+
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ try:
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to SOURCE_DATE_EPOCH_FALLBACK" % s)
+ source_date_epoch = int(d.getVar('SOURCE_DATE_EPOCH_FALLBACK'))
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ except FileNotFoundError:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', (str(source_date_epoch), epochfile))
+ return str(source_date_epoch)
+
+def epochfile_write(source_date_epoch, epochfile, d):
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ bb.utils.mkdirhier(os.path.dirname(epochfile))
+
+ tmp_file = "%s.new" % epochfile
+ with open(tmp_file, 'w') as f:
+ f.write(str(source_date_epoch))
+ os.rename(tmp_file, epochfile)
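
Taken together, the helpers above form a small pipeline: derive an epoch from the source tree, persist it atomically, and read it back (with caching) on later runs. Below is a minimal sketch of that flow; the stub datastore, the paths, and the SRC_URI are hypothetical stand-ins for what BitBake normally provides, so this illustrates the calling convention rather than a real build.

    import oe.reproducible as repro

    class StubData:
        # Minimal stand-in for the BitBake datastore; only getVar/setVar
        # are exercised by these helpers.
        def __init__(self, values):
            self._values = dict(values)
        def getVar(self, name):
            return self._values.get(name)
        def setVar(self, name, value):
            self._values[name] = value

    d = StubData({
        "WORKDIR": "/tmp/work",                   # hypothetical
        "SRC_URI": "git://example.com/repo.git",  # hypothetical
        "SOURCE_DATE_EPOCH_FALLBACK": "1302044400",
    })

    # Git commit time, else known files, else youngest file, else the fallback.
    sde = repro.get_source_date_epoch(d, "/tmp/work/git")

    # Persisted by writing to a .new file and renaming it into place ...
    repro.epochfile_write(sde, "/tmp/work/sde/__source_date_epoch.txt", d)

    # ... and read back later; the result is cached on the datastore.
    assert repro.epochfile_read("/tmp/work/sde/__source_date_epoch.txt", d) == str(sde)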
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
index 0e05f1f75e..98cf3f244d 100644
--- a/meta/lib/oe/rootfs.py
+++ b/meta/lib/oe/rootfs.py
@@ -6,13 +6,11 @@ from oe.utils import execute_pre_post_process
from oe.package_manager import *
from oe.manifest import *
import oe.path
-import filecmp
import shutil
import os
import subprocess
import re
-
class Rootfs(object, metaclass=ABCMeta):
"""
This is an abstract class. Do not instantiate this directly.
@@ -51,6 +49,8 @@ class Rootfs(object, metaclass=ABCMeta):
excludes = [ 'log_check', r'^\+' ]
if hasattr(self, 'log_check_expected_regexes'):
excludes.extend(self.log_check_expected_regexes)
+ # Insert custom log_check excludes
+ excludes += [x for x in (self.d.getVar("IMAGE_LOG_CHECK_EXCLUDES") or "").split(" ") if x]
excludes = [re.compile(x) for x in excludes]
r = re.compile(match)
log_path = self.d.expand("${T}/log.do_rootfs")
@@ -114,7 +114,7 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-orig')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-orig')
bb.note(" Creating debug rootfs...")
bb.utils.mkdirhier(self.image_rootfs)
@@ -165,10 +165,10 @@ class Rootfs(object, metaclass=ABCMeta):
shutil.rmtree(self.image_rootfs + '-dbg')
except:
pass
- os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+ bb.utils.rename(self.image_rootfs, self.image_rootfs + '-dbg')
- bb.note(" Restoreing original rootfs...")
- os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+ bb.note(" Restoring original rootfs...")
+ bb.utils.rename(self.image_rootfs + '-orig', self.image_rootfs)
def _exec_shell_cmd(self, cmd):
fakerootcmd = self.d.getVar('FAKEROOT')
@@ -190,10 +190,6 @@ class Rootfs(object, metaclass=ABCMeta):
post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
- bb.utils.mkdirhier(self.image_rootfs)
-
- bb.utils.mkdirhier(self.deploydir)
-
execute_pre_post_process(self.d, pre_process_cmds)
if self.progress_reporter:
@@ -217,6 +213,9 @@ class Rootfs(object, metaclass=ABCMeta):
self.progress_reporter.next_stage()
if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d) and \
+ not bb.utils.contains("IMAGE_FEATURES",
+ "read-only-rootfs-delayed-postinsts",
True, False, self.d):
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is not None:
@@ -247,13 +246,11 @@ class Rootfs(object, metaclass=ABCMeta):
def _uninstall_unneeded(self):
- # Remove unneeded init script symlinks
+ # Remove the run-postinsts package if no delayed postinsts are found
delayed_postinsts = self._get_delayed_postinsts()
if delayed_postinsts is None:
- if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
- self._exec_shell_cmd(["update-rc.d", "-f", "-r",
- self.d.getVar('IMAGE_ROOTFS'),
- "run-postinsts", "remove"])
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")) or os.path.exists(self.d.expand("${IMAGE_ROOTFS}${systemd_system_unitdir}/run-postinsts.service")):
+ self.pm.remove(["run-postinsts"])
image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
True, False, self.d)
@@ -301,10 +298,20 @@ class Rootfs(object, metaclass=ABCMeta):
self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
'new', '-v', '-X'])
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ ldconfig_in_features = bb.utils.contains("DISTRO_FEATURES", "ldconfig",
+ True, False, self.d)
+ if image_rorfs or not ldconfig_in_features:
+ ldconfig_cache_dir = os.path.join(self.image_rootfs, "var/cache/ldconfig")
+ if os.path.exists(ldconfig_cache_dir):
+ bb.note("Removing ldconfig auxiliary cache...")
+ shutil.rmtree(ldconfig_cache_dir)
+
def _check_for_kernel_modules(self, modules_dir):
for root, dirs, files in os.walk(modules_dir, topdown=True):
for name in files:
- found_ko = name.endswith(".ko")
+ found_ko = name.endswith((".ko", ".ko.gz", ".ko.xz"))
if found_ko:
return found_ko
return False
@@ -353,614 +360,10 @@ class Rootfs(object, metaclass=ABCMeta):
self.image_rootfs, "-D", devtable])
-class RpmRootfs(Rootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
- r'|exit 1|ERROR: |Error: |Error |ERROR '\
- r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
- self.manifest = RpmManifest(d, manifest_dir)
-
- self.pm = RpmPM(d,
- d.getVar('IMAGE_ROOTFS'),
- self.d.getVar('TARGET_VENDOR')
- )
-
- self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
- if self.inc_rpm_image_gen != "1":
- bb.utils.remove(self.image_rootfs, True)
- else:
- self.pm.recovery_packaging_data()
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- self.pm.create_configs()
-
- '''
- While rpm incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the new install solution manifest and the
- old installed manifest.
- '''
- def _create_incremental(self, pkgs_initial_install):
- if self.inc_rpm_image_gen == "1":
-
- pkgs_to_install = list()
- for pkg_type in pkgs_initial_install:
- pkgs_to_install += pkgs_initial_install[pkg_type]
-
- installed_manifest = self.pm.load_old_install_solution()
- solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
-
- pkg_to_remove = list()
- for pkg in installed_manifest:
- if pkg not in solution_manifest:
- pkg_to_remove.append(pkg)
-
- self.pm.update()
-
- bb.note('incremental update -- upgrade packages in place ')
- self.pm.upgrade()
- if pkg_to_remove != []:
- bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- self.pm.autoremove()
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
- rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, rpm_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_rpm_image_gen == "1":
- self._create_incremental(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install(pkgs_attempt, True)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
-
- execute_pre_post_process(self.d, rpm_post_process_cmds)
-
- if self.inc_rpm_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
- 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
-
- def _get_delayed_postinsts(self):
- postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
- if os.path.isdir(postinst_dir):
- files = os.listdir(postinst_dir)
- for f in files:
- bb.note('Delayed package scriptlet: %s' % f)
- return files
-
- return None
-
- def _save_postinsts(self):
- # this is just a stub. For RPM, the failed postinstalls are
- # already saved in /etc/rpm-postinsts
- pass
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
- self.pm._invoke_dnf(["clean", "all"])
-
-
-class DpkgOpkgRootfs(Rootfs):
- def __init__(self, d, progress_reporter=None, logcatcher=None):
- super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
-
- def _get_pkgs_postinsts(self, status_file):
- def _get_pkg_depends_list(pkg_depends):
- pkg_depends_list = []
- # filter version requirements like libc (>= 1.1)
- for dep in pkg_depends.split(', '):
- m_dep = re.match(r"^(.*) \(.*\)$", dep)
- if m_dep:
- dep = m_dep.group(1)
- pkg_depends_list.append(dep)
-
- return pkg_depends_list
-
- pkgs = {}
- pkg_name = ""
- pkg_status_match = False
- pkg_depends = ""
-
- with open(status_file) as status:
- data = status.read()
- status.close()
- for line in data.split('\n'):
- m_pkg = re.match(r"^Package: (.*)", line)
- m_status = re.match(r"^Status:.*unpacked", line)
- m_depends = re.match(r"^Depends: (.*)", line)
-
- #Only one of m_pkg, m_status or m_depends is not None at time
- #If m_pkg is not None, we started a new package
- if m_pkg is not None:
- #Get Package name
- pkg_name = m_pkg.group(1)
- #Make sure we reset other variables
- pkg_status_match = False
- pkg_depends = ""
- elif m_status is not None:
- #New status matched
- pkg_status_match = True
- elif m_depends is not None:
- #New depends macthed
- pkg_depends = m_depends.group(1)
- else:
- pass
-
- #Now check if we can process package depends and postinst
- if "" != pkg_name and pkg_status_match:
- pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
- else:
- #Not enough information
- pass
-
- # remove package dependencies not in postinsts
- pkg_names = list(pkgs.keys())
- for pkg_name in pkg_names:
- deps = pkgs[pkg_name][:]
-
- for d in deps:
- if d not in pkg_names:
- pkgs[pkg_name].remove(d)
-
- return pkgs
-
- def _get_delayed_postinsts_common(self, status_file):
- def _dep_resolve(graph, node, resolved, seen):
- seen.append(node)
-
- for edge in graph[node]:
- if edge not in resolved:
- if edge in seen:
- raise RuntimeError("Packages %s and %s have " \
- "a circular dependency in postinsts scripts." \
- % (node, edge))
- _dep_resolve(graph, edge, resolved, seen)
-
- resolved.append(node)
-
- pkg_list = []
-
- pkgs = None
- if not self.d.getVar('PACKAGE_INSTALL').strip():
- bb.note("Building empty image")
- else:
- pkgs = self._get_pkgs_postinsts(status_file)
- if pkgs:
- root = "__packagegroup_postinst__"
- pkgs[root] = list(pkgs.keys())
- _dep_resolve(pkgs, root, pkg_list, [])
- pkg_list.remove(root)
-
- if len(pkg_list) == 0:
- return None
-
- return pkg_list
-
- def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
- if bb.utils.contains("IMAGE_FEATURES", "package-management",
- True, False, self.d):
- return
- num = 0
- for p in self._get_delayed_postinsts():
- bb.utils.mkdirhier(dst_postinst_dir)
-
- if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
- shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
- os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
-
- num += 1
-
-class DpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '^E:'
- self.log_check_expected_regexes = \
- [
- "^E: Unmet dependencies."
- ]
-
- bb.utils.remove(self.image_rootfs, True)
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
- self.manifest = DpkgManifest(d, manifest_dir)
- self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
- d.getVar('PACKAGE_ARCHS'),
- d.getVar('DPKG_ARCH'))
-
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
- deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
-
- alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
- bb.utils.mkdirhier(alt_dir)
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, deb_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Don't support incremental, so skip that
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
- self.pm.fix_broken_dependencies()
-
- if self.progress_reporter:
- # Don't support attemptonly, so skip that
- self.progress_reporter.next_stage()
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self._setup_dbg_rootfs(['/var/lib/dpkg'])
-
- self.pm.fix_broken_dependencies()
-
- self.pm.mark_packages("installed")
-
- self.pm.run_pre_post_installs()
-
- execute_pre_post_process(self.d, deb_post_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
-
- def _get_delayed_postinsts(self):
- status_file = self.image_rootfs + "/var/lib/dpkg/status"
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- pass
-
-
-class OpkgRootfs(DpkgOpkgRootfs):
- def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
- super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
- self.log_check_regex = '(exit 1|Collected errors)'
-
- self.manifest = OpkgManifest(d, manifest_dir)
- self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
- self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
-
- self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
- if self._remove_old_rootfs():
- bb.utils.remove(self.image_rootfs, True)
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- else:
- self.pm = OpkgPM(d,
- self.image_rootfs,
- self.opkg_conf,
- self.pkg_archs)
- self.pm.recover_packaging_data()
-
- bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
-
- def _prelink_file(self, root_dir, filename):
- bb.note('prelink %s in %s' % (filename, root_dir))
- prelink_cfg = oe.path.join(root_dir,
- self.d.expand('${sysconfdir}/prelink.conf'))
- if not os.path.exists(prelink_cfg):
- shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
- prelink_cfg)
-
- cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
- self._exec_shell_cmd([cmd_prelink,
- '--root',
- root_dir,
- '-amR',
- '-N',
- '-c',
- self.d.expand('${sysconfdir}/prelink.conf')])
-
- '''
- Compare two files with the same key twice to see if they are equal.
- If they are not equal, it means they are duplicated and come from
- different packages.
- 1st: Comapre them directly;
- 2nd: While incremental image creation is enabled, one of the
- files could be probaly prelinked in the previous image
- creation and the file has been changed, so we need to
- prelink the other one and compare them.
- '''
- def _file_equal(self, key, f1, f2):
-
- # Both of them are not prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- if bb.data.inherits_class('image-prelink', self.d):
- if self.image_rootfs not in f1:
- self._prelink_file(f1.replace(key, ''), f1)
-
- if self.image_rootfs not in f2:
- self._prelink_file(f2.replace(key, ''), f2)
-
- # Both of them are prelinked
- if filecmp.cmp(f1, f2):
- return True
-
- # Not equal
- return False
-
- """
- This function was reused from the old implementation.
- See commit: "image.bbclass: Added variables for multilib support." by
- Lianhao Lu.
- """
- def _multilib_sanity_test(self, dirs):
-
- allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
- if allow_replace is None:
- allow_replace = ""
-
- allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace))
- error_prompt = "Multilib check error:"
-
- files = {}
- for dir in dirs:
- for root, subfolders, subfiles in os.walk(dir):
- for file in subfiles:
- item = os.path.join(root, file)
- key = str(os.path.join("/", os.path.relpath(item, dir)))
-
- valid = True
- if key in files:
- #check whether the file is allow to replace
- if allow_rep.match(key):
- valid = True
- else:
- if os.path.exists(files[key]) and \
- os.path.exists(item) and \
- not self._file_equal(key, files[key], item):
- valid = False
- bb.fatal("%s duplicate files %s %s is not the same\n" %
- (error_prompt, item, files[key]))
-
- #pass the check, add to list
- if valid:
- files[key] = item
-
- def _multilib_test_install(self, pkgs):
- ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
- bb.utils.mkdirhier(ml_temp)
-
- dirs = [self.image_rootfs]
-
- for variant in self.d.getVar("MULTILIB_VARIANTS").split():
- ml_target_rootfs = os.path.join(ml_temp, variant)
-
- bb.utils.remove(ml_target_rootfs, True)
-
- ml_opkg_conf = os.path.join(ml_temp,
- variant + "-" + os.path.basename(self.opkg_conf))
-
- ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
-
- ml_pm.update()
- ml_pm.install(pkgs)
-
- dirs.append(ml_target_rootfs)
-
- self._multilib_sanity_test(dirs)
-
- '''
- While ipk incremental image generation is enabled, it will remove the
- unneeded pkgs by comparing the old full manifest in previous existing
- image and the new full manifest in the current image.
- '''
- def _remove_extra_packages(self, pkgs_initial_install):
- if self.inc_opkg_image_gen == "1":
- # Parse full manifest in previous existing image creation session
- old_full_manifest = self.manifest.parse_full_manifest()
-
- # Create full manifest for the current image session, the old one
- # will be replaced by the new one.
- self.manifest.create_full(self.pm)
-
- # Parse full manifest in current image creation session
- new_full_manifest = self.manifest.parse_full_manifest()
-
- pkg_to_remove = list()
- for pkg in old_full_manifest:
- if pkg not in new_full_manifest:
- pkg_to_remove.append(pkg)
-
- if pkg_to_remove != []:
- bb.note('decremental removed: %s' % ' '.join(pkg_to_remove))
- self.pm.remove(pkg_to_remove)
-
- '''
- Compare with previous existing image creation, if some conditions
- triggered, the previous old image should be removed.
- The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS
- and BAD_RECOMMENDATIONS' has been changed.
- '''
- def _remove_old_rootfs(self):
- if self.inc_opkg_image_gen != "1":
- return True
-
- vars_list_file = self.d.expand('${T}/vars_list')
-
- old_vars_list = ""
- if os.path.exists(vars_list_file):
- old_vars_list = open(vars_list_file, 'r+').read()
-
- new_vars_list = '%s:%s:%s\n' % \
- ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
- (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
- open(vars_list_file, 'w+').write(new_vars_list)
-
- if old_vars_list != new_vars_list:
- return True
-
- return False
-
- def _create(self):
- pkgs_to_install = self.manifest.parse_initial_manifest()
- opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
- opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
-
- # update PM index files
- self.pm.write_index()
-
- execute_pre_post_process(self.d, opkg_pre_process_cmds)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
- # Steps are a bit different in order, skip next
- self.progress_reporter.next_stage()
-
- self.pm.update()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- if self.inc_opkg_image_gen == "1":
- self._remove_extra_packages(pkgs_to_install)
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- # For multilib, we perform a sanity test before final install
- # If sanity test fails, it will automatically do a bb.fatal()
- # and the installation will stop
- if pkg_type == Manifest.PKG_TYPE_MULTILIB:
- self._multilib_test_install(pkgs_to_install[pkg_type])
-
- self.pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- self.pm.install_complementary()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
- opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
- self._setup_dbg_rootfs([opkg_dir])
-
- execute_pre_post_process(self.d, opkg_post_process_cmds)
-
- if self.inc_opkg_image_gen == "1":
- self.pm.backup_packaging_data()
-
- if self.progress_reporter:
- self.progress_reporter.next_stage()
-
- @staticmethod
- def _depends_list():
- return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
-
- def _get_delayed_postinsts(self):
- status_file = os.path.join(self.image_rootfs,
- self.d.getVar('OPKGLIBDIR').strip('/'),
- "opkg", "status")
- return self._get_delayed_postinsts_common(status_file)
-
- def _save_postinsts(self):
- dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
- src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
- return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
-
- def _log_check(self):
- self._log_check_warn()
- self._log_check_error()
-
- def _cleanup(self):
- self.pm.remove_lists()
-
def get_class_for_type(imgtype):
- return {"rpm": RpmRootfs,
- "ipk": OpkgRootfs,
- "deb": DpkgRootfs}[imgtype]
+ import importlib
+ mod = importlib.import_module('oe.package_manager.' + imgtype + '.rootfs')
+ return mod.PkgRootfs
def variable_depends(d, manifest_dir=None):
img_type = d.getVar('IMAGE_PKGTYPE')
@@ -971,13 +374,9 @@ def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None)
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "ipk":
- OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
- elif img_type == "deb":
- DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+ cls = get_class_for_type(img_type)
+ cls(d, manifest_dir, progress_reporter, logcatcher).create()
os.environ.clear()
os.environ.update(env_bkp)
@@ -987,12 +386,10 @@ def image_list_installed_packages(d, rootfs_dir=None):
rootfs_dir = d.getVar('IMAGE_ROOTFS')
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
if __name__ == "__main__":
"""
diff --git a/meta/lib/oe/rust.py b/meta/lib/oe/rust.py
new file mode 100644
index 0000000000..ec70b34805
--- /dev/null
+++ b/meta/lib/oe/rust.py
@@ -0,0 +1,5 @@
+# Handle mismatches between `uname -m`-style output and Rust's arch names
+def arch_to_rust_arch(arch):
+ if arch == "ppc64le":
+ return "powerpc64le"
+ return arch
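
The only divergence handled so far is 64-bit little-endian PowerPC, where `uname -m` reports ppc64le but Rust target triples use powerpc64le; every other value passes through unchanged. A quick illustration, assuming meta/lib is on sys.path:

    from oe.rust import arch_to_rust_arch

    assert arch_to_rust_arch("ppc64le") == "powerpc64le"
    assert arch_to_rust_arch("x86_64") == "x86_64"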
diff --git a/meta/lib/oe/sbom.py b/meta/lib/oe/sbom.py
new file mode 100644
index 0000000000..3372f13a9d
--- /dev/null
+++ b/meta/lib/oe/sbom.py
@@ -0,0 +1,78 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import collections
+
+DepRecipe = collections.namedtuple("DepRecipe", ("doc", "doc_sha1", "recipe"))
+DepSource = collections.namedtuple("DepSource", ("doc", "doc_sha1", "recipe", "file"))
+
+
+def get_recipe_spdxid(d):
+ return "SPDXRef-%s-%s" % ("Recipe", d.getVar("PN"))
+
+
+def get_package_spdxid(pkg):
+ return "SPDXRef-Package-%s" % pkg
+
+
+def get_source_file_spdxid(d, idx):
+ return "SPDXRef-SourceFile-%s-%d" % (d.getVar("PN"), idx)
+
+
+def get_packaged_file_spdxid(pkg, idx):
+ return "SPDXRef-PackagedFile-%s-%d" % (pkg, idx)
+
+
+def get_image_spdxid(img):
+ return "SPDXRef-Image-%s" % img
+
+
+def get_sdk_spdxid(sdk):
+ return "SPDXRef-SDK-%s" % sdk
+
+
+def write_doc(d, spdx_doc, subdir, spdx_deploy=None):
+ from pathlib import Path
+
+ if spdx_deploy is None:
+ spdx_deploy = Path(d.getVar("SPDXDEPLOY"))
+
+ dest = spdx_deploy / subdir / (spdx_doc.name + ".spdx.json")
+ dest.parent.mkdir(exist_ok=True, parents=True)
+ with dest.open("wb") as f:
+ doc_sha1 = spdx_doc.to_json(f, sort_keys=True)
+
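+    # Also index the document by namespace: a "by-namespace" symlink points back to the deployed file.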
+ l = spdx_deploy / "by-namespace" / spdx_doc.documentNamespace.replace("/", "_")
+ l.parent.mkdir(exist_ok=True, parents=True)
+ l.symlink_to(os.path.relpath(dest, l.parent))
+
+ return doc_sha1
+
+
+def read_doc(fn):
+ import hashlib
+ import oe.spdx
+ import io
+ import contextlib
+
+ @contextlib.contextmanager
+ def get_file():
+ if isinstance(fn, io.IOBase):
+ yield fn
+ else:
+ with fn.open("rb") as f:
+ yield f
+
+ with get_file() as f:
+ sha1 = hashlib.sha1()
+ while True:
+ chunk = f.read(4096)
+ if not chunk:
+ break
+ sha1.update(chunk)
+
+ f.seek(0)
+ doc = oe.spdx.SPDXDocument.from_json(f)
+
+ return (doc, sha1.hexdigest())
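
read_doc accepts either an already-open binary stream or a path-like object with an open() method (such as pathlib.Path), and returns the parsed document together with the SHA-1 of its bytes — the same digest write_doc computes, so the DepRecipe/DepSource tuples above can pin the exact serialized document they reference. A hedged sketch, with a hypothetical deploy path:

    from pathlib import Path
    import oe.sbom as sbom

    doc_path = Path("deploy/spdx/recipes/recipe-busybox.spdx.json")  # hypothetical

    doc, sha1 = sbom.read_doc(doc_path)
    # Dependents record the digest so a changed document is detectable later;
    # this assumes the document lists at least one package.
    dep = sbom.DepRecipe(doc, sha1, doc.packages[0])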
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
index d02a274812..27347667e8 100644
--- a/meta/lib/oe/sdk.py
+++ b/meta/lib/oe/sdk.py
@@ -7,8 +7,6 @@ from oe.utils import execute_pre_post_process
from oe.manifest import *
from oe.package_manager import *
import os
-import shutil
-import glob
import traceback
class Sdk(object, metaclass=ABCMeta):
@@ -110,283 +108,6 @@ class Sdk(object, metaclass=ABCMeta):
pass
-class RpmSdk(Sdk):
- def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
- super(RpmSdk, self).__init__(d, manifest_dir)
-
- self.target_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = RpmManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- rpm_repo_workdir = "oe-sdk-repo"
- if "sdk_ext" in d.getVar("BB_RUNTASK"):
- rpm_repo_workdir = "oe-sdk-ext-repo"
-
- self.target_pm = RpmPM(d,
- self.sdk_target_sysroot,
- self.d.getVar('TARGET_VENDOR'),
- 'target',
- rpm_repo_workdir=rpm_repo_workdir
- )
-
- self.host_pm = RpmPM(d,
- self.sdk_host_sysroot,
- self.d.getVar('SDK_VENDOR'),
- 'host',
- "SDK_PACKAGE_ARCHS",
- "SDK_OS",
- rpm_repo_workdir=rpm_repo_workdir
- )
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.create_configs()
- pm.write_index()
- pm.update()
-
- pkgs = []
- pkgs_attempt = []
- for pkg_type in pkgs_to_install:
- if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
- pkgs_attempt += pkgs_to_install[pkg_type]
- else:
- pkgs += pkgs_to_install[pkg_type]
-
- pm.install(pkgs)
-
- pm.install(pkgs_attempt, True)
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts(populate_sdk='target')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts(populate_sdk='host')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- # Move host RPM library data
- native_rpm_state_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib",
- "rpm"
- )
- self.mkdirhier(native_rpm_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output,
- "var",
- "lib",
- "rpm",
- "*")):
- self.movefile(f, native_rpm_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
- # Move host sysconfig data
- native_sysconf_dir = os.path.join(self.sdk_output,
- self.sdk_native_path,
- self.d.getVar('sysconfdir',
- True).strip('/'),
- )
- self.mkdirhier(native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
- self.movefile(f, native_sysconf_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
- self.movefile(f, native_sysconf_dir)
- self.remove(os.path.join(self.sdk_output, "etc"), True)
-
-
-class OpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(OpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf = self.d.getVar("IPKGCONF_TARGET")
- self.host_conf = self.d.getVar("IPKGCONF_SDK")
-
- self.target_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = OpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- ipk_repo_workdir = "oe-sdk-repo"
- if "sdk_ext" in d.getVar("BB_RUNTASK"):
- ipk_repo_workdir = "oe-sdk-ext-repo"
-
- self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
- self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
- ipk_repo_workdir=ipk_repo_workdir)
-
- self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
- self.d.getVar("SDK_PACKAGE_ARCHS"),
- ipk_repo_workdir=ipk_repo_workdir)
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
- pm.write_index()
-
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts(populate_sdk='target')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts(populate_sdk='host')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
- host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
-
- self.mkdirhier(target_sysconfdir)
- shutil.copy(self.target_conf, target_sysconfdir)
- os.chmod(os.path.join(target_sysconfdir,
- os.path.basename(self.target_conf)), 0o644)
-
- self.mkdirhier(host_sysconfdir)
- shutil.copy(self.host_conf, host_sysconfdir)
- os.chmod(os.path.join(host_sysconfdir,
- os.path.basename(self.host_conf)), 0o644)
-
- native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- self.d.getVar('localstatedir_nativesdk').strip('/'),
- "lib", "opkg")
- self.mkdirhier(native_opkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
- self.movefile(f, native_opkg_state_dir)
-
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-class DpkgSdk(Sdk):
- def __init__(self, d, manifest_dir=None):
- super(DpkgSdk, self).__init__(d, manifest_dir)
-
- self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
- self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
-
- self.target_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_TARGET)
- self.host_manifest = DpkgManifest(d, self.manifest_dir,
- Manifest.MANIFEST_TYPE_SDK_HOST)
-
- deb_repo_workdir = "oe-sdk-repo"
- if "sdk_ext" in d.getVar("BB_RUNTASK"):
- deb_repo_workdir = "oe-sdk-ext-repo"
-
- self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
- self.d.getVar("PACKAGE_ARCHS"),
- self.d.getVar("DPKG_ARCH"),
- self.target_conf_dir,
- deb_repo_workdir=deb_repo_workdir)
-
- self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
- self.d.getVar("SDK_PACKAGE_ARCHS"),
- self.d.getVar("DEB_SDK_ARCH"),
- self.host_conf_dir,
- deb_repo_workdir=deb_repo_workdir)
-
- def _copy_apt_dir_to(self, dst_dir):
- staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
-
- self.remove(dst_dir, True)
-
- shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
-
- def _populate_sysroot(self, pm, manifest):
- pkgs_to_install = manifest.parse_initial_manifest()
-
- pm.write_index()
- pm.update()
-
- for pkg_type in self.install_order:
- if pkg_type in pkgs_to_install:
- pm.install(pkgs_to_install[pkg_type],
- [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
-
- def _populate(self):
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
-
- bb.note("Installing TARGET packages")
- self._populate_sysroot(self.target_pm, self.target_manifest)
-
- self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
-
- self.target_pm.run_intercepts(populate_sdk='target')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.target_pm.remove_packaging_data()
-
- bb.note("Installing NATIVESDK packages")
- self._populate_sysroot(self.host_pm, self.host_manifest)
- self.install_locales(self.host_pm)
-
- self.host_pm.run_intercepts(populate_sdk='host')
-
- execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
-
- self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
- "etc", "apt"))
-
- if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
- self.host_pm.remove_packaging_data()
-
- native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
- "var", "lib", "dpkg")
- self.mkdirhier(native_dpkg_state_dir)
- for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
- self.movefile(f, native_dpkg_state_dir)
- self.remove(os.path.join(self.sdk_output, "var"), True)
-
-
-
def sdk_list_installed_packages(d, target, rootfs_dir=None):
if rootfs_dir is None:
sdk_output = d.getVar('SDK_OUTPUT')
@@ -394,27 +115,22 @@ def sdk_list_installed_packages(d, target, rootfs_dir=None):
rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
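+    # When listing host-side SDK packages (target is False), point the ipk backend at the SDK conf via IPKGCONF_TARGET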
+ if target is False:
+ ipkgconf_sdk_target = d.getVar("IPKGCONF_SDK")
+ d.setVar("IPKGCONF_TARGET", ipkgconf_sdk_target)
+
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
- os_var = ["SDK_OS", None][target is True]
- return RpmPkgsList(d, rootfs_dir).list_pkgs()
- elif img_type == "ipk":
- conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
- return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
- elif img_type == "deb":
- return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type)
+ return cls.PMPkgsList(d, rootfs_dir).list_pkgs()
def populate_sdk(d, manifest_dir=None):
env_bkp = os.environ.copy()
img_type = d.getVar('IMAGE_PKGTYPE')
- if img_type == "rpm":
- RpmSdk(d, manifest_dir).populate()
- elif img_type == "ipk":
- OpkgSdk(d, manifest_dir).populate()
- elif img_type == "deb":
- DpkgSdk(d, manifest_dir).populate()
+ import importlib
+ cls = importlib.import_module('oe.package_manager.' + img_type + '.sdk')
+ cls.PkgSdk(d, manifest_dir).populate()
os.environ.clear()
os.environ.update(env_bkp)
diff --git a/meta/lib/oe/spdx.py b/meta/lib/oe/spdx.py
new file mode 100644
index 0000000000..14ca706895
--- /dev/null
+++ b/meta/lib/oe/spdx.py
@@ -0,0 +1,342 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# This library is intended to capture the JSON SPDX specification in a
+# type-safe manner. It is not intended to encode any OE-specific behaviors;
+# see sbom.py for that.
+#
+# The prose SPDX spec document doesn't always spell out the JSON syntax for a
+# particular construct, which can make it hard to determine what the JSON
+# should be. I've found it is actually much simpler to read the official
+# SPDX JSON schema, which can be found at https://github.com/spdx/spdx-spec
+# in schemas/spdx-schema.json.
+#
+
+import hashlib
+import itertools
+import json
+
+SPDX_VERSION = "2.2"
+
+
+#
+# The following are the support classes that are used to implement SPDX objects
+#
+
+class _Property(object):
+ """
+ A generic SPDX object property. The different types will derive from this
+ class
+ """
+
+ def __init__(self, *, default=None):
+ self.default = default
+
+ def setdefault(self, dest, name):
+ if self.default is not None:
+ dest.setdefault(name, self.default)
+
+
+class _String(_Property):
+ """
+ A scalar string property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return source
+
+
+class _Object(_Property):
+ """
+ A property of a SPDX object whose value is itself a SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(**kwargs)
+ self.cls = cls
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if name not in obj._spdx:
+ obj._spdx[name] = self.cls()
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = value
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return self.cls(**source)
+
+
+class _ListProperty(_Property):
+ """
+ A list of SPDX properties
+ """
+
+ def __init__(self, prop, **kwargs):
+ super().__init__(**kwargs)
+ self.prop = prop
+
+ def set_property(self, attrs, name):
+ def get_helper(obj):
+ if name not in obj._spdx:
+ obj._spdx[name] = []
+ return obj._spdx[name]
+
+ def set_helper(obj, value):
+ obj._spdx[name] = list(value)
+
+ def del_helper(obj):
+ del obj._spdx[name]
+
+ attrs[name] = property(get_helper, set_helper, del_helper)
+
+ def init(self, source):
+ return [self.prop.init(o) for o in source]
+
+
+class _StringList(_ListProperty):
+ """
+ A list of strings as a property for an SPDX object
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(_String(), **kwargs)
+
+
+class _ObjectList(_ListProperty):
+ """
+ A list of SPDX objects as a property for an SPDX object
+ """
+
+ def __init__(self, cls, **kwargs):
+ super().__init__(_Object(cls), **kwargs)
+
+
+class MetaSPDXObject(type):
+ """
+ A metaclass that allows properties (anything derived from a _Property
+ class) to be defined for a SPDX object
+ """
+ def __new__(mcls, name, bases, attrs):
+ attrs["_properties"] = {}
+
+ for key in attrs.keys():
+ if isinstance(attrs[key], _Property):
+ prop = attrs[key]
+ attrs["_properties"][key] = prop
+ prop.set_property(attrs, key)
+
+ return super().__new__(mcls, name, bases, attrs)
+
+
+class SPDXObject(metaclass=MetaSPDXObject):
+ """
+ The base SPDX object; all SPDX spec classes must derive from this class
+ """
+ def __init__(self, **d):
+ self._spdx = {}
+
+ for name, prop in self._properties.items():
+ prop.setdefault(self._spdx, name)
+ if name in d:
+ self._spdx[name] = prop.init(d[name])
+
+ def serializer(self):
+ return self._spdx
+
+ def __setattr__(self, name, value):
+ if name in self._properties or name == "_spdx":
+ super().__setattr__(name, value)
+ return
+ raise KeyError("%r is not a valid SPDX property" % name)
+
+#
+# These are the SPDX objects implemented from the spec. The *only* properties
+# that can be added to these objects are ones directly specified in the SPDX
+# spec, however you may add helper functions to make operations easier.
+#
+# Defaults should *only* be specified if the SPDX spec says there is a certain
+# required value for a field (e.g. dataLicense), or if the field is mandatory
+# and has some sane "this field is unknown" value (e.g. "NOASSERTION").
+#
+
+class SPDXAnnotation(SPDXObject):
+ annotationDate = _String()
+ annotationType = _String()
+ annotator = _String()
+ comment = _String()
+
+class SPDXChecksum(SPDXObject):
+ algorithm = _String()
+ checksumValue = _String()
+
+
+class SPDXRelationship(SPDXObject):
+ spdxElementId = _String()
+ relatedSpdxElement = _String()
+ relationshipType = _String()
+ comment = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXExternalReference(SPDXObject):
+ referenceCategory = _String()
+ referenceType = _String()
+ referenceLocator = _String()
+
+
+class SPDXPackageVerificationCode(SPDXObject):
+ packageVerificationCodeValue = _String()
+ packageVerificationCodeExcludedFiles = _StringList()
+
+
+class SPDXPackage(SPDXObject):
+ name = _String()
+ SPDXID = _String()
+ versionInfo = _String()
+ downloadLocation = _String(default="NOASSERTION")
+ packageSupplier = _String(default="NOASSERTION")
+ homepage = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ licenseDeclared = _String(default="NOASSERTION")
+ summary = _String()
+ description = _String()
+ sourceInfo = _String()
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoFromFiles = _StringList(default=["NOASSERTION"])
+ externalRefs = _ObjectList(SPDXExternalReference)
+ packageVerificationCode = _Object(SPDXPackageVerificationCode)
+ hasFiles = _StringList()
+ packageFileName = _String()
+ annotations = _ObjectList(SPDXAnnotation)
+
+
+class SPDXFile(SPDXObject):
+ SPDXID = _String()
+ fileName = _String()
+ licenseConcluded = _String(default="NOASSERTION")
+ copyrightText = _String(default="NOASSERTION")
+ licenseInfoInFiles = _StringList(default=["NOASSERTION"])
+ checksums = _ObjectList(SPDXChecksum)
+ fileTypes = _StringList()
+
+
+class SPDXCreationInfo(SPDXObject):
+ created = _String()
+ licenseListVersion = _String()
+ comment = _String()
+ creators = _StringList()
+
+
+class SPDXExternalDocumentRef(SPDXObject):
+ externalDocumentId = _String()
+ spdxDocument = _String()
+ checksum = _Object(SPDXChecksum)
+
+
+class SPDXExtractedLicensingInfo(SPDXObject):
+ name = _String()
+ comment = _String()
+ licenseId = _String()
+ extractedText = _String()
+
+
+class SPDXDocument(SPDXObject):
+ spdxVersion = _String(default="SPDX-" + SPDX_VERSION)
+ dataLicense = _String(default="CC0-1.0")
+ SPDXID = _String(default="SPDXRef-DOCUMENT")
+ name = _String()
+ documentNamespace = _String()
+ creationInfo = _Object(SPDXCreationInfo)
+ packages = _ObjectList(SPDXPackage)
+ files = _ObjectList(SPDXFile)
+ relationships = _ObjectList(SPDXRelationship)
+ externalDocumentRefs = _ObjectList(SPDXExternalDocumentRef)
+ hasExtractedLicensingInfos = _ObjectList(SPDXExtractedLicensingInfo)
+
+ def __init__(self, **d):
+ super().__init__(**d)
+
+ def to_json(self, f, *, sort_keys=False, indent=None, separators=None):
+ class Encoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, SPDXObject):
+ return o.serializer()
+
+ return super().default(o)
+
+ sha1 = hashlib.sha1()
+ for chunk in Encoder(
+ sort_keys=sort_keys,
+ indent=indent,
+ separators=separators,
+ ).iterencode(self):
+ chunk = chunk.encode("utf-8")
+ f.write(chunk)
+ sha1.update(chunk)
+
+ return sha1.hexdigest()
+
+ @classmethod
+ def from_json(cls, f):
+ return cls(**json.load(f))
+
+ def add_relationship(self, _from, relationship, _to, *, comment=None, annotation=None):
+ if isinstance(_from, SPDXObject):
+ from_spdxid = _from.SPDXID
+ else:
+ from_spdxid = _from
+
+ if isinstance(_to, SPDXObject):
+ to_spdxid = _to.SPDXID
+ else:
+ to_spdxid = _to
+
+ r = SPDXRelationship(
+ spdxElementId=from_spdxid,
+ relatedSpdxElement=to_spdxid,
+ relationshipType=relationship,
+ )
+
+ if comment is not None:
+ r.comment = comment
+
+ if annotation is not None:
+ r.annotations.append(annotation)
+
+ self.relationships.append(r)
+
+ def find_by_spdxid(self, spdxid):
+ for o in itertools.chain(self.packages, self.files):
+ if o.SPDXID == spdxid:
+ return o
+ return None
+
+ def find_external_document_ref(self, namespace):
+ for r in self.externalDocumentRefs:
+ if r.spdxDocument == namespace:
+ return r
+ return None
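
A short sketch of the property machinery in use: defaults such as spdxVersion and dataLicense are filled in automatically, list properties materialize on first access, to_json returns the SHA-1 of the serialized stream, and assigning an undeclared attribute raises. The names below are illustrative only, assuming meta/lib is on sys.path:

    import io
    from oe.spdx import SPDXDocument, SPDXPackage

    doc = SPDXDocument()
    doc.name = "demo-document"
    doc.documentNamespace = "http://example.com/spdxdocs/demo"

    pkg = SPDXPackage(name="busybox", SPDXID="SPDXRef-Package-busybox")
    doc.packages.append(pkg)

    # Relationship endpoints may be SPDX objects or raw SPDXID strings.
    doc.add_relationship(doc, "DESCRIBES", pkg)

    buf = io.BytesIO()
    doc_sha1 = doc.to_json(buf, sort_keys=True)

    assert doc.spdxVersion == "SPDX-2.2"
    assert doc.find_by_spdxid("SPDXRef-Package-busybox") is pkg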
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
index 21ae0a7657..2cf858e201 100644
--- a/meta/lib/oe/sstatesig.py
+++ b/meta/lib/oe/sstatesig.py
@@ -59,7 +59,7 @@ def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCaches):
return False
# Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
- # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # if we're just doing an RRECOMMENDS:xxx = "kernel-module-*", not least because the checksum
# is machine specific.
# Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
# and we recommend a kernel-module, we exclude the dependency.
@@ -108,7 +108,6 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
"").split()
self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
- self.buildarch = data.getVar('BUILD_ARCH')
self._internal = False
pass
@@ -147,13 +146,6 @@ class SignatureGeneratorOEBasicHashMixIn(object):
self.dump_lockedsigs(sigfile)
return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
- def prep_taskhash(self, tid, deps, dataCaches):
- super().prep_taskhash(tid, deps, dataCaches)
- if hasattr(self, "extramethod"):
- (mc, _, _, fn) = bb.runqueue.split_tid_mcfn(tid)
- inherits = " ".join(dataCaches[mc].inherits[fn])
- if inherits.find("/native.bbclass") != -1 or inherits.find("/cross.bbclass") != -1:
- self.extramethod[tid] = ":" + self.buildarch
def get_taskhash(self, tid, deps, dataCaches):
if tid in self.lockedhashes:
@@ -162,12 +154,7 @@ class SignatureGeneratorOEBasicHashMixIn(object):
else:
return super().get_taskhash(tid, deps, dataCaches)
- # get_taskhash will call get_unihash internally in the parent class, we
- # need to disable our filter of it whilst this runs else
- # incorrect hashes can be calculated.
- self._internal = True
h = super().get_taskhash(tid, deps, dataCaches)
- self._internal = False
(mc, _, task, fn) = bb.runqueue.split_tid_mcfn(tid)
@@ -251,15 +238,26 @@ class SignatureGeneratorOEBasicHashMixIn(object):
continue
f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.get_unihash(tid) + " \\\n")
f.write(' "\n')
- f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+ f.write('SIGGEN_LOCKEDSIGS_TYPES:%s = "%s"' % (self.machine, " ".join(l)))
+
+ def dump_siglist(self, sigfile, path_prefix_strip=None):
+ def strip_fn(fn):
+ nonlocal path_prefix_strip
+ if not path_prefix_strip:
+ return fn
+
+ fn_exp = fn.split(":")
+ if fn_exp[-1].startswith(path_prefix_strip):
+ fn_exp[-1] = fn_exp[-1][len(path_prefix_strip):]
+
+ return ":".join(fn_exp)
- def dump_siglist(self, sigfile):
with open(sigfile, "w") as f:
tasks = []
for taskitem in self.taskhash:
(fn, task) = taskitem.rsplit(":", 1)
pn = self.lockedpnmap[fn]
- tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ tasks.append((pn, task, strip_fn(fn), self.taskhash[taskitem]))
for (pn, task, fn, taskhash) in sorted(tasks):
f.write('%s:%s %s %s\n' % (pn, task, fn, taskhash))
@@ -439,13 +437,13 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
d2 = multilibcache[variant]
if taskdata.endswith("-native"):
- pkgarchs = ["${BUILD_ARCH}"]
+ pkgarchs = ["${BUILD_ARCH}", "${BUILD_ARCH}_${ORIGNATIVELSBSTRING}"]
elif taskdata.startswith("nativesdk-"):
pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
elif "-cross-canadian" in taskdata:
pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
elif "-cross-" in taskdata:
- pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ pkgarchs = ["${BUILD_ARCH}"]
elif "-crosssdk" in taskdata:
pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
else:
@@ -458,7 +456,7 @@ def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
if os.path.exists(manifest):
return manifest, d2
- bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ bb.fatal("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
return None, d2
def OEOuthashBasic(path, sigfile, task, d):
@@ -472,6 +470,8 @@ def OEOuthashBasic(path, sigfile, task, d):
import stat
import pwd
import grp
+ import re
+ import fnmatch
def update_hash(s):
s = s.encode('utf-8')
@@ -481,15 +481,37 @@ def OEOuthashBasic(path, sigfile, task, d):
h = hashlib.sha256()
prev_dir = os.getcwd()
+ corebase = d.getVar("COREBASE")
+ tmpdir = d.getVar("TMPDIR")
include_owners = os.environ.get('PSEUDO_DISABLED') == '0'
- extra_content = d.getVar('HASHEQUIV_HASH_VERSION')
+ if "package_write_" in task or task == "package_qa":
+ include_owners = False
+ include_timestamps = False
+ include_root = True
+ if task == "package":
+ include_timestamps = True
+ include_root = False
+ hash_version = d.getVar('HASHEQUIV_HASH_VERSION')
+ extra_sigdata = d.getVar("HASHEQUIV_EXTRA_SIGDATA")
+
+ filemaps = {}
+ for m in (d.getVar('SSTATE_HASHEQUIV_FILEMAP') or '').split():
+ entry = m.split(":")
+ if len(entry) != 3 or entry[0] != task:
+ continue
+ filemaps.setdefault(entry[1], [])
+ filemaps[entry[1]].append(entry[2])
try:
os.chdir(path)
+ basepath = os.path.normpath(path)
update_hash("OEOuthashBasic\n")
- if extra_content:
- update_hash(extra_content + "\n")
+ if hash_version:
+ update_hash(hash_version + "\n")
+
+ if extra_sigdata:
+ update_hash(extra_sigdata + "\n")
# It is only currently useful to get equivalent hashes for things that
# can be restored from sstate. Since the sstate object is named using
@@ -534,27 +556,33 @@ def OEOuthashBasic(path, sigfile, task, d):
else:
add_perm(stat.S_IXUSR, 'x')
- add_perm(stat.S_IRGRP, 'r')
- add_perm(stat.S_IWGRP, 'w')
- if stat.S_ISGID & s.st_mode:
- add_perm(stat.S_IXGRP, 's', 'S')
- else:
- add_perm(stat.S_IXGRP, 'x')
+ if include_owners:
+ # Group/other permissions are only relevant in pseudo context
+ add_perm(stat.S_IRGRP, 'r')
+ add_perm(stat.S_IWGRP, 'w')
+ if stat.S_ISGID & s.st_mode:
+ add_perm(stat.S_IXGRP, 's', 'S')
+ else:
+ add_perm(stat.S_IXGRP, 'x')
- add_perm(stat.S_IROTH, 'r')
- add_perm(stat.S_IWOTH, 'w')
- if stat.S_ISVTX & s.st_mode:
- update_hash('t')
- else:
- add_perm(stat.S_IXOTH, 'x')
+ add_perm(stat.S_IROTH, 'r')
+ add_perm(stat.S_IWOTH, 'w')
+ if stat.S_ISVTX & s.st_mode:
+ update_hash('t')
+ else:
+ add_perm(stat.S_IXOTH, 'x')
- if include_owners:
try:
update_hash(" %10s" % pwd.getpwuid(s.st_uid).pw_name)
update_hash(" %10s" % grp.getgrgid(s.st_gid).gr_name)
- except KeyError:
+ except KeyError as e:
bb.warn("KeyError in %s" % path)
- raise
+ msg = ("KeyError: %s\nPath %s is owned by uid %d, gid %d, which doesn't match "
+ "any user/group on target. This may be due to host contamination." % (e, path, s.st_uid, s.st_gid))
+ raise Exception(msg).with_traceback(e.__traceback__)
+
+ if include_timestamps:
+ update_hash(" %10d" % s.st_mtime)
update_hash(" ")
if stat.S_ISBLK(s.st_mode) or stat.S_ISCHR(s.st_mode):
@@ -562,8 +590,13 @@ def OEOuthashBasic(path, sigfile, task, d):
else:
update_hash(" " * 9)
+ filterfile = False
+ for entry in filemaps:
+ if fnmatch.fnmatch(path, entry):
+ filterfile = True
+
update_hash(" ")
- if stat.S_ISREG(s.st_mode):
+ if stat.S_ISREG(s.st_mode) and not filterfile:
update_hash("%10d" % s.st_size)
else:
update_hash(" " * 10)
@@ -572,9 +605,24 @@ def OEOuthashBasic(path, sigfile, task, d):
fh = hashlib.sha256()
if stat.S_ISREG(s.st_mode):
# Hash file contents
- with open(path, 'rb') as d:
- for chunk in iter(lambda: d.read(4096), b""):
+ if filterfile:
+ # Need to ignore paths in crossscripts and postinst-useradd files.
+ with open(path, 'rb') as d:
+ chunk = d.read()
+ chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
+ for entry in filemaps:
+ if not fnmatch.fnmatch(path, entry):
+ continue
+ for r in filemaps[entry]:
+ if r.startswith("regex-"):
+ chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
+ else:
+ chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
fh.update(chunk)
+ else:
+ with open(path, 'rb') as d:
+ for chunk in iter(lambda: d.read(4096), b""):
+ fh.update(chunk)
update_hash(fh.hexdigest())
else:
update_hash(" " * len(fh.hexdigest()))
@@ -587,7 +635,8 @@ def OEOuthashBasic(path, sigfile, task, d):
update_hash("\n")
# Process this directory and all its child files
- process(root)
+ if include_root or root != ".":
+ process(root)
for f in files:
if f == 'fixmepath':
continue
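
The SSTATE_HASHEQUIV_FILEMAP hunks above parse space-separated "task:glob:replacement" entries and strip matching content out of files before hashing, so build paths in cross scripts and postinst-useradd files no longer perturb the output hash. A minimal standalone sketch of that parsing and filtering using only the stdlib (function names and inputs are illustrative, not part of the patch):

    import re
    import fnmatch

    def parse_filemaps(value, task):
        # Collect glob -> replacement lists for one task from a
        # space-separated "task:glob:replacement" variable.
        filemaps = {}
        for m in (value or '').split():
            entry = m.split(":")
            if len(entry) != 3 or entry[0] != task:
                continue
            filemaps.setdefault(entry[1], []).append(entry[2])
        return filemaps

    def filter_chunk(chunk, path, filemaps, basepath):
        # Strip the base build path, then any literal or "regex-" prefixed
        # replacements whose glob matches this file.
        chunk = chunk.replace(bytes(basepath, encoding='utf8'), b'')
        for pattern, repls in filemaps.items():
            if not fnmatch.fnmatch(path, pattern):
                continue
            for r in repls:
                if r.startswith("regex-"):
                    chunk = re.sub(bytes(r[6:], encoding='utf8'), b'', chunk)
                else:
                    chunk = chunk.replace(bytes(r, encoding='utf8'), b'')
        return chunk
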
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
index eb10a6e33e..53186c4a3e 100644
--- a/meta/lib/oe/terminal.py
+++ b/meta/lib/oe/terminal.py
@@ -5,7 +5,6 @@ import logging
import oe.classutils
import shlex
from bb.process import Popen, ExecutionError
-from distutils.version import LooseVersion
logger = logging.getLogger('BitBake.OE.Terminal')
@@ -86,10 +85,10 @@ class Konsole(XTerminal):
def __init__(self, sh_cmd, title=None, env=None, d=None):
# Check version
vernum = check_terminal_version("konsole")
- if vernum and LooseVersion(vernum) < '2.0.0':
+ if vernum and bb.utils.vercmp_string_op(vernum, "2.0.0", "<"):
# Konsole from KDE 3.x
self.command = 'konsole -T "{title}" -e {command}'
- elif vernum and LooseVersion(vernum) < '16.08.1':
+ elif vernum and bb.utils.vercmp_string_op(vernum, "16.08.1", "<"):
# Konsole pre 16.08.01 Has nofork
self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
XTerminal.__init__(self, sh_cmd, title, env, d)
@@ -163,7 +162,12 @@ class Tmux(Terminal):
# devshells, if it's already there, add a new window to it.
window_name = 'devshell-%i' % os.getpid()
- self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ self.command = 'tmux new -c "{{cwd}}" -d -s {0} -n {0} "{{command}}"'
+ if not check_tmux_version('1.9'):
+ # `tmux new-session -c` was added in 1.9;
+ # older versions fail with that flag
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'
+ self.command = self.command.format(window_name)
Terminal.__init__(self, sh_cmd, title, env, d)
attach_cmd = 'tmux att -t {0}'.format(window_name)
@@ -185,7 +189,7 @@ class Custom(Terminal):
Terminal.__init__(self, sh_cmd, title, env, d)
logger.warning('Custom terminal was started.')
else:
- logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+ logger.debug('No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
@@ -216,7 +220,7 @@ def spawn_preferred(sh_cmd, title=None, env=None, d=None):
def spawn(name, sh_cmd, title=None, env=None, d=None):
"""Spawn the specified terminal, by name"""
- logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+ logger.debug('Attempting to spawn terminal "%s"', name)
try:
terminal = Registry.registry[name]
except KeyError:
@@ -253,13 +257,18 @@ def spawn(name, sh_cmd, title=None, env=None, d=None):
except OSError:
return
+def check_tmux_version(desired):
+ vernum = check_terminal_version("tmux")
+ if vernum and bb.utils.vercmp_string_op(vernum, desired, "<"):
+ return False
+ return vernum
+
def check_tmux_pane_size(tmux):
import subprocess as sub
    # On tmux versions older than 1.9, return False: there is no easy
    # way to get the height of the active pane in the current window
    # without nested formats (available from version 1.9).
- vernum = check_terminal_version("tmux")
- if vernum and LooseVersion(vernum) < '1.9':
+ if not check_tmux_version('1.9'):
return False
try:
p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
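
Both the Konsole and Tmux checks now route through bb.utils.vercmp_string_op() instead of the removed distutils LooseVersion. For intuition only, a rough pure-Python stand-in for the "<" comparison on dotted numeric versions (BitBake's real implementation handles far more version formats):

    def version_less_than(a, b):
        # '1.9' < '1.10' compares numerically per component, not as strings.
        def parts(v):
            return [int(p) for p in v.split('.') if p.isdigit()]
        return parts(a) < parts(b)

    assert version_less_than('1.9', '16.08.1')
    assert not version_less_than('2.0.0', '1.9')
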
diff --git a/meta/lib/oe/useradd.py b/meta/lib/oe/useradd.py
index 8fc77568ff..3caa3f851a 100644
--- a/meta/lib/oe/useradd.py
+++ b/meta/lib/oe/useradd.py
@@ -45,7 +45,6 @@ def build_useradd_parser():
parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
@@ -63,7 +62,6 @@ def build_groupadd_parser():
parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
- parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
parser.add_argument("-r", "--system", help="create a system account", action="store_true")
parser.add_argument("GROUP", help="Group name of the new group")
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
index 13f4271da0..46fc76c261 100644
--- a/meta/lib/oe/utils.py
+++ b/meta/lib/oe/utils.py
@@ -193,7 +193,7 @@ def parallel_make(d, makeinst=False):
return int(v)
- return None
+ return ''
def parallel_make_argument(d, fmt, limit=None, makeinst=False):
"""
@@ -221,12 +221,12 @@ def packages_filter_out_system(d):
PN-dbg PN-doc PN-locale-en-gb removed.
"""
pn = d.getVar('PN')
- blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
+ pkgfilter = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev', '-src')]
localepkg = pn + "-locale-"
pkgs = []
for pkg in d.getVar('PACKAGES').split():
- if pkg not in blacklist and localepkg not in pkg:
+ if pkg not in pkgfilter and localepkg not in pkg:
pkgs.append(pkg)
return pkgs
@@ -248,10 +248,9 @@ def trim_version(version, num_parts=2):
trimmed = ".".join(parts[:num_parts])
return trimmed
-def cpu_count(at_least=1):
- import multiprocessing
- cpus = multiprocessing.cpu_count()
- return max(cpus, at_least)
+def cpu_count(at_least=1, at_most=64):
+ cpus = len(os.sched_getaffinity(0))
+ return max(min(cpus, at_most), at_least)
def execute_pre_post_process(d, cmds):
if cmds is None:
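
The cpu_count() change counts the CPUs the build process may actually run on (its affinity mask) rather than every CPU in the machine, and clamps the result. A hedged illustration of the difference; os.sched_getaffinity() is Linux-only:

    import os
    import multiprocessing

    total = multiprocessing.cpu_count()         # every CPU in the system
    usable = len(os.sched_getaffinity(0))       # CPUs this process may use,
                                                # e.g. under taskset/cpusets
    at_least, at_most = 1, 64
    print(max(min(usable, at_most), at_least))  # what the new code returns
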
@@ -345,7 +344,29 @@ def squashspaces(string):
import re
return re.sub(r"\s+", " ", string).strip()
-def format_pkg_list(pkg_dict, ret_format=None):
+def rprovides_map(pkgdata_dir, pkg_dict):
+ # Map file -> pkg provider
+ rprov_map = {}
+
+ for pkg in pkg_dict:
+ path_to_pkgfile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.isfile(path_to_pkgfile):
+ continue
+ with open(path_to_pkgfile) as f:
+ for line in f:
+ if line.startswith('RPROVIDES') or line.startswith('FILERPROVIDES'):
+ # List all components provided by pkg.
+ # Exclude version strings, i.e. those starting with (
+ provides = [x for x in line.split()[1:] if not x.startswith('(')]
+ for prov in provides:
+ if prov in rprov_map:
+ rprov_map[prov].append(pkg)
+ else:
+ rprov_map[prov] = [pkg]
+
+ return rprov_map
+
+def format_pkg_list(pkg_dict, ret_format=None, pkgdata_dir=None):
output = []
if ret_format == "arch":
@@ -358,9 +379,15 @@ def format_pkg_list(pkg_dict, ret_format=None):
for pkg in sorted(pkg_dict):
output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
elif ret_format == "deps":
+ rprov_map = rprovides_map(pkgdata_dir, pkg_dict)
for pkg in sorted(pkg_dict):
for dep in pkg_dict[pkg]["deps"]:
- output.append("%s|%s" % (pkg, dep))
+ if dep in rprov_map:
+ # There could be multiple providers within the image
+ for pkg_provider in rprov_map[dep]:
+ output.append("%s|%s * %s [RPROVIDES]" % (pkg, pkg_provider, dep))
+ else:
+ output.append("%s|%s" % (pkg, dep))
else:
for pkg in sorted(pkg_dict):
output.append(pkg)
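
With the rprovides_map() support above, the "deps" output format annotates dependencies that are only satisfied via RPROVIDES. A toy, self-contained illustration of the resulting shape (package names invented):

    pkg_dict = {"app": {"deps": ["libfoo", "libbar"]}}
    rprov_map = {"libfoo": ["libfoo1"]}   # libfoo1 RPROVIDES libfoo

    output = []
    for pkg in sorted(pkg_dict):
        for dep in pkg_dict[pkg]["deps"]:
            if dep in rprov_map:
                for provider in rprov_map[dep]:
                    output.append("%s|%s * %s [RPROVIDES]" % (pkg, provider, dep))
            else:
                output.append("%s|%s" % (pkg, dep))

    print(output)
    # ['app|libfoo1 * libfoo [RPROVIDES]', 'app|libbar']
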
@@ -456,8 +483,8 @@ from threading import Thread
class ThreadedWorker(Thread):
"""Thread executing tasks from a given tasks queue"""
- def __init__(self, tasks, worker_init, worker_end):
- Thread.__init__(self)
+ def __init__(self, tasks, worker_init, worker_end, name=None):
+ Thread.__init__(self, name=name)
self.tasks = tasks
self.daemon = True
@@ -481,19 +508,19 @@ class ThreadedWorker(Thread):
try:
func(self, *args, **kargs)
except Exception as e:
- print(e)
+ # Eat all exceptions
+ bb.mainlogger.debug("Worker task raised %s" % e, exc_info=e)
finally:
self.tasks.task_done()
class ThreadedPool:
"""Pool of threads consuming tasks from a queue"""
- def __init__(self, num_workers, num_tasks, worker_init=None,
- worker_end=None):
+ def __init__(self, num_workers, num_tasks, worker_init=None, worker_end=None, name="ThreadedPool-"):
self.tasks = Queue(num_tasks)
self.workers = []
- for _ in range(num_workers):
- worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+ for i in range(num_workers):
+ worker = ThreadedWorker(self.tasks, worker_init, worker_end, name=name + str(i))
self.workers.append(worker)
def start(self):
@@ -510,17 +537,6 @@ class ThreadedPool:
for worker in self.workers:
worker.join()
-def write_ld_so_conf(d):
- # Some utils like prelink may not have the correct target library paths
- # so write an ld.so.conf to help them
- ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
- if os.path.exists(ldsoconf):
- bb.utils.remove(ldsoconf)
- bb.utils.mkdirhier(os.path.dirname(ldsoconf))
- with open(ldsoconf, "w") as f:
- f.write(d.getVar("base_libdir") + '\n')
- f.write(d.getVar("libdir") + '\n')
-
class ImageQAFailed(Exception):
def __init__(self, description, name=None, logfile=None):
self.description = description
@@ -537,3 +553,34 @@ class ImageQAFailed(Exception):
def sh_quote(string):
import shlex
return shlex.quote(string)
+
+def directory_size(root, blocksize=4096):
+ """
+ Calculate the size of the directory, taking into account hard links,
+ rounding up every size to multiples of the blocksize.
+ """
+ def roundup(size):
+ """
+ Round the size up to the nearest multiple of the block size.
+ """
+ import math
+ return math.ceil(size / blocksize) * blocksize
+
+ def getsize(filename):
+ """
+ Get the size of the filename, not following symlinks, taking into
+ account hard links.
+ """
+ stat = os.lstat(filename)
+ if stat.st_ino not in inodes:
+ inodes.add(stat.st_ino)
+ return stat.st_size
+ else:
+ return 0
+
+ inodes = set()
+ total = 0
+ for root, dirs, files in os.walk(root):
+ total += sum(roundup(getsize(os.path.join(root, name))) for name in files)
+ total += roundup(getsize(root))
+ return total
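
The new directory_size() rounds every file up to whole filesystem blocks and counts each inode only once, so hard-linked files are not double-counted. A usage sketch, assuming an OE build environment where oe.utils is importable; the printed value assumes a 4096-byte block size:

    import os
    import tempfile
    from oe.utils import directory_size  # assumes OE's meta/lib on sys.path

    d = tempfile.mkdtemp()
    with open(os.path.join(d, "a"), "wb") as f:
        f.write(b"x" * 5000)                             # rounds up to 8192
    os.link(os.path.join(d, "a"), os.path.join(d, "b"))  # same inode

    # One data file (8192) plus the directory entry itself (typically
    # 4096), with the hard link contributing nothing extra.
    print(directory_size(d))
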
diff --git a/meta/lib/oeqa/controllers/masterimage.py b/meta/lib/oeqa/controllers/controllerimage.py
index 0bf5917e48..78a4aaff87 100644
--- a/meta/lib/oeqa/controllers/masterimage.py
+++ b/meta/lib/oeqa/controllers/controllerimage.py
@@ -3,13 +3,13 @@
# SPDX-License-Identifier: MIT
#
# This module adds support to testimage.bbclass to deploy images and run
-# tests using a "master image" - this is a "known good" image that is
+# tests using a "controller image" - this is a "known good" image that is
# installed onto the device as part of initial setup and will be booted into
# with no interaction; we can then use it to deploy the image to be tested
# to a second partition before running the tests.
#
-# For an example master image, see core-image-testmaster
-# (meta/recipes-extended/images/core-image-testmaster.bb)
+# For an example controller image, see core-image-testcontroller
+# (meta/recipes-extended/images/core-image-testcontroller.bb)
import os
import bb
@@ -24,12 +24,12 @@ from oeqa.utils import CommandError
from abc import ABCMeta, abstractmethod
-class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
+class ControllerImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
supported_image_fstypes = ['tar.gz', 'tar.bz2']
def __init__(self, d):
- super(MasterImageHardwareTarget, self).__init__(d)
+ super(ControllerImageHardwareTarget, self).__init__(d)
# target ip
addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
@@ -61,8 +61,8 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
if not os.path.isfile(self.kernel):
bb.fatal("No kernel found. Expected path: %s" % self.kernel)
- # master ssh connection
- self.master = None
+ # controller ssh connection
+ self.controller = None
# if the user knows what they are doing, then by all means...
self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
self.deploy_cmds = None
@@ -119,19 +119,19 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
def deploy(self):
# base class just sets the ssh log file for us
- super(MasterImageHardwareTarget, self).deploy()
- self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
- status, output = self.master.run("cat /etc/masterimage")
+ super(ControllerImageHardwareTarget, self).deploy()
+ self.controller = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
+ status, output = self.controller.run("cat /etc/controllerimage")
if status != 0:
- # We're not booted into the master image, so try rebooting
- bb.plain("%s - booting into the master image" % self.pn)
+ # We're not booted into the controller image, so try rebooting
+ bb.plain("%s - booting into the controller image" % self.pn)
self.power_ctl("cycle")
self._wait_until_booted()
bb.plain("%s - deploying image on target" % self.pn)
- status, output = self.master.run("cat /etc/masterimage")
+ status, output = self.controller.run("cat /etc/controllerimage")
if status != 0:
- bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
+ bb.fatal("No ssh connectivity or target isn't running a controller image.\n%s" % output)
if self.user_cmds:
self.deploy_cmds = self.user_cmds.split("\n")
try:
@@ -156,10 +156,10 @@ class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta
def stop(self):
bb.plain("%s - reboot/powercycle target" % self.pn)
- self.power_cycle(self.master)
+ self.power_cycle(self.controller)
-class SystemdbootTarget(MasterImageHardwareTarget):
+class SystemdbootTarget(ControllerImageHardwareTarget):
def __init__(self, d):
super(SystemdbootTarget, self).__init__(d)
@@ -184,16 +184,16 @@ class SystemdbootTarget(MasterImageHardwareTarget):
def _deploy(self):
# make sure these aren't mounted
- self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
+ self.controller.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
# from now on, every deploy cmd should return 0
# else an exception will be thrown by sshcontrol
- self.master.ignore_status = False
- self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
- self.master.copy_to(self.kernel, "~/test-kernel")
+ self.controller.ignore_status = False
+ self.controller.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
+ self.controller.copy_to(self.kernel, "~/test-kernel")
for cmd in self.deploy_cmds:
- self.master.run(cmd)
+ self.controller.run(cmd)
def _start(self, params=None):
- self.power_cycle(self.master)
+ self.power_cycle(self.controller)
# there are better ways than a timeout but this should work for now
time.sleep(120)
diff --git a/meta/lib/oeqa/core/case.py b/meta/lib/oeqa/core/case.py
index aae451fef2..bc4446a938 100644
--- a/meta/lib/oeqa/core/case.py
+++ b/meta/lib/oeqa/core/case.py
@@ -43,8 +43,13 @@ class OETestCase(unittest.TestCase):
clss.tearDownClassMethod()
def _oeSetUp(self):
- for d in self.decorators:
- d.setUpDecorator()
+ try:
+ for d in self.decorators:
+ d.setUpDecorator()
+ except:
+ for d in self.decorators:
+ d.tearDownDecorator()
+ raise
self.setUpMethod()
def _oeTearDown(self):
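
The _oeSetUp() change guarantees decorator teardowns run even when a later decorator's setup raises, so state such as an installed SIGALRM handler is not leaked into subsequent tests. The same rollback pattern in isolation (the decorator objects are hypothetical):

    def run_setups(decorators):
        # On any setup failure, tear down *all* decorators, mirroring the
        # patch above, then re-raise the original exception.
        try:
            for d in decorators:
                d.setUpDecorator()
        except:
            for d in decorators:
                d.tearDownDecorator()
            raise
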
diff --git a/meta/lib/oeqa/core/context.py b/meta/lib/oeqa/core/context.py
index 7d3fa3b84a..2abe353d27 100644
--- a/meta/lib/oeqa/core/context.py
+++ b/meta/lib/oeqa/core/context.py
@@ -31,6 +31,9 @@ class OETestContext(object):
self._registry = {}
self._registry['cases'] = collections.OrderedDict()
+ self.results = unittest.TestResult()
+ unittest.registerResult(self.results)
+
def _read_modules_from_manifest(self, manifest):
if not os.path.exists(manifest):
raise OEQAMissingManifest("Manifest does not exist on %s" % manifest)
@@ -82,6 +85,7 @@ class OETestContext(object):
self.skipTests(skips)
self._run_start_time = time.time()
+ self._run_end_time = self._run_start_time
if not processes:
self.runner.buffer = True
result = self.runner.run(self.prepareSuite(self.suites, processes))
diff --git a/meta/lib/oeqa/core/decorator/oetimeout.py b/meta/lib/oeqa/core/decorator/oetimeout.py
index df90d1c798..5e6873ad48 100644
--- a/meta/lib/oeqa/core/decorator/oetimeout.py
+++ b/meta/lib/oeqa/core/decorator/oetimeout.py
@@ -24,5 +24,6 @@ class OETimeout(OETestDecorator):
def tearDownDecorator(self):
signal.alarm(0)
- signal.signal(signal.SIGALRM, self.alarmSignal)
- self.logger.debug("Removed SIGALRM handler")
+ if hasattr(self, 'alarmSignal'):
+ signal.signal(signal.SIGALRM, self.alarmSignal)
+ self.logger.debug("Removed SIGALRM handler")
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index 00b7d0bb12..d50690ab37 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -195,6 +195,20 @@ class OETestResult(_TestResult):
report['log'] = log
if duration:
report['duration'] = duration
+
+ alltags = []
+ # pull tags from the case class
+ if hasattr(case, "__oeqa_testtags"):
+ alltags.extend(getattr(case, "__oeqa_testtags"))
+ # pull tags from the method itself
+ test_name = case._testMethodName
+ if hasattr(case, test_name):
+ method = getattr(case, test_name)
+ if hasattr(method, "__oeqa_testtags"):
+ alltags.extend(getattr(method, "__oeqa_testtags"))
+ if alltags:
+ report['oetags'] = alltags
+
if dump_streams and case.id() in self.logged_output:
(stdout, stderr) = self.logged_output[case.id()]
report['stdout'] = stdout
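
The report hunk above merges test tags declared on the case class with tags declared on the individual test method. A standalone sketch of that attribute lookup; the case object here is hypothetical:

    def collect_tags(case):
        # Tags set on the class apply to every test; method-level tags
        # are appended for the specific test being reported.
        alltags = list(getattr(case, "__oeqa_testtags", []))
        method = getattr(case, case._testMethodName, None)
        if method is not None:
            alltags.extend(getattr(method, "__oeqa_testtags", []))
        return alltags
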
diff --git a/meta/lib/oeqa/core/target/qemu.py b/meta/lib/oeqa/core/target/qemu.py
index 295e8765e9..79fd724f7d 100644
--- a/meta/lib/oeqa/core/target/qemu.py
+++ b/meta/lib/oeqa/core/target/qemu.py
@@ -8,10 +8,14 @@ import os
import sys
import signal
import time
+import glob
+import subprocess
from collections import defaultdict
from .ssh import OESSHTarget
from oeqa.utils.qemurunner import QemuRunner
+from oeqa.utils.dump import MonitorDumper
+from oeqa.utils.dump import TargetDumper
supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
@@ -20,7 +24,7 @@ class OEQemuTarget(OESSHTarget):
port=None, machine='', rootfs='', kernel='', kvm=False, slirp=False,
dump_dir='', dump_host_cmds='', display='', bootlog='',
tmpdir='', dir_image='', boottime=60, serial_ports=2,
- boot_patterns = defaultdict(str), ovmf=False, **kwargs):
+ boot_patterns = defaultdict(str), ovmf=False, tmpfsdir=None, **kwargs):
super(OEQemuTarget, self).__init__(logger, None, server_ip, timeout,
user, port)
@@ -34,6 +38,8 @@ class OEQemuTarget(OESSHTarget):
self.ovmf = ovmf
self.use_slirp = slirp
self.boot_patterns = boot_patterns
+ self.dump_dir = dump_dir
+ self.bootlog = bootlog
self.runner = QemuRunner(machine=machine, rootfs=rootfs, tmpdir=tmpdir,
deploy_dir_image=dir_image, display=display,
@@ -41,7 +47,15 @@ class OEQemuTarget(OESSHTarget):
use_kvm=kvm, use_slirp=slirp, dump_dir=dump_dir,
dump_host_cmds=dump_host_cmds, logger=logger,
serial_ports=serial_ports, boot_patterns = boot_patterns,
- use_ovmf=ovmf)
+ use_ovmf=ovmf, tmpfsdir=tmpfsdir)
+ dump_monitor_cmds = kwargs.get("testimage_dump_monitor")
+ self.monitor_dumper = MonitorDumper(dump_monitor_cmds, dump_dir, self.runner)
+ if self.monitor_dumper:
+ self.monitor_dumper.create_dir("qmp")
+
+ dump_target_cmds = kwargs.get("testimage_dump_target")
+ self.target_dumper = TargetDumper(dump_target_cmds, dump_dir, self.runner)
+ self.target_dumper.create_dir("qemu")
def start(self, params=None, extra_bootparams=None, runqemuparams=''):
if self.use_slirp and not self.server_ip:
@@ -64,7 +78,28 @@ class OEQemuTarget(OESSHTarget):
self.server_ip = self.runner.server_ip
else:
self.stop()
- raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
+ # Display the first 20 lines of the host top dumps and the
+ # last 20 lines of the boot log when the target fails to boot.
+ topfile = glob.glob(self.dump_dir + "/*_qemu/host_*_top")
+ msg = "\n\n===== start: snippet =====\n\n"
+ for f in topfile:
+ msg += "file: %s\n\n" % f
+ with open(f) as tf:
+ for x in range(20):
+ msg += next(tf)
+ msg += "\n\n===== end: snippet =====\n\n"
+ blcmd = ["tail", "-20", self.bootlog]
+ msg += "===== start: snippet =====\n\n"
+ try:
+ out = subprocess.check_output(blcmd, stderr=subprocess.STDOUT, timeout=1).decode('utf-8')
+ msg += "file: %s\n\n" % self.bootlog
+ msg += out
+ except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError) as err:
+ msg += "Error running command: %s\n%s\n" % (blcmd, err)
+ msg += "\n\n===== end: snippet =====\n"
+
+ raise RuntimeError("FAILED to start qemu - check the task log and the boot log %s" % (msg))
def stop(self):
self.runner.stop()
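
On a failed qemu start, the code above shells out to tail and reads the host top dumps to enrich the RuntimeError. A pure-Python equivalent of that snippet gathering, shown only as a sketch (paths follow the glob used in the patch):

    import glob

    def failure_snippet(dump_dir, bootlog, lines=20):
        msg = ["===== start: snippet ====="]
        for f in glob.glob(dump_dir + "/*_qemu/host_*_top"):
            msg.append("file: %s" % f)
            with open(f) as tf:
                msg.extend(l.rstrip("\n") for l in tf.readlines()[:lines])
        try:
            with open(bootlog) as bl:
                msg.append("file: %s" % bootlog)
                msg.extend(l.rstrip("\n") for l in bl.readlines()[-lines:])
        except OSError as err:
            msg.append("Error reading %s: %s" % (bootlog, err))
        msg.append("===== end: snippet =====")
        return "\n".join(msg)
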
diff --git a/meta/lib/oeqa/core/target/ssh.py b/meta/lib/oeqa/core/target/ssh.py
index 090b40a814..f956a7744f 100644
--- a/meta/lib/oeqa/core/target/ssh.py
+++ b/meta/lib/oeqa/core/target/ssh.py
@@ -43,6 +43,8 @@ class OESSHTarget(OETarget):
if port:
self.ssh = self.ssh + [ '-p', port ]
self.scp = self.scp + [ '-P', port ]
+ self._monitor_dumper = None
+ self.target_dumper = None
def start(self, **kwargs):
pass
@@ -50,6 +52,15 @@ class OESSHTarget(OETarget):
def stop(self, **kwargs):
pass
+ @property
+ def monitor_dumper(self):
+ return self._monitor_dumper
+
+ @monitor_dumper.setter
+ def monitor_dumper(self, dumper):
+ self._monitor_dumper = dumper
+ self.monitor_dumper.dump_monitor()
+
def _run(self, command, timeout=None, ignore_status=True):
"""
Runs command in target using SSHProcess.
@@ -87,7 +98,15 @@ class OESSHTarget(OETarget):
processTimeout = self.timeout
status, output = self._run(sshCmd, processTimeout, True)
- self.logger.debug('Command: %s\nOutput: %s\n' % (command, output))
+ self.logger.debug('Command: %s\nStatus: %d Output: %s\n' % (command, status, output))
+ if (status == 255) and (('No route to host') in output):
+ if self.monitor_dumper:
+ self.monitor_dumper.dump_monitor()
+ if status == 255:
+ if self.target_dumper:
+ self.target_dumper.dump_target()
+ if self.monitor_dumper:
+ self.monitor_dumper.dump_monitor()
return (status, output)
def copyTo(self, localSrc, remoteDst):
@@ -107,13 +126,16 @@ class OESSHTarget(OETarget):
scpCmd = self.scp + [localSrc, remotePath]
return self._run(scpCmd, ignore_status=False)
- def copyFrom(self, remoteSrc, localDst):
+ def copyFrom(self, remoteSrc, localDst, warn_on_failure=False):
"""
Copy file from target.
"""
remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc)
scpCmd = self.scp + [remotePath, localDst]
- return self._run(scpCmd, ignore_status=False)
+ (status, output) = self._run(scpCmd, ignore_status=warn_on_failure)
+ if warn_on_failure and status:
+ self.logger.warning("Copy returned non-zero exit status %d:\n%s" % (status, output))
+ return (status, output)
def copyDirTo(self, localSrc, remoteDst):
"""
diff --git a/meta/lib/oeqa/core/tests/cases/timeout.py b/meta/lib/oeqa/core/tests/cases/timeout.py
index 5dfecc7b7c..69cf969a67 100644
--- a/meta/lib/oeqa/core/tests/cases/timeout.py
+++ b/meta/lib/oeqa/core/tests/cases/timeout.py
@@ -8,6 +8,7 @@ from time import sleep
from oeqa.core.case import OETestCase
from oeqa.core.decorator.oetimeout import OETimeout
+from oeqa.core.decorator.depends import OETestDepends
class TimeoutTest(OETestCase):
@@ -19,3 +20,15 @@ class TimeoutTest(OETestCase):
def testTimeoutFail(self):
sleep(2)
self.assertTrue(True, msg='How is this possible?')
+
+
+ def testTimeoutSkip(self):
+ self.skipTest("This test needs to be skipped, so that testTimeoutDepends()'s OETestDepends kicks in")
+
+ @OETestDepends(["timeout.TimeoutTest.testTimeoutSkip"])
+ @OETimeout(3)
+ def testTimeoutDepends(self):
+ self.assertTrue(False, msg='How is this possible?')
+
+ def testTimeoutUnrelated(self):
+ sleep(6)
diff --git a/meta/lib/oeqa/core/tests/test_data.py b/meta/lib/oeqa/core/tests/test_data.py
index ac74098b78..acd726f3a0 100755
--- a/meta/lib/oeqa/core/tests/test_data.py
+++ b/meta/lib/oeqa/core/tests/test_data.py
@@ -33,7 +33,7 @@ class TestData(TestBase):
def test_data_fail_wrong_variable(self):
expectedError = 'AssertionError'
- d = {'IMAGE' : 'core-image-sato', 'ARCH' : 'arm'}
+ d = {'IMAGE' : 'core-image-weston', 'ARCH' : 'arm'}
tc = self._testLoader(d=d, modules=self.modules)
results = tc.runTests()
diff --git a/meta/lib/oeqa/core/tests/test_decorators.py b/meta/lib/oeqa/core/tests/test_decorators.py
index b798bf7d33..5095f39948 100755
--- a/meta/lib/oeqa/core/tests/test_decorators.py
+++ b/meta/lib/oeqa/core/tests/test_decorators.py
@@ -133,5 +133,11 @@ class TestTimeoutDecorator(TestBase):
msg = "OETestTimeout didn't restore SIGALRM"
self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+ def test_timeout_cancel(self):
+ tests = ['timeout.TimeoutTest.testTimeoutSkip', 'timeout.TimeoutTest.testTimeoutDepends', 'timeout.TimeoutTest.testTimeoutUnrelated']
+ msg = 'Unrelated test failed to complete'
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
+
if __name__ == '__main__':
unittest.main()
diff --git a/meta/lib/oeqa/core/utils/concurrencytest.py b/meta/lib/oeqa/core/utils/concurrencytest.py
index b2eb68fb02..161a2f6e90 100644
--- a/meta/lib/oeqa/core/utils/concurrencytest.py
+++ b/meta/lib/oeqa/core/utils/concurrencytest.py
@@ -48,11 +48,15 @@ _all__ = [
#
class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
- def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests):
+ def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests, output, finalresult):
super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
self.threadnum = threadnum
self.totalinprocess = totalinprocess
self.totaltests = totaltests
+ self.buffer = True
+ self.outputbuf = output
+ self.finalresult = finalresult
+ self.finalresult.buffer = True
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
@@ -71,6 +75,8 @@ class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
test.id())
finally:
self.semaphore.release()
+ self.finalresult._stderr_buffer = io.StringIO(initial_value=self.outputbuf.getvalue().decode("utf-8"))
+ self.finalresult._stdout_buffer = io.StringIO()
super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
class ProxyTestResult:
@@ -190,28 +196,20 @@ class ConcurrentTestSuite(unittest.TestSuite):
self.removefunc = removefunc
def run(self, result):
- tests, totaltests = fork_for_tests(self.processes, self)
+ testservers, totaltests = fork_for_tests(self.processes, self)
try:
threads = {}
queue = Queue()
semaphore = threading.Semaphore(1)
result.threadprogress = {}
- for i, (test, testnum) in enumerate(tests):
+ for i, (testserver, testnum, output) in enumerate(testservers):
result.threadprogress[i] = []
process_result = BBThreadsafeForwardingResult(
ExtraResultsDecoderTestResult(result),
- semaphore, i, testnum, totaltests)
- # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
- # as per default in parent code
- process_result.buffer = True
- # We have to add a buffer object to stdout to keep subunit happy
- process_result._stderr_buffer = io.StringIO()
- process_result._stderr_buffer.buffer = dummybuf(process_result._stderr_buffer)
- process_result._stdout_buffer = io.StringIO()
- process_result._stdout_buffer.buffer = dummybuf(process_result._stdout_buffer)
+ semaphore, i, testnum, totaltests, output, result)
reader_thread = threading.Thread(
- target=self._run_test, args=(test, process_result, queue))
- threads[test] = reader_thread, process_result
+ target=self._run_test, args=(testserver, process_result, queue))
+ threads[testserver] = reader_thread, process_result
reader_thread.start()
while threads:
finished_test = queue.get()
@@ -222,13 +220,13 @@ class ConcurrentTestSuite(unittest.TestSuite):
process_result.stop()
raise
finally:
- for test in tests:
- test[0]._stream.close()
+ for testserver in testservers:
+ testserver[0]._stream.close()
- def _run_test(self, test, process_result, queue):
+ def _run_test(self, testserver, process_result, queue):
try:
try:
- test.run(process_result)
+ testserver.run(process_result)
except Exception:
# The run logic itself failed
case = testtools.ErrorHolder(
@@ -236,10 +234,10 @@ class ConcurrentTestSuite(unittest.TestSuite):
error=sys.exc_info())
case.run(process_result)
finally:
- queue.put(test)
+ queue.put(testserver)
def fork_for_tests(concurrency_num, suite):
- result = []
+ testservers = []
if 'BUILDDIR' in os.environ:
selftestdir = get_test_layer()
@@ -273,10 +271,11 @@ def fork_for_tests(concurrency_num, suite):
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())
+ # Send stdout/stderr over the stream
+ os.dup2(c2pwrite, sys.stdout.fileno())
+ os.dup2(c2pwrite, sys.stderr.fileno())
+
subunit_client = TestProtocolClient(stream)
- # Force buffering of stdout/stderr so the console doesn't get corrupted by test output
- # as per default in parent code
- subunit_client.buffer = True
subunit_result = AutoTimingTestResultDecorator(subunit_client)
unittest_result = process_suite.run(ExtraResultsEncoderTestResult(subunit_result))
if ourpid != os.getpid():
@@ -306,9 +305,11 @@ def fork_for_tests(concurrency_num, suite):
else:
os.close(c2pwrite)
stream = os.fdopen(c2pread, 'rb', 1)
- test = ProtocolTestCase(stream)
- result.append((test, numtests))
- return result, totaltests
+ # Collect stdout/stderr into an io buffer
+ output = io.BytesIO()
+ testserver = ProtocolTestCase(stream, passthrough=output)
+ testservers.append((testserver, numtests, output))
+ return testservers, totaltests
def partition_tests(suite, count):
# Keep tests from the same class together but allow tests from modules
diff --git a/meta/lib/oeqa/files/testresults/testresults.json b/meta/lib/oeqa/files/testresults/testresults.json
index 1a62155618..86e5e412af 100644
--- a/meta/lib/oeqa/files/testresults/testresults.json
+++ b/meta/lib/oeqa/files/testresults/testresults.json
@@ -1,5 +1,5 @@
{
- "runtime_core-image-minimal_qemuarm_20181225195701": {
+ "runtime_core-image-minimal:qemuarm_20181225195701": {
"configuration": {
"DISTRO": "poky",
"HOST_DISTRO": "ubuntu-16.04",
diff --git a/meta/lib/oeqa/manual/bsp-hw.json b/meta/lib/oeqa/manual/bsp-hw.json
index a9bc7d4501..308a0807f3 100644
--- a/meta/lib/oeqa/manual/bsp-hw.json
+++ b/meta/lib/oeqa/manual/bsp-hw.json
@@ -26,7 +26,7 @@
"expected_results": ""
},
"5": {
- "action": "Remove USB, and reboot into new installed system. \nNote: If installation was successfully completed and received this message \"\"(sdx): Volume was not properly unmounted...Please run fsck.\"\" ignore it because this was whitelisted according to bug 9652.",
+ "action": "Remove USB, and reboot into new installed system. \nNote: If installation was successfully completed and received this message \"\"(sdx): Volume was not properly unmounted...Please run fsck.\"\" ignore it because this was allowed according to bug 9652.",
"expected_results": ""
}
},
@@ -61,92 +61,6 @@
},
{
"test": {
- "@alias": "bsps-hw.bsps-hw.boot_from_runlevel_3",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 3 by default, this is done by changing the line \n\n\nid:5:initdefault \n\nto \n\nid:3:initdefault \n\n",
- "expected_results": ""
- },
- "2": {
- "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
- "expected_results": ""
- },
- "3": {
- "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
- "expected_results": ""
- },
- "4": {
- "action": "Press \"F10\" or \"ctrl+x\" to boot system",
- "expected_results": ""
- },
- "5": {
- "action": "If system ask you for a login type \"root\"",
- "expected_results": "System should boot to run level 3, showing the command prompt."
- }
- },
- "summary": "boot_from_runlevel_3"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.boot_from_runlevel_5",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "Boot into system and edit /etc/inittab to make sure that system enter at the run level 5 by default, this is done by changing the line \n\nid:3:initdefault \n\nto \n\nid:5:initdefault \n\n",
- "expected_results": ""
- },
- "2": {
- "action": "Reboot system, and press \"Tab\" to enter \"grub\"",
- "expected_results": ""
- },
- "3": {
- "action": "Get into the \"kernel\" line with the edit option \"e\" and add \"psplash=false text\" at the end line.",
- "expected_results": ""
- },
- "4": {
- "action": "Press \"F10\" or \"ctrl+x\" to boot system \nNote: The test is only for sato image.",
- "expected_results": "System should boot to runlevel 5 ."
- }
- },
- "summary": "boot_from_runlevel_5"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.shutdown_system",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "boot system",
- "expected_results": ""
- },
- "2": {
- "action": "launch terminal and run \"shutdown -h now\" or \"poweroff\"",
- "expected_results": "System can be shutdown successfully . "
- }
- },
- "summary": "shutdown_system"
- }
- },
- {
- "test": {
"@alias": "bsps-hw.bsps-hw.switch_among_multi_applications_and_desktop",
"author": [
{
@@ -177,70 +91,6 @@
},
{
"test": {
- "@alias": "bsps-hw.bsps-hw.ethernet_static_ip_set_in_connman",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "Boot the system and check internet connection is on . ",
- "expected_results": ""
- },
- "2": {
- "action": "Launch connmand-properties (up-right corner on desktop)",
- "expected_results": ""
- },
- "3": {
- "action": "Choose Ethernet device and set a valid static ip address for it. \nFor example, in our internal network, we can set as following: \nip address: 10.239.48.xxx \nMask: 255.255.255.0 \nGateway (Broadcast): 10.239.48.255",
- "expected_results": ""
- },
- "4": {
- "action": "Check the Network configuration with \"ifconfig\"",
- "expected_results": "Static IP was set successfully \n"
- },
- "5": {
- "action": "ping to another IP adress",
- "expected_results": "Ping works correclty\n"
- }
- },
- "summary": "ethernet_static_ip_set_in_connman"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.ethernet_get_IP_in_connman_via_DHCP",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "Launch connmand-properties (up-right corner on your desktop). ",
- "expected_results": ""
- },
- "2": {
- "action": "Check if Ethernet device can work properly with static IP, doing \"ping XXX.XXX.XXX.XXX\", once this is set.",
- "expected_results": "Ping executed successfully . \n\n"
- },
- "3": {
- "action": "Then choose DHCP method for Ethernet device in connmand-properties.",
- "expected_results": ""
- },
- "4": {
- "action": "Check with 'ifconfig\" and \"ping\" if Ethernet device get IP address via DHCP.",
- "expected_results": "Ethernet device can get dynamic IP address via DHCP in connmand ."
- }
- },
- "summary": "ethernet_get_IP_in_connman_via_DHCP"
- }
- },
- {
- "test": {
"@alias": "bsps-hw.bsps-hw.connman_offline_mode_in_connman-gnome",
"author": [
{
@@ -263,62 +113,6 @@
},
{
"test": {
- "@alias": "bsps-hw.bsps-hw.X_server_can_start_up_with_runlevel_5_boot",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "boot up system with default runlevel \n\n",
- "expected_results": "X server can start up well and desktop display has no problem . \n\n"
- },
- "2": {
- "action": "type runlevel at command prompt",
- "expected_results": "Output:N 5"
- }
- },
- "summary": "X_server_can_start_up_with_runlevel_5_boot"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.standby",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "boot system and launch terminal; check output of \"date\" and launch script \"continue.sh\"",
- "expected_results": ""
- },
- "2": {
- "action": "echo \"mem\" > /sys/power/state",
- "expected_results": ""
- },
- "3": {
- "action": "After system go into S3 mode, move mouse or press any key to make it resume (on NUC press power button)",
- "expected_results": ""
- },
- "4": {
- "action": "Check \"date\" and script \"continue.sh\"",
- "expected_results": ""
- },
- "5": {
- "action": "Check if application can work as normal \ncontinue.sh as below: \n \n#!/bin/sh \n \ni=1 \nwhile [ 0 ] \ndo \n echo $i \n sleep 1 \n i=$((i+1)) \ndone ",
- "expected_results": "Screen should resume back and script can run continuously incrementing the i's value from where it was before going to standby state. Date should be the same with the corresponding time increment."
- }
- },
- "summary": "standby"
- }
- },
- {
- "test": {
"@alias": "bsps-hw.bsps-hw.check_CPU_utilization_after_standby",
"author": [
{
@@ -349,88 +143,6 @@
},
{
"test": {
- "@alias": "bsps-hw.bsps-hw.Test_if_LAN_device_works_well_after_resume_from_suspend_state",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "boot system and launch terminal",
- "expected_results": ""
- },
- "2": {
- "action": "echo \"mem\" > /sys/power/state",
- "expected_results": ""
- },
- "3": {
- "action": "After system go into S3 mode, move mouse or press any key to make it resume",
- "expected_results": ""
- },
- "4": {
- "action": "check ping status \n\nNote: This TC apply only for core-image-full-cmd.",
- "expected_results": "ping should always work before/after standby"
- }
- },
- "summary": "Test_if_LAN_device_works_well_after_resume_from_suspend_state"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.Test_if_usb_hid_device_works_well_after_resume_from_suspend_state",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "boot system and launch terminal",
- "expected_results": ""
- },
- "2": {
- "action": "echo \"mem\" > /sys/power/state",
- "expected_results": ""
- },
- "3": {
- "action": "After system go into S3 mode, resume the device by pressing the power button or using HID devices",
- "expected_results": "Devices resumes "
- },
- "4": {
- "action": "check usb mouse and keyboard",
- "expected_results": "Usb mouse and keyboard should work"
- }
- },
- "summary": "Test_if_usb_hid_device_works_well_after_resume_from_suspend_state"
- }
- },
- {
- "test": {
- "@alias": "bsps-hw.bsps-hw.click_terminal_icon_on_X_desktop",
- "author": [
- {
- "email": "alexandru.c.georgescu@intel.com",
- "name": "alexandru.c.georgescu@intel.com"
- }
- ],
- "execution": {
- "1": {
- "action": "After system launch and X start up, click terminal icon on desktop",
- "expected_results": ""
- },
- "2": {
- "action": "Check if only one terminal window launched and no other problem met",
- "expected_results": "There should be no problem after launching terminal . "
- }
- },
- "summary": "click_terminal_icon_on_X_desktop"
- }
- },
- {
- "test": {
"@alias": "bsps-hw.bsps-hw.Add_multiple_files_in_media_player",
"author": [
{
@@ -883,40 +595,6 @@
},
{
"test": {
- "@alias": "bsps-hw.bsps-hw.Check_if_RTC_(Real_Time_Clock)_can_work_correctly",
- "author": [
- {
- "email": "yi.zhao@windriver.com",
- "name": "yi.zhao@windriver.com"
- }
- ],
- "execution": {
- "1": {
- "action": "Read time from RTC registers. root@localhost:/root> hwclock -r Sun Mar 22 04:05:47 1970 -0.001948 seconds ",
- "expected_results": "Can read and set the time from RTC.\n"
- },
- "2": {
- "action": "Set system current time root@localhost:/root> date 062309452008 ",
- "expected_results": ""
- },
- "3": {
- "action": "Synchronize the system current time to RTC registers root@localhost:/root> hwclock -w ",
- "expected_results": ""
- },
- "4": {
- "action": "Read time from RTC registers root@localhost:/root> hwclock -r ",
- "expected_results": ""
- },
- "5": {
- "action": "Reboot target and read time from RTC again\n",
- "expected_results": ""
- }
- },
- "summary": "Check_if_RTC_(Real_Time_Clock)_can_work_correctly"
- }
- },
- {
- "test": {
"@alias": "bsps-hw.bsps-hw.System_can_boot_up_via_NFS",
"author": [
{
diff --git a/meta/lib/oeqa/manual/build-appliance.json b/meta/lib/oeqa/manual/build-appliance.json
index 70f8c72c9b..82a556e93e 100644
--- a/meta/lib/oeqa/manual/build-appliance.json
+++ b/meta/lib/oeqa/manual/build-appliance.json
@@ -48,7 +48,7 @@
"expected_results": ""
},
"3": {
- "action": "Install a new package to the image, for example, acpid. Set the following line in conf/local.conf: IMAGE_INSTALL_append = \" acpid\"",
+ "action": "Install a new package to the image, for example, acpid. Set the following line in conf/local.conf: IMAGE_INSTALL:append = \" acpid\"",
"expected_results": ""
},
"4": {
diff --git a/meta/lib/oeqa/manual/eclipse-plugin.json b/meta/lib/oeqa/manual/eclipse-plugin.json
index d77d0e673b..6c110d0656 100644
--- a/meta/lib/oeqa/manual/eclipse-plugin.json
+++ b/meta/lib/oeqa/manual/eclipse-plugin.json
@@ -44,7 +44,7 @@
"expected_results": ""
},
"2": {
- "action": "wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/qemu (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
+ "action": "wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/ (ex:core-image-sato-sdk-qemux86-date-rootfs-tar-bz2) \nsource /opt/poky/version/environment-setup-i585-poky-linux \n\nExtract qemu with runqemu-extract-sdk /home/user/file(ex.core-image-sato-sdk-qemux86.bz2) \n/home/user/qemux86-sato-sdk \n\n",
"expected_results": " Qemu can be lauched normally."
},
"3": {
@@ -60,7 +60,7 @@
"expected_results": ""
},
"6": {
- "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget autobuilder.yoctoproject.org/pub/releases//machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
+ "action": "(d) QEMU: \nSelect this option if you will be using the QEMU emulator. Specify the Kernel matching the QEMU architecture you are using. \n wget https://downloads.yoctoproject.org/releases/yocto/yocto-$VERSION/machines/qemu/qemux86/bzImage-qemux86.bin \n e.g: /home/$USER/yocto/adt-installer/download_image/bzImage-qemux86.bin \n\n",
"expected_results": ""
},
"7": {
@@ -247,7 +247,7 @@
"execution": {
"1": {
"action": "Clone eclipse-poky source. \n \n - git clone git://git.yoctoproject.org/eclipse-poky \n\n",
- "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on http://autobuilder.yoctoproject.org/pub/releases/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
+ "expected_results": "Eclipse plugin is successfully installed \n\nDocumentation is there. For example if you have release yocto-2.0.1 you will found on https://downloads.yoctoproject.org/releases/yocto/yocto-2.0.1/eclipse-plugin/mars/ archive with documentation like org.yocto.doc-development-$date.zip \n \n"
},
"2": {
"action": "Checkout correct tag. \n\n - git checkout <eclipse-version>/<yocto-version> \n\n",
diff --git a/meta/lib/oeqa/manual/oe-core.json b/meta/lib/oeqa/manual/oe-core.json
index fb47c5ec36..4ad524d89b 100644
--- a/meta/lib/oeqa/manual/oe-core.json
+++ b/meta/lib/oeqa/manual/oe-core.json
@@ -80,7 +80,7 @@
"expected_results": ""
},
"7": {
- "action": "Run command:./configure && make ",
+ "action": "Run command:./configure ${CONFIGUREOPTS} && make ",
"expected_results": "Verify that \"matchbox-desktop\" binary file was created successfully under \"src/\" directory "
},
"8": {
diff --git a/meta/lib/oeqa/manual/sdk.json b/meta/lib/oeqa/manual/sdk.json
index 434982f7f5..21d892d26d 100644
--- a/meta/lib/oeqa/manual/sdk.json
+++ b/meta/lib/oeqa/manual/sdk.json
@@ -26,7 +26,7 @@
"expected_results": "Expect both qemu to boot up successfully."
}
},
- "summary": "test_install_cross_toolchain_can_run_multiple_qemu_for_x86"
+ "summary": "test_install_cross_toolchain_can_run_multiple_qemu_for:x86"
}
}
] \ No newline at end of file
diff --git a/meta/lib/oeqa/manual/toaster-managed-mode.json b/meta/lib/oeqa/manual/toaster-managed-mode.json
index 12374c7c64..1a71985c3c 100644
--- a/meta/lib/oeqa/manual/toaster-managed-mode.json
+++ b/meta/lib/oeqa/manual/toaster-managed-mode.json
@@ -136,7 +136,7 @@
"expected_results": ""
},
"3": {
- "action": "Check that default values are as follows: \n\tDISTRO - poky \n\tIMAGE_FSTYPES - ext3 jffs2 tar.bz2 \n\tIMAGE_INSTALL_append - \"Not set\" \n\tPACKAGE_CLASES - package_rpm \n SSTATE_DIR - /homeDirectory/poky/sstate-cache \n\n",
+ "action": "Check that default values are as follows: \n\tDISTRO - poky \n\tIMAGE_FSTYPES - ext3 jffs2 tar.bz2 \n\tIMAGE_INSTALL:append - \"Not set\" \n\tPACKAGE_CLASSES - package_rpm \n SSTATE_DIR - /homeDirectory/poky/sstate-cache \n\n",
"expected_results": ""
},
"4": {
@@ -186,7 +186,7 @@
"expected_results": ""
},
"7": {
- "action": "IMAGE_INSTALL_append: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes a text field, populated with the current value of the variable. \n\n\t- check that the save button is disabled when the text field is empty \n\t- insert test in the text field (for example \"package1\") and hit save; be aware that there is no input validation for this variable \n\t- check that a new \"delete\" icon(a trashcan) has appeared next to the pen icon \n\t- check that clicking on the trashcan icon resets the value to \"Not set\" and makes the trashcan icon dissapear \n\n",
+ "action": "IMAGE_INSTALL:append: \n\t- check that the \"change\" icon is present (represented by a pen icon) \n\t- click on the \"change\" icon and check that the variable becomes a text field, populated with the current value of the variable. \n\n\t- check that the save button is disabled when the text field is empty \n\t- insert test in the text field (for example \"package1\") and hit save; be aware that there is no input validation for this variable \n\t- check that a new \"delete\" icon(a trashcan) has appeared next to the pen icon \n\t- check that clicking on the trashcan icon resets the value to \"Not set\" and makes the trashcan icon dissapear \n\n",
"expected_results": ""
},
"8": {
@@ -1574,7 +1574,7 @@
"expected_results": "Open bitbake variables page. \n\n\t"
},
"5": {
- "action": "Click on change button for IMAGE_INSTALL_append and add a variable (ex: acpid). \n\n",
+ "action": "Click on change button for IMAGE_INSTALL:append and add a variable (ex: acpid). \n\n",
"expected_results": "Variable added. \n\n\t"
},
"6": {
@@ -1590,7 +1590,7 @@
"expected_results": "You should get results for ssh packages."
}
},
- "summary": "Test_IMAGE_INSTALL_append_variable"
+ "summary": "Test_IMAGE_INSTALL:append_variable"
}
},
{
@@ -2348,7 +2348,7 @@
"expected_results": ""
},
"3": {
- "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. ",
+ "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base) to name a few. ",
"expected_results": " All recipes are built correctly \n\n"
},
"4": {
@@ -2382,7 +2382,7 @@
"expected_results": ""
},
"3": {
- "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. \n\n",
+ "action": "Build 6 recipes example (core-image-sato, core-image-minimal, core-image-base) to name a few. \n\n",
"expected_results": "All recipes are built correctly \n\n"
},
"4": {
@@ -2420,7 +2420,7 @@
"expected_results": ""
},
"3": {
- "action": "Build 4 recipes example (core-image-sato, core-image-minimal, core-image-base, core-image-clutter) to name a few. \n\n",
+ "action": "Build 4 recipes example (core-image-sato, core-image-minimal, core-image-base) to name a few. \n\n",
"expected_results": " All recipes are built correctly \n\n"
},
"4": {
@@ -2569,4 +2569,4 @@
"summary": "Download_task_log"
}
}
-] \ No newline at end of file
+]
diff --git a/meta/lib/oeqa/runtime/cases/buildcpio.py b/meta/lib/oeqa/runtime/cases/buildcpio.py
index d0f91668b2..e29bf16ccb 100644
--- a/meta/lib/oeqa/runtime/cases/buildcpio.py
+++ b/meta/lib/oeqa/runtime/cases/buildcpio.py
@@ -27,6 +27,7 @@ class BuildCpioTest(OERuntimeTestCase):
@OEHasPackage(['autoconf'])
def test_cpio(self):
self.project.download_archive()
- self.project.run_configure('--disable-maintainer-mode','')
+ self.project.run_configure('--disable-maintainer-mode',
+ 'sed -i -e "/char \*program_name/d" src/global.c;')
self.project.run_make()
self.project.run_install()
diff --git a/meta/lib/oeqa/runtime/cases/date.py b/meta/lib/oeqa/runtime/cases/date.py
index fdd2a6ae58..bd6537400e 100644
--- a/meta/lib/oeqa/runtime/cases/date.py
+++ b/meta/lib/oeqa/runtime/cases/date.py
@@ -13,12 +13,12 @@ class DateTest(OERuntimeTestCase):
def setUp(self):
if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
self.logger.debug('Stopping systemd-timesyncd daemon')
- self.target.run('systemctl disable --now systemd-timesyncd')
+ self.target.run('systemctl disable --now --runtime systemd-timesyncd')
def tearDown(self):
if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
self.logger.debug('Starting systemd-timesyncd daemon')
- self.target.run('systemctl enable --now systemd-timesyncd')
+ self.target.run('systemctl enable --now --runtime systemd-timesyncd')
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
@@ -28,14 +28,13 @@ class DateTest(OERuntimeTestCase):
self.assertEqual(status, 0, msg=msg)
oldDate = output
- sampleDate = '"2016-08-09 10:00:00"'
- (status, output) = self.target.run("date -s %s" % sampleDate)
+ sampleTimestamp = 1488800000
+ (status, output) = self.target.run("date -s @%d" % sampleTimestamp)
self.assertEqual(status, 0, msg='Date set failed, output: %s' % output)
- (status, output) = self.target.run("date -R")
- p = re.match('Tue, 09 Aug 2016 10:00:.. \+0000', output)
+ (status, output) = self.target.run('date +"%s"')
msg = 'The date was not set correctly, output: %s' % output
- self.assertTrue(p, msg=msg)
+ self.assertTrue(int(output) - sampleTimestamp < 300, msg=msg)
(status, output) = self.target.run('date -s "%s"' % oldDate)
msg = 'Failed to reset date, output: %s' % output
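
The rewritten date test sets the clock from a raw epoch value and reads it back with date +"%s", sidestepping the locale- and format-sensitive parsing of date -R output. The tolerance check it performs, on illustrative values:

    sampleTimestamp = 1488800000
    output = "1488800042"     # hypothetical `date +%s` result after setting
    # Anything within five minutes counts as "set correctly".
    assert int(output) - sampleTimestamp < 300
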
diff --git a/meta/lib/oeqa/runtime/cases/df.py b/meta/lib/oeqa/runtime/cases/df.py
index 89fd0fb901..bb155c9cf9 100644
--- a/meta/lib/oeqa/runtime/cases/df.py
+++ b/meta/lib/oeqa/runtime/cases/df.py
@@ -4,12 +4,14 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfDataVar, skipIfInDataVar
from oeqa.runtime.decorator.package import OEHasPackage
class DfTest(OERuntimeTestCase):
@OETestDepends(['ssh.SSHTest.test_ssh'])
@OEHasPackage(['coreutils', 'busybox'])
+ @skipIfInDataVar('IMAGE_FEATURES', 'read-only-rootfs', 'Test case df requires a writable rootfs')
def test_df(self):
cmd = "df -P / | sed -n '2p' | awk '{print $4}'"
(status,output) = self.target.run(cmd)
diff --git a/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py b/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py
new file mode 100644
index 0000000000..e010612838
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/ethernet_ip_connman.py
@@ -0,0 +1,36 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+
+class Ethernet_Test(OERuntimeTestCase):
+
+ def set_ip(self, x):
+ x = x.split(".")
+ sample_host_address = '150'
+ x[3] = sample_host_address
+ x = '.'.join(x)
+ return x
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_set_virtual_ip(self):
+ (status, output) = self.target.run("ifconfig eth0 | grep 'inet ' | awk '{print $2}'")
+ self.assertEqual(status, 0, msg='Failed to get IP address. Make sure the device has an Ethernet connection, output: %s' % output)
+ original_ip = output
+ virtual_ip = self.set_ip(original_ip)
+
+ (status, output) = self.target.run("ifconfig eth0:1 %s netmask 255.255.255.0 && sleep 2 && ping -c 5 %s && ifconfig eth0:1 down" % (virtual_ip,virtual_ip))
+ self.assertEqual(status, 0, msg='Failed to create virtual ip address, output: %s' % output)
+
+ @OETestDepends(['ethernet_ip_connman.Ethernet_Test.test_set_virtual_ip'])
+ def test_get_ip_from_dhcp(self):
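+ # connmanctl flags ready/online wired services as "*AR Wired"/"*AO Wired"; the third field is the service id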
+ (status, output) = self.target.run("connmanctl services | grep -E '*AO Wired|*AR Wired' | awk '{print $3}'")
+ self.assertEqual(status, 0, msg='No wired interfaces were detected, output: %s' % output)
+ wired_interfaces = output
+
+ (status, output) = self.target.run("ip route | grep default | awk '{print $3}'")
+ self.assertEqual(status, 0, msg='Failed to retrieve the default gateway, output: %s' % output)
+ default_gateway = output
+
+ (status, output) = self.target.run("connmanctl config %s --ipv4 dhcp && sleep 2 && ping -c 5 %s" % (wired_interfaces,default_gateway))
+ self.assertEqual(status, 0, msg='Failed to get dynamic IP address via DHCP in connmand, output: %s' % output)
\ No newline at end of file
diff --git a/meta/lib/oeqa/runtime/cases/go.py b/meta/lib/oeqa/runtime/cases/go.py
new file mode 100644
index 0000000000..89ba2c3ecb
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/go.py
@@ -0,0 +1,19 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GoHelloworldTest(OERuntimeTestCase):
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['go-helloworld'])
+ def test_gohelloworld(self):
+ cmd = "go-helloworld"
+ status, output = self.target.run(cmd)
+ msg = 'Exit status was not 0. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ msg = 'Incorrect output: %s' % output
+ self.assertEqual(output, "Hello, Go examples!", msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/ksample.py b/meta/lib/oeqa/runtime/cases/ksample.py
index a9a1620ebd..c69e3fe4ac 100644
--- a/meta/lib/oeqa/runtime/cases/ksample.py
+++ b/meta/lib/oeqa/runtime/cases/ksample.py
@@ -10,7 +10,7 @@ from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
# need some kernel fragments
-# echo "KERNEL_FEATURES_append += \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
+# echo "KERNEL_FEATURES:append = \" features\/kernel\-sample\/kernel\-sample.scc\"" >> local.conf
class KSample(OERuntimeTestCase):
def cmd_and_check(self, cmd='', match_string=''):
status, output = self.target.run(cmd)
diff --git a/meta/lib/oeqa/runtime/cases/logrotate.py b/meta/lib/oeqa/runtime/cases/logrotate.py
index a4efcd07c0..2bff08f9da 100644
--- a/meta/lib/oeqa/runtime/cases/logrotate.py
+++ b/meta/lib/oeqa/runtime/cases/logrotate.py
@@ -17,7 +17,7 @@ class LogrotateTest(OERuntimeTestCase):
@classmethod
def tearDownClass(cls):
- cls.tc.target.run('mv -f $HOME/wtmp.oeqabak /etc/logrotate.d/wtmp && rm -rf $HOME/logrotate_dir')
+ cls.tc.target.run('mv -f $HOME/wtmp.oeqabak /etc/logrotate.d/wtmp && rm -rf /var/log/logrotate_dir')
cls.tc.target.run('rm -rf /var/log/logrotate_testfile && rm -rf /etc/logrotate.d/logrotate_testfile')
@OETestDepends(['ssh.SSHTest.test_ssh'])
@@ -29,17 +29,17 @@ class LogrotateTest(OERuntimeTestCase):
msg = ('Could not create/update /var/log/wtmp with touch')
self.assertEqual(status, 0, msg = msg)
- status, output = self.target.run('mkdir $HOME/logrotate_dir')
+ status, output = self.target.run('mkdir /var/log/logrotate_dir')
msg = ('Could not create logrotate_dir. Output: %s' % output)
self.assertEqual(status, 0, msg = msg)
- status, output = self.target.run('echo "create \n olddir $HOME/logrotate_dir \n include /etc/logrotate.d/wtmp" > /tmp/logrotate-test.conf')
+ status, output = self.target.run('echo "create \n olddir /var/log/logrotate_dir \n include /etc/logrotate.d/wtmp" > /tmp/logrotate-test.conf')
msg = ('Could not write to /tmp/logrotate-test.conf')
self.assertEqual(status, 0, msg = msg)
# If logrotate fails to rotate the log, view the verbose output of logrotate to see what prevented it
_, logrotate_output = self.target.run('logrotate -vf /tmp/logrotate-test.conf')
- status, _ = self.target.run('find $HOME/logrotate_dir -type f | grep wtmp.1')
+ status, _ = self.target.run('find /var/log/logrotate_dir -type f | grep wtmp.1')
msg = ("logrotate did not successfully rotate the wtmp log. Output from logrotate -vf: \n%s" % (logrotate_output))
self.assertEqual(status, 0, msg = msg)
@@ -54,17 +54,17 @@ class LogrotateTest(OERuntimeTestCase):
msg = ('Could not write to /etc/logrotate.d/logrotate_testfile')
self.assertEqual(status, 0, msg = msg)
- status, output = self.target.run('echo "create \n olddir $HOME/logrotate_dir \n include /etc/logrotate.d/logrotate_testfile" > /tmp/logrotate-test2.conf')
+ status, output = self.target.run('echo "create \n olddir /var/log/logrotate_dir \n include /etc/logrotate.d/logrotate_testfile" > /tmp/logrotate-test2.conf')
msg = ('Could not write to /tmp/logrotate_test2.conf')
self.assertEqual(status, 0, msg = msg)
- status, output = self.target.run('find $HOME/logrotate_dir -type f | grep logrotate_testfile.1')
+ status, output = self.target.run('find /var/log/logrotate_dir -type f | grep logrotate_testfile.1')
msg = ('A rotated log for logrotate_testfile is already present in logrotate_dir')
self.assertEqual(status, 1, msg = msg)
# If logrotate fails to rotate the log, view the verbose output of logrotate instead of just listing the files in olddir
_, logrotate_output = self.target.run('logrotate -vf /tmp/logrotate-test2.conf')
- status, _ = self.target.run('find $HOME/logrotate_dir -type f | grep logrotate_testfile.1')
+ status, _ = self.target.run('find /var/log/logrotate_dir -type f | grep logrotate_testfile.1')
msg = ('logrotate did not successfully rotate the logrotate_test log. Output from logrotate -vf: \n%s' % (logrotate_output))
self.assertEqual(status, 0, msg = msg)
diff --git a/meta/lib/oeqa/runtime/cases/ltp.py b/meta/lib/oeqa/runtime/cases/ltp.py
index 6dc5ef22ad..a66d5d13d7 100644
--- a/meta/lib/oeqa/runtime/cases/ltp.py
+++ b/meta/lib/oeqa/runtime/cases/ltp.py
@@ -78,9 +78,10 @@ class LtpTest(LtpTestBase):
# copy nice log from DUT
dst = os.path.join(self.ltptest_log_dir, "%s" % ltp_group )
remote_src = "/opt/ltp/results/%s" % ltp_group
- (status, output) = self.target.copyFrom(remote_src, dst)
+ (status, output) = self.target.copyFrom(remote_src, dst, True)
msg = 'File could not be copied. Output: %s' % output
- self.assertEqual(status, 0, msg=msg)
+ if status:
+ self.target.logger.warning(msg)
parser = LtpParser()
results, sections = parser.parse(dst)
diff --git a/meta/lib/oeqa/runtime/cases/multilib.py b/meta/lib/oeqa/runtime/cases/multilib.py
index 62e662b01c..0d1b9ae2c9 100644
--- a/meta/lib/oeqa/runtime/cases/multilib.py
+++ b/meta/lib/oeqa/runtime/cases/multilib.py
@@ -7,6 +7,8 @@ from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotInDataVar
from oeqa.runtime.decorator.package import OEHasPackage
+import os
+import subprocess
+
class MultilibTest(OERuntimeTestCase):
def archtest(self, binary, arch):
@@ -14,8 +16,10 @@ class MultilibTest(OERuntimeTestCase):
Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
"""
- status, output = self.target.run('readelf -h %s' % binary)
- self.assertEqual(status, 0, 'Failed to readelf %s' % binary)
+ dest = "{}/test_binary".format(self.td.get('T', ''))
+ self.target.copyFrom(binary, dest)
+ output = subprocess.check_output("readelf -h {}".format(dest), shell=True).decode()
+ os.remove(dest)
l = [l.split()[1] for l in output.split('\n') if "Class:" in l]
if l:
@@ -29,7 +33,6 @@ class MultilibTest(OERuntimeTestCase):
@skipIfNotInDataVar('MULTILIBS', 'multilib:lib32',
"This isn't a multilib:lib32 image")
@OETestDepends(['ssh.SSHTest.test_ssh'])
- @OEHasPackage(['binutils'])
@OEHasPackage(['lib32-libc6'])
def test_check_multilib_libc(self):
"""
@@ -39,6 +42,6 @@ class MultilibTest(OERuntimeTestCase):
self.archtest("/lib64/libc.so.6", "ELF64")
@OETestDepends(['multilib.MultilibTest.test_check_multilib_libc'])
- @OEHasPackage(['lib32-connman', '!connman'])
+ @OEHasPackage(['lib32-connman'])
def test_file_connman(self):
self.archtest("/usr/sbin/connmand", "ELF32")
diff --git a/meta/lib/oeqa/runtime/cases/oe_syslog.py b/meta/lib/oeqa/runtime/cases/oe_syslog.py
index f3c2bedbaf..150b70d9f0 100644
--- a/meta/lib/oeqa/runtime/cases/oe_syslog.py
+++ b/meta/lib/oeqa/runtime/cases/oe_syslog.py
@@ -121,11 +121,16 @@ class SyslogTestConfig(OERuntimeTestCase):
self.test_syslog_restart()
- cmd = 'logger foobar && grep foobar /var/log/test'
- status,output = self.target.run(cmd)
- msg = 'Test log string not found. Output: %s ' % output
+ cmd = 'logger foobar'
+ status, output = self.target.run(cmd)
+ msg = 'Logger command failed, %s. Output: %s ' % (status, output)
self.assertEqual(status, 0, msg=msg)
+ cmd = 'cat /var/log/test'
+ status, output = self.target.run(cmd)
+ if "foobar" not in output or status:
+ self.fail("'foobar' not found in logfile, status %s, contents %s" % (status, output))
+
cmd = "sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf"
self.target.run(cmd)
self.test_syslog_restart()
diff --git a/meta/lib/oeqa/runtime/cases/pam.py b/meta/lib/oeqa/runtime/cases/pam.py
index 271a1943e3..a482ded945 100644
--- a/meta/lib/oeqa/runtime/cases/pam.py
+++ b/meta/lib/oeqa/runtime/cases/pam.py
@@ -8,11 +8,14 @@
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
class PamBasicTest(OERuntimeTestCase):
@skipIfNotFeature('pam', 'Test requires pam to be in DISTRO_FEATURES')
@OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['shadow'])
+ @OEHasPackage(['shadow-base'])
def test_pam(self):
status, output = self.target.run('login --help')
msg = ('login command does not work as expected. '
diff --git a/meta/lib/oeqa/runtime/cases/parselogs.py b/meta/lib/oeqa/runtime/cases/parselogs.py
index a1791b5cca..1f9365f3a8 100644
--- a/meta/lib/oeqa/runtime/cases/parselogs.py
+++ b/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -32,7 +32,7 @@ common_errors = [
"Failed to load module \"fbdev\"",
"Failed to load module fbdev",
"Failed to load module glx",
- "[drm] Cannot find any crtc or sizes - going 1024x768",
+ "[drm] Cannot find any crtc or sizes",
"_OSC failed (AE_NOT_FOUND); disabling ASPM",
"Open ACPI failed (/var/run/acpid.socket) (No such file or directory)",
"NX (Execute Disable) protection cannot be enabled: non-PAE kernel!",
@@ -59,8 +59,11 @@ common_errors = [
"Failed to process device, ignoring: Device or resource busy",
"Cannot find a map file",
"[rdrand]: Initialization Failed",
+ "[rndr ]: Initialization Failed",
"[pulseaudio] authkey.c: Failed to open cookie file",
"[pulseaudio] authkey.c: Failed to load authentication key",
+ "was skipped because of a failed condition check",
+ "was skipped because all trigger condition checks failed",
]
video_related = [
@@ -88,6 +91,9 @@ qemux86_common = [
'tsc: HPET/PMTIMER calibration failed',
"modeset(0): Failed to initialize the DRI2 extension",
"glamor initialization failed",
+ "blk_update_request: I/O error, dev fd0, sector 0 op 0x0:(READ)",
+ "floppy: error",
+ 'failed to IDENTIFY (I/O error, err_mask=0x4)',
] + common_errors
ignore_errors = {
@@ -114,7 +120,12 @@ ignore_errors = {
'can\'t handle BAR above 4GB',
'Cannot reserve Legacy IO',
] + common_errors,
- 'qemuarm' : [
+ 'qemuppc64' : [
+ 'vio vio: uevent: failed to send synthetic uevent',
+ 'synth uevent: /devices/vio: failed to send uevent',
+ 'PCI 0000:00 Cannot reserve Legacy IO [io 0x10000-0x10fff]',
+ ] + common_errors,
+ 'qemuarmv5' : [
'mmci-pl18x: probe of fpga:05 failed with error -22',
'mmci-pl18x: probe of fpga:0b failed with error -22',
'Failed to load module "glx"',
@@ -128,6 +139,7 @@ ignore_errors = {
'OF: amba_device_add() failed (-19) for /amba/fpga/sci@a000',
'Failed to initialize \'/amba/timer@101e3000\': -22',
'jitterentropy: Initialization failed with host not compliant with requirements: 2',
+ 'clcd-pl11x: probe of 10120000.display failed with error -2',
] + common_errors,
'qemuarm64' : [
'Fatal server error:',
@@ -293,7 +305,7 @@ class ParseLogsTest(OERuntimeTestCase):
grepcmd = 'grep '
grepcmd += '-Ei "'
for error in errors:
- grepcmd += '\<' + error + '\>' + '|'
+ grepcmd += r'\<' + error + r'\>' + '|'
grepcmd = grepcmd[:-1]
grepcmd += '" ' + str(log) + " | grep -Eiv \'"
@@ -304,13 +316,13 @@ class ParseLogsTest(OERuntimeTestCase):
errorlist = ignore_errors['default']
for ignore_error in errorlist:
- ignore_error = ignore_error.replace('(', '\(')
- ignore_error = ignore_error.replace(')', '\)')
+ ignore_error = ignore_error.replace('(', r'\(')
+ ignore_error = ignore_error.replace(')', r'\)')
ignore_error = ignore_error.replace("'", '.')
- ignore_error = ignore_error.replace('?', '\?')
- ignore_error = ignore_error.replace('[', '\[')
- ignore_error = ignore_error.replace(']', '\]')
- ignore_error = ignore_error.replace('*', '\*')
+ ignore_error = ignore_error.replace('?', r'\?')
+ ignore_error = ignore_error.replace('[', r'\[')
+ ignore_error = ignore_error.replace(']', r'\]')
+ ignore_error = ignore_error.replace('*', r'\*')
ignore_error = ignore_error.replace('0-9', '[0-9]')
grepcmd += ignore_error + '|'
grepcmd = grepcmd[:-1]
diff --git a/meta/lib/oeqa/runtime/cases/ping.py b/meta/lib/oeqa/runtime/cases/ping.py
index f6603f75ec..498f80d0a5 100644
--- a/meta/lib/oeqa/runtime/cases/ping.py
+++ b/meta/lib/oeqa/runtime/cases/ping.py
@@ -6,6 +6,7 @@ from subprocess import Popen, PIPE
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.oetimeout import OETimeout
+from oeqa.core.exception import OEQATimeoutError
class PingTest(OERuntimeTestCase):
@@ -13,14 +14,17 @@ class PingTest(OERuntimeTestCase):
def test_ping(self):
output = ''
count = 0
- while count < 5:
- cmd = 'ping -c 1 %s' % self.target.ip
- proc = Popen(cmd, shell=True, stdout=PIPE)
- output += proc.communicate()[0].decode('utf-8')
- if proc.poll() == 0:
- count += 1
- else:
- count = 0
+ try:
+ while count < 5:
+ cmd = 'ping -c 1 %s' % self.target.ip
+ proc = Popen(cmd, shell=True, stdout=PIPE)
+ output += proc.communicate()[0].decode('utf-8')
+ if proc.poll() == 0:
+ count += 1
+ else:
+ count = 0
+ except OEQATimeoutError:
+ self.fail("Ping timeout error for address %s, count %s, output: %s" % (self.target.ip, count, output))
msg = ('Expected 5 consecutive, got %d.\n'
'ping output is:\n%s' % (count,output))
self.assertEqual(count, 5, msg = msg)
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index a9572c81f0..00742da2b5 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -57,7 +57,7 @@ class PtestRunnerTest(OERuntimeTestCase):
ptest_dirs = [ '/usr/lib' ]
if not libdir in ptest_dirs:
ptest_dirs.append(libdir)
- status, output = self.target.run('ptest-runner -d \"{}\"'.format(' '.join(ptest_dirs)), 0)
+ status, output = self.target.run('ptest-runner -t 450 -d \"{}\"'.format(' '.join(ptest_dirs)), 0)
os.makedirs(ptest_log_dir)
with open(ptest_runner_log, 'w') as f:
f.write(output)
@@ -108,4 +108,5 @@ class PtestRunnerTest(OERuntimeTestCase):
failmsg = failmsg + "Failed ptests:\n%s" % pprint.pformat(failed_tests)
if failmsg:
+ self.logger.warning("There were failing ptests.")
self.fail(failmsg)
diff --git a/meta/lib/oeqa/runtime/cases/rpm.py b/meta/lib/oeqa/runtime/cases/rpm.py
index 8e18b426f8..a4339116bf 100644
--- a/meta/lib/oeqa/runtime/cases/rpm.py
+++ b/meta/lib/oeqa/runtime/cases/rpm.py
@@ -116,12 +116,12 @@ class RpmInstallRemoveTest(OERuntimeTestCase):
Author: Alexander Kanavin <alex.kanavin@gmail.com>
AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
"""
- db_files_cmd = 'ls /var/lib/rpm/__db.*'
+ db_files_cmd = 'ls /var/lib/rpm/rpmdb.sqlite*'
check_log_cmd = "grep RPM /var/log/messages | wc -l"
- # Make sure that some database files are under /var/lib/rpm as '__db.xxx'
+ # Make sure that some database files are under /var/lib/rpm as 'rpmdb.sqlite'
status, output = self.target.run(db_files_cmd)
- msg = 'Failed to find database files under /var/lib/rpm/ as __db.xxx'
+ msg = 'Failed to find database files under /var/lib/rpm/ as rpmdb.sqlite'
self.assertEqual(0, status, msg=msg)
self.tc.target.copyTo(self.test_file, self.dst)
@@ -141,13 +141,4 @@ class RpmInstallRemoveTest(OERuntimeTestCase):
self.tc.target.run('rm -f %s' % self.dst)
- # if using systemd this should ensure all entries are flushed to /var
- status, output = self.target.run("journalctl --sync")
- # Get the amount of entries in the log file
- status, output = self.target.run(check_log_cmd)
- msg = 'Failed to get the final size of the log file.'
- self.assertEqual(0, status, msg=msg)
- # Check that there's enough of them
- self.assertGreaterEqual(int(output), 80,
- 'Cound not find sufficient amount of rpm entries in /var/log/messages, found {} entries'.format(output))
diff --git a/meta/lib/oeqa/runtime/cases/rtc.py b/meta/lib/oeqa/runtime/cases/rtc.py
new file mode 100644
index 0000000000..c4e6681324
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/rtc.py
@@ -0,0 +1,38 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+import re
+
+class RTCTest(OERuntimeTestCase):
+
+ def setUp(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Stopping systemd-timesyncd daemon')
+ self.target.run('systemctl disable --now --runtime systemd-timesyncd')
+
+ def tearDown(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Starting systemd-timesyncd daemon')
+ self.target.run('systemctl enable --now --runtime systemd-timesyncd')
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['coreutils', 'busybox'])
+ def test_rtc(self):
+ (status, output) = self.target.run('hwclock -r')
+ self.assertEqual(status, 0, msg='Failed to get RTC time, output: %s' % output)
+
+ (status, current_datetime) = self.target.run('date +"%m%d%H%M%Y"')
+ self.assertEqual(status, 0, msg='Failed to get system current date & time, output: %s' % current_datetime)
+
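+ # date(1) MMDDhhmmYYYY format: set the clock to 23 Jun 2008, 09:45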
+ example_datetime = '062309452008'
+ (status, output) = self.target.run('date %s ; hwclock -w ; hwclock -r' % example_datetime)
+ check_hwclock = re.search('2008-06-23 09:45:..', output)
+ self.assertTrue(check_hwclock, msg='The RTC time was not set correctly, output: %s' % output)
+
+ (status, output) = self.target.run('date %s' % current_datetime)
+ self.assertEqual(status, 0, msg='Failed to reset system date & time, output: %s' % output)
+
+ (status, output) = self.target.run('hwclock -w')
+ self.assertEqual(status, 0, msg='Failed to reset RTC time, output: %s' % output)
+
diff --git a/meta/lib/oeqa/runtime/cases/runlevel.py b/meta/lib/oeqa/runtime/cases/runlevel.py
new file mode 100644
index 0000000000..3a4df8ace1
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/runlevel.py
@@ -0,0 +1,22 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+import time
+
+class RunLevel_Test(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_runlevel_3(self):
+ (status, output) = self.target.run("init 3 && sleep 5 && runlevel")
+ runlevel = '5 3'
+ self.assertEqual(output, runlevel, msg='Failed to set current runlevel to runlevel 3, current runlevel: %s' % output[-1])
+ (status, output) = self.target.run("uname -a")
+ self.assertEqual(status, 0, msg='Failed to run uname command, output: %s' % output)
+
+ @OETestDepends(['runlevel.RunLevel_Test.test_runlevel_3'])
+ def test_runlevel_5(self):
+ (status, output) = self.target.run("init 5 && sleep 5 && runlevel")
+ runlevel = '3 5'
+ self.assertEqual(output, runlevel, msg='Failed to set current runlevel to runlevel 5, current runlevel: %s' % output[-1])
+ (status, output) = self.target.run('export DISPLAY=:0 && x11perf -aa10text')
+ self.assertEqual(status, 0, msg='Failed to run 2D graphic test, output: %s' % output)
diff --git a/meta/lib/oeqa/runtime/cases/rust.py b/meta/lib/oeqa/runtime/cases/rust.py
new file mode 100644
index 0000000000..b3d6cf7f37
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/rust.py
@@ -0,0 +1,19 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class RustHelloworldTest(OERuntimeTestCase):
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['rust-hello-world'])
+ def test_rusthelloworld(self):
+ cmd = "rust-hello-world"
+ status, output = self.target.run(cmd)
+ msg = 'Exit status was not 0. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ msg = 'Incorrect output: %s' % output
+ self.assertEqual(output, "Hello, world!", msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/skeletoninit.py b/meta/lib/oeqa/runtime/cases/skeletoninit.py
index 4779cd6bb4..a12f1e9aae 100644
--- a/meta/lib/oeqa/runtime/cases/skeletoninit.py
+++ b/meta/lib/oeqa/runtime/cases/skeletoninit.py
@@ -4,7 +4,7 @@
# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284
# testcase. Image under test must have meta-skeleton layer in bblayers and
-# IMAGE_INSTALL_append = " service" in local.conf
+# IMAGE_INSTALL:append = " service" in local.conf
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfDataVar
diff --git a/meta/lib/oeqa/runtime/cases/ssh.py b/meta/lib/oeqa/runtime/cases/ssh.py
index 60a5fbbfbf..e31224b3af 100644
--- a/meta/lib/oeqa/runtime/cases/ssh.py
+++ b/meta/lib/oeqa/runtime/cases/ssh.py
@@ -13,7 +13,7 @@ class SSHTest(OERuntimeTestCase):
def test_ssh(self):
(status, output) = self.target.run('uname -a')
self.assertEqual(status, 0, msg='SSH Test failed: %s' % output)
- (status, output) = self.target.run('cat /etc/masterimage')
- msg = "This isn't the right image - /etc/masterimage " \
+ (status, output) = self.target.run('cat /etc/controllerimage')
+ msg = "This isn't the right image - /etc/controllerimage " \
"shouldn't be here %s" % output
self.assertEqual(status, 1, msg=msg)
diff --git a/meta/lib/oeqa/runtime/cases/stap.py b/meta/lib/oeqa/runtime/cases/stap.py
index 5342f6ac34..480eaabf2d 100644
--- a/meta/lib/oeqa/runtime/cases/stap.py
+++ b/meta/lib/oeqa/runtime/cases/stap.py
@@ -5,33 +5,28 @@
import os
from oeqa.runtime.case import OERuntimeTestCase
-from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
class StapTest(OERuntimeTestCase):
-
- @classmethod
- def setUp(cls):
- src = os.path.join(cls.tc.runtime_files_dir, 'hello.stp')
- dst = '/tmp/hello.stp'
- cls.tc.target.copyTo(src, dst)
-
- @classmethod
- def tearDown(cls):
- files = '/tmp/hello.stp'
- cls.tc.target.run('rm %s' % files)
-
- @skipIfNotFeature('tools-profile',
- 'Test requires tools-profile to be in IMAGE_FEATURES')
- @OETestDepends(['kernelmodule.KernelModuleTest.test_kernel_module'])
+ @skipIfNotFeature('tools-profile', 'Test requires tools-profile to be in IMAGE_FEATURES')
@OEHasPackage(['systemtap'])
+ @OEHasPackage(['gcc-symlinks'])
+ @OEHasPackage(['kernel-devsrc'])
def test_stap(self):
- cmds = [
- 'cd /usr/src/kernel && make scripts prepare',
- 'cd /lib/modules/`uname -r` && (if [ ! -e build ]; then ln -s /usr/src/kernel build; fi)',
- 'stap --disable-cache -DSTP_NO_VERREL_CHECK /tmp/hello.stp'
- ]
- for cmd in cmds:
+ try:
+ cmd = 'make -j -C /usr/src/kernel scripts prepare'
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
+
+ cmd = 'stap -v -p4 -m stap-hello --disable-cache -DSTP_NO_VERREL_CHECK -e \'probe oneshot { print("Hello, "); println("SystemTap!") }\''
status, output = self.target.run(cmd, 900)
self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
+
+ cmd = 'staprun -v -R -b1 stap-hello.ko'
+ status, output = self.target.run(cmd, 60)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
+ self.assertIn('Hello, SystemTap!', output, msg='\n'.join([cmd, output]))
+ except:
+ # Dump dmesg to help debugging, then re-raise so the failure is still reported
+ status, dmesg = self.target.run('dmesg')
+ if status == 0:
+ print(dmesg)
+ raise
diff --git a/meta/lib/oeqa/runtime/cases/suspend.py b/meta/lib/oeqa/runtime/cases/suspend.py
new file mode 100644
index 0000000000..67b6f7e56f
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/suspend.py
@@ -0,0 +1,33 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+import threading
+import time
+
+class Suspend_Test(OERuntimeTestCase):
+
+ def test_date(self):
+ (status, output) = self.target.run('date')
+ self.assertEqual(status, 0, msg = 'Failed to run date command, output: %s' % output)
+
+ def test_ping(self):
+ t_thread = threading.Thread(target=self.target.run, args=("ping 8.8.8.8",))
+ t_thread.start()
+ time.sleep(2)
+
+ status, output = self.target.run('pidof ping')
+ self.target.run('kill -9 %s' % output)
+ self.assertEqual(status, 0, msg = 'Could not find a running ping process, output: %s' % output)
+
+ def set_suspend(self):
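+ # rtcwake suspends to RAM and programs the RTC to wake the system after 10 seconds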
+ (status, output) = self.target.run('sudo rtcwake -m mem -s 10')
+ self.assertEqual(status, 0, msg = 'Failed to suspend the system to RAM, output: %s' % output)
+
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_suspend(self):
+ self.test_date()
+ self.test_ping()
+ self.set_suspend()
+ self.test_date()
+ self.test_ping()
diff --git a/meta/lib/oeqa/runtime/cases/terminal.py b/meta/lib/oeqa/runtime/cases/terminal.py
new file mode 100644
index 0000000000..8fcca99f47
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/terminal.py
@@ -0,0 +1,21 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+import threading
+import time
+
+class TerminalTest(OERuntimeTestCase):
+
+ @OEHasPackage(['matchbox-terminal'])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_terminal_running(self):
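+ # Start matchbox-terminal on the target display from a background thread; target.run() blocks until the terminal is killed below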
+ t_thread = threading.Thread(target=self.target.run, args=("export DISPLAY=:0 && matchbox-terminal -e 'sh -c \"uname -a && exec sh\"'",))
+ t_thread.start()
+ time.sleep(2)
+
+ status, output = self.target.run('pidof matchbox-terminal')
+ number_of_terminal = len(output.split())
+ self.assertEqual(number_of_terminal, 1, msg='Only one terminal should have been launched. Number of terminals launched: %s' % number_of_terminal)
+ self.target.run('kill -9 %s' % output)
+ self.assertEqual(status, 0, msg='Could not find the matchbox-terminal process.')
diff --git a/meta/lib/oeqa/runtime/cases/usb_hid.py b/meta/lib/oeqa/runtime/cases/usb_hid.py
new file mode 100644
index 0000000000..3c292cf661
--- /dev/null
+++ b/meta/lib/oeqa/runtime/cases/usb_hid.py
@@ -0,0 +1,22 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.data import skipIfQemu
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class USB_HID_Test(OERuntimeTestCase):
+
+ def keyboard_mouse_simulation(self):
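+ # Use xdotool to inject a key press and a pointer move on the target's X display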
+ (status, output) = self.target.run('export DISPLAY=:0 && xdotool key F2 && xdotool mousemove 100 100')
+ return self.assertEqual(status, 0, msg = 'Failed to simulate keyboard/mouse input event, output: %s' % output)
+
+ def set_suspend(self):
+ (status, output) = self.target.run('sudo rtcwake -m mem -s 10')
+ return self.assertEqual(status, 0, msg = 'Failed to suspend the system to RAM, output: %s' % output)
+
+ @OEHasPackage(['xdotool'])
+ @skipIfQemu('qemuall', 'Test only runs on real hardware')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_USB_Hid_input(self):
+ self.keyboard_mouse_simulation()
+ self.set_suspend()
+ self.keyboard_mouse_simulation()
diff --git a/meta/lib/oeqa/runtime/cases/weston.py b/meta/lib/oeqa/runtime/cases/weston.py
index ac29eca6e4..b81cc299ef 100644
--- a/meta/lib/oeqa/runtime/cases/weston.py
+++ b/meta/lib/oeqa/runtime/cases/weston.py
@@ -10,7 +10,7 @@ import threading
import time
class WestonTest(OERuntimeTestCase):
- weston_log_file = '/tmp/weston.log'
+ weston_log_file = '/tmp/weston-2.log'
@classmethod
def tearDownClass(cls):
@@ -31,10 +31,13 @@ class WestonTest(OERuntimeTestCase):
return output.split(" ")
def get_weston_command(self, cmd):
- return 'export XDG_RUNTIME_DIR=/run/user/0; export WAYLAND_DISPLAY=wayland-0; %s' % cmd
+ return 'export XDG_RUNTIME_DIR=/run/user/`id -u weston`; export WAYLAND_DISPLAY=wayland-1; %s' % cmd
def run_weston_init(self):
- self.target.run(self.get_weston_command('weston --log=%s' % self.weston_log_file))
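+ # Under systemd, start weston as a transient unit on tty6; otherwise launch it on a fresh VT via openvt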
+ if 'systemd' in self.tc.td['VIRTUAL-RUNTIME_init_manager']:
+ self.target.run('systemd-run --collect --unit=weston-ptest.service --uid=0 -p PAMName=login -p TTYPath=/dev/tty6 -E XDG_RUNTIME_DIR=/tmp -E WAYLAND_DISPLAY=wayland-0 /usr/bin/weston --socket=wayland-1 --log=%s' % self.weston_log_file)
+ else:
+ self.target.run(self.get_weston_command('openvt -- weston --socket=wayland-2 --log=%s' % self.weston_log_file))
def get_new_wayland_processes(self, existing_wl_processes):
try_cnt = 0
@@ -48,10 +51,14 @@ class WestonTest(OERuntimeTestCase):
return new_wl_processes, try_cnt
- @OEHasPackage(['weston'])
- def test_weston_info(self):
- status, output = self.target.run(self.get_weston_command('weston-info'))
- self.assertEqual(status, 0, msg='weston-info error: %s' % output)
+ @OEHasPackage(['wayland-utils'])
+ def test_wayland_info(self):
+ if 'systemd' in self.tc.td['VIRTUAL-RUNTIME_init_manager']:
+ command = 'XDG_RUNTIME_DIR=/run wayland-info'
+ else:
+ command = self.get_weston_command('wayland-info')
+ status, output = self.target.run(command)
+ self.assertEqual(status, 0, msg='wayland-info error: %s' % output)
@OEHasPackage(['weston'])
def test_weston_can_initialize_new_wayland_compositor(self):
@@ -63,7 +70,10 @@ class WestonTest(OERuntimeTestCase):
new_wl_processes, try_cnt = self.get_new_wayland_processes(existing_wl_processes)
existing_and_new_weston_processes = self.get_processes_of('weston', 'existing and new')
new_weston_processes = [x for x in existing_and_new_weston_processes if x not in existing_weston_processes]
- for w in new_weston_processes:
- self.target.run('kill -9 %s' % w)
+ if 'systemd' in self.tc.td['VIRTUAL-RUNTIME_init_manager']:
+ self.target.run('systemctl stop weston-ptest.service')
+ else:
+ for w in new_weston_processes:
+ self.target.run('kill -9 %s' % w)
__, weston_log = self.target.run('cat %s' % self.weston_log_file)
self.assertTrue(new_wl_processes, msg='Could not get new weston-desktop-shell processes (%s, try_cnt:%s) weston log: %s' % (new_wl_processes, try_cnt, weston_log))
diff --git a/meta/lib/oeqa/runtime/cases/x32lib.py b/meta/lib/oeqa/runtime/cases/x32lib.py
index ddf220140e..f419c8f181 100644
--- a/meta/lib/oeqa/runtime/cases/x32lib.py
+++ b/meta/lib/oeqa/runtime/cases/x32lib.py
@@ -6,16 +6,21 @@ from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotInDataVar
+import os
+import subprocess
+
class X32libTest(OERuntimeTestCase):
@skipIfNotInDataVar('DEFAULTTUNE', 'x86-64-x32',
'DEFAULTTUNE is not set to x86-64-x32')
@OETestDepends(['ssh.SSHTest.test_ssh'])
def test_x32_file(self):
- cmd = 'readelf -h /bin/ls | grep Class | grep ELF32'
- status1 = self.target.run(cmd)[0]
- cmd = 'readelf -h /bin/ls | grep Machine | grep X86-64'
- status2 = self.target.run(cmd)[0]
- msg = ("/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" %
- self.target.run("readelf -h /bin/ls")[1])
+ dest = self.td.get('T', '') + "/ls.x32test"
+ self.target.copyFrom("/bin/ls", dest)
+ cmd = 'readelf -h {} | grep Class | grep ELF32'.format(dest)
+ status1 = subprocess.call(cmd, shell=True)
+ cmd = 'readelf -h {} | grep Machine | grep X86-64'.format(dest)
+ status2 = subprocess.call(cmd, shell=True)
+ msg = ("/bin/ls isn't an X86-64 ELF32 binary. readelf says:\n{}".format(
+ subprocess.check_output("readelf -h {}".format(dest), shell=True).decode()))
+ os.remove(dest)
self.assertTrue(status1 == 0 and status2 == 0, msg=msg)
diff --git a/meta/lib/oeqa/runtime/context.py b/meta/lib/oeqa/runtime/context.py
index 3826f27642..d707ab263a 100644
--- a/meta/lib/oeqa/runtime/context.py
+++ b/meta/lib/oeqa/runtime/context.py
@@ -5,6 +5,7 @@
#
import os
+import sys
from oeqa.core.context import OETestContext, OETestContextExecutor
from oeqa.core.target.ssh import OESSHTarget
@@ -119,8 +120,7 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# XXX: Don't base your targets on this code it will be refactored
# in the near future.
# Custom target module loading
- target_modules_path = kwargs.get('target_modules_path', '')
- controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
+ controller = OERuntimeTestContextExecutor.getControllerModule(target_type)
target = controller(logger, target_ip, server_ip, **kwargs)
return target
@@ -130,15 +130,15 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# AttributeError raised if not found.
# ImportError raised if a provided module can not be imported.
@staticmethod
- def getControllerModule(target, target_modules_path):
- controllerslist = OERuntimeTestContextExecutor._getControllerModulenames(target_modules_path)
+ def getControllerModule(target):
+ controllerslist = OERuntimeTestContextExecutor._getControllerModulenames()
controller = OERuntimeTestContextExecutor._loadControllerFromName(target, controllerslist)
return controller
# Return a list of all python modules in lib/oeqa/controllers for each
# layer in bbpath
@staticmethod
- def _getControllerModulenames(target_modules_path):
+ def _getControllerModulenames():
controllerslist = []
@@ -153,9 +153,8 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
else:
raise RuntimeError("Duplicate controller module found for %s. Layers should create unique controller module names" % module)
- extpath = target_modules_path.split(':')
- for p in extpath:
- controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
+ for p in sys.path:
+ controllerpath = os.path.join(p, 'oeqa', 'controllers')
if os.path.exists(controllerpath):
add_controller_list(controllerpath)
return controllerslist
@@ -175,16 +174,12 @@ class OERuntimeTestContextExecutor(OETestContextExecutor):
# Search for and return a controller or None from given module name
@staticmethod
def _loadControllerFromModule(target, modulename):
- obj = None
- # import module, allowing it to raise import exception
- module = __import__(modulename, globals(), locals(), [target])
- # look for target class in the module, catching any exceptions as it
- # is valid that a module may not have the target class.
try:
- obj = getattr(module, target)
- except:
- obj = None
- return obj
+ import importlib
+ module = importlib.import_module(modulename)
+ return getattr(module, target)
+ except AttributeError:
+ return None
@staticmethod
def readPackagesManifest(manifest):
diff --git a/meta/lib/oeqa/runtime/decorator/package.py b/meta/lib/oeqa/runtime/decorator/package.py
index 57178655cc..2d7e174dbf 100644
--- a/meta/lib/oeqa/runtime/decorator/package.py
+++ b/meta/lib/oeqa/runtime/decorator/package.py
@@ -45,14 +45,14 @@ class OEHasPackage(OETestDecorator):
msg = 'Checking if %s is not installed' % ', '.join(unneed_pkgs)
self.logger.debug(msg)
if not self.case.tc.image_packages.isdisjoint(unneed_pkgs):
- msg = "Test can't run with %s installed" % ', or'.join(unneed_pkgs)
+ msg = "Test can't run with %s installed" % ', or '.join(unneed_pkgs)
self._decorator_fail(msg)
if need_pkgs:
msg = 'Checking if at least one of %s is installed' % ', '.join(need_pkgs)
self.logger.debug(msg)
if self.case.tc.image_packages.isdisjoint(need_pkgs):
- msg = "Test requires %s to be installed" % ', or'.join(need_pkgs)
+ msg = "Test requires %s to be installed" % ', or '.join(need_pkgs)
self._decorator_fail(msg)
def _decorator_fail(self, msg):
diff --git a/meta/lib/oeqa/runtime/files/hello.stp b/meta/lib/oeqa/runtime/files/hello.stp
deleted file mode 100644
index 3677147162..0000000000
--- a/meta/lib/oeqa/runtime/files/hello.stp
+++ /dev/null
@@ -1 +0,0 @@
-probe oneshot { println("hello world") }
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/README b/meta/lib/oeqa/sdk/buildtools-cases/README
new file mode 100644
index 0000000000..d4f20faa9f
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildtools-cases/README
@@ -0,0 +1,2 @@
+These test cases are used by buildtools-tarball, and are not used by the testsdk
+class.
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/build.py b/meta/lib/oeqa/sdk/buildtools-cases/build.py
new file mode 100644
index 0000000000..aee2e5a8c0
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildtools-cases/build.py
@@ -0,0 +1,30 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os, tempfile
+import time
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class BuildTests(OESDKTestCase):
+ """
+ Verify that bitbake can build virtual/libc inside the buildtools.
+ """
+ def test_libc(self):
+ with tempfile.TemporaryDirectory(prefix='bitbake-build-', dir=self.tc.sdk_dir) as testdir:
+ corebase = self.td['COREBASE']
+
+ self._run('. %s/oe-init-build-env %s' % (corebase, testdir))
+ with open(os.path.join(testdir, 'conf', 'local.conf'), 'ta') as conf:
+ conf.write('\n')
+ conf.write('DL_DIR = "%s"\n' % self.td['DL_DIR'])
+
+ try:
+ self._run('. %s/oe-init-build-env %s && bitbake virtual/libc' % (corebase, testdir))
+ finally:
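+ # Give bitbake up to 10 seconds to release bitbake.lock and flush the hash server database before the temporary directory is removed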
+ delay = 10
+ while delay and (os.path.exists(testdir + "/bitbake.lock") or os.path.exists(testdir + "/cache/hashserv.db-wal")):
+ time.sleep(1)
+ delay = delay - 1
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/gcc.py b/meta/lib/oeqa/sdk/buildtools-cases/gcc.py
new file mode 100644
index 0000000000..36ba15b134
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildtools-cases/gcc.py
@@ -0,0 +1,29 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os.path
+from oeqa.sdk.case import OESDKTestCase
+
+class GccTests(OESDKTestCase):
+ def test_verify_specs(self):
+ """
+ Verify that the compiler has been relocated successfully and isn't
+ looking in the hard-coded prefix.
+ """
+ # Canonicalise the SDK root
+ sdk_base = os.path.realpath(self.tc.sdk_dir)
+ # Canonicalise the location of GCC
+ gcc_path = os.path.realpath(self._run("command -v gcc").strip())
+ # Skip the test if the GCC didn't come from the buildtools, as it only
+ # comes with buildtools-extended-tarball.
+ if os.path.commonprefix((sdk_base, gcc_path)) != sdk_base:
+ self.skipTest("Buildtools does not provide GCC")
+
+ # This is the prefix that GCC is built with, and should be replaced at
+ # installation time.
+ sdkpath = self.td.get("SDKPATH")
+ self.assertTrue(sdkpath)
+
+ for line in self._run('gcc -dumpspecs').splitlines():
+ self.assertNotIn(sdkpath, line)
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/https.py b/meta/lib/oeqa/sdk/buildtools-cases/https.py
new file mode 100644
index 0000000000..35e549eb40
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildtools-cases/https.py
@@ -0,0 +1,20 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.utils.subprocesstweak import errors_have_output
+errors_have_output()
+
+class HTTPTests(OESDKTestCase):
+ """
+ Verify that HTTPS certificates are working correctly, as this depends on
+ environment variables being set correctly.
+ """
+
+ def test_wget(self):
+ self._run('env -i wget --debug --output-document /dev/null https://yoctoproject.org/connectivity.html')
+
+ def test_python(self):
+ # urlopen() returns a file-like object on success and throws an exception otherwise
+ self._run('python3 -c \'import urllib.request; urllib.request.urlopen("https://yoctoproject.org/connectivity.html")\'')
diff --git a/meta/lib/oeqa/sdk/buildtools-cases/sanity.py b/meta/lib/oeqa/sdk/buildtools-cases/sanity.py
new file mode 100644
index 0000000000..64baaa8f84
--- /dev/null
+++ b/meta/lib/oeqa/sdk/buildtools-cases/sanity.py
@@ -0,0 +1,22 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import shutil
+import os.path
+from oeqa.sdk.case import OESDKTestCase
+
+class SanityTests(OESDKTestCase):
+ def test_tools(self):
+ """
+ Test that wget and tar come from the buildtools, not the host. This
+ verifies that the buildtools have installed correctly. We can't check
+ for gcc as that is only installed by buildtools-extended.
+ """
+ for command in ("tar", "wget"):
+ # Canonicalise the SDK root
+ sdk_base = os.path.realpath(self.tc.sdk_dir)
+ # Canonicalise the location of this command
+ tool_path = os.path.realpath(self._run("command -v %s" % command).strip())
+ # Assert that the tool was found inside the SDK root
+ self.assertEqual(os.path.commonprefix((sdk_base, tool_path)), sdk_base)
diff --git a/meta/lib/oeqa/sdk/case.py b/meta/lib/oeqa/sdk/case.py
index ebb03af9eb..c45882689c 100644
--- a/meta/lib/oeqa/sdk/case.py
+++ b/meta/lib/oeqa/sdk/case.py
@@ -26,7 +26,7 @@ class OESDKTestCase(OETestCase):
return tarball
tarball = os.path.join(workdir, archive)
- subprocess.check_output(["wget", "-O", tarball, url])
+ subprocess.check_output(["wget", "-O", tarball, url], stderr=subprocess.STDOUT)
return tarball
def check_elf(self, path, target_os=None, target_arch=None):
diff --git a/meta/lib/oeqa/sdk/cases/assimp.py b/meta/lib/oeqa/sdk/cases/assimp.py
index f26b17f2e9..f166758e49 100644
--- a/meta/lib/oeqa/sdk/cases/assimp.py
+++ b/meta/lib/oeqa/sdk/cases/assimp.py
@@ -30,7 +30,7 @@ class BuildAssimp(OESDKTestCase):
dirs["build"] = os.path.join(testdir, "build")
dirs["install"] = os.path.join(testdir, "install")
- subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT)
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
diff --git a/meta/lib/oeqa/sdk/cases/buildcpio.py b/meta/lib/oeqa/sdk/cases/buildcpio.py
index 902e93f623..e7fc211a47 100644
--- a/meta/lib/oeqa/sdk/cases/buildcpio.py
+++ b/meta/lib/oeqa/sdk/cases/buildcpio.py
@@ -24,10 +24,11 @@ class BuildCpioTest(OESDKTestCase):
dirs["build"] = os.path.join(testdir, "build")
dirs["install"] = os.path.join(testdir, "install")
- subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT)
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
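+ # cpio's own program_name declaration clashes with gnulib's under GCC 10's default -fno-common; drop it before configuring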
+ self._run("sed -i -e '/char.*program_name/d' {source}/src/global.c".format(**dirs))
self._run("cd {build} && {source}/configure --disable-maintainer-mode $CONFIGURE_FLAGS".format(**dirs))
self._run("cd {build} && make -j".format(**dirs))
self._run("cd {build} && make install DESTDIR={install}".format(**dirs))
diff --git a/meta/lib/oeqa/sdk/cases/buildepoxy.py b/meta/lib/oeqa/sdk/cases/buildepoxy.py
index 4211955f8d..385f8ccca8 100644
--- a/meta/lib/oeqa/sdk/cases/buildepoxy.py
+++ b/meta/lib/oeqa/sdk/cases/buildepoxy.py
@@ -28,7 +28,7 @@ class EpoxyTest(OESDKTestCase):
dirs["build"] = os.path.join(testdir, "build")
dirs["install"] = os.path.join(testdir, "install")
- subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT)
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
diff --git a/meta/lib/oeqa/sdk/cases/buildgalculator.py b/meta/lib/oeqa/sdk/cases/buildgalculator.py
index bbaa5c55c9..eb3c8ddf39 100644
--- a/meta/lib/oeqa/sdk/cases/buildgalculator.py
+++ b/meta/lib/oeqa/sdk/cases/buildgalculator.py
@@ -31,11 +31,11 @@ class GalculatorTest(OESDKTestCase):
dirs["build"] = os.path.join(testdir, "build")
dirs["install"] = os.path.join(testdir, "install")
- subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT)
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
- self._run("cd {source} && autoreconf -i -f -I $OECORE_TARGET_SYSROOT/usr/share/aclocal -I m4".format(**dirs))
+ self._run("cd {source} && sed -i -e '/s_preferences.*prefs;/d' src/main.c && autoreconf -i -f -I $OECORE_TARGET_SYSROOT/usr/share/aclocal -I m4".format(**dirs))
self._run("cd {build} && {source}/configure $CONFIGURE_FLAGS".format(**dirs))
self._run("cd {build} && make -j".format(**dirs))
self._run("cd {build} && make install DESTDIR={install}".format(**dirs))
diff --git a/meta/lib/oeqa/sdk/cases/buildlzip.py b/meta/lib/oeqa/sdk/cases/buildlzip.py
index 515acd2891..49ae756bf3 100644
--- a/meta/lib/oeqa/sdk/cases/buildlzip.py
+++ b/meta/lib/oeqa/sdk/cases/buildlzip.py
@@ -20,7 +20,7 @@ class BuildLzipTest(OESDKTestCase):
dirs["build"] = os.path.join(testdir, "build")
dirs["install"] = os.path.join(testdir, "install")
- subprocess.check_output(["tar", "xf", tarball, "-C", testdir])
+ subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT)
self.assertTrue(os.path.isdir(dirs["source"]))
os.makedirs(dirs["build"])
diff --git a/meta/lib/oeqa/sdkext/testsdk.py b/meta/lib/oeqa/sdkext/testsdk.py
index c5c46df6cd..159f0d135b 100644
--- a/meta/lib/oeqa/sdkext/testsdk.py
+++ b/meta/lib/oeqa/sdkext/testsdk.py
@@ -67,10 +67,10 @@ class TestSDKExt(TestSDKBase):
# and we don't spend hours downloading kernels for the kernel module test
# Abuse auto.conf since local.conf would be overwritten by the SDK
with open(os.path.join(sdk_dir, 'conf', 'auto.conf'), 'a+') as f:
- f.write('SSTATE_MIRRORS += " \\n file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
+ f.write('SSTATE_MIRRORS += "file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
f.write('SOURCE_MIRROR_URL = "file://%s"\n' % test_data.get('DL_DIR'))
f.write('INHERIT += "own-mirrors"\n')
- f.write('PREMIRRORS_prepend = " git://git.yoctoproject.org/.* git://%s/git2/git.yoctoproject.org.BASENAME \\n "\n' % test_data.get('DL_DIR'))
+ f.write('PREMIRRORS:prepend = "git://git.yoctoproject.org/.* git://%s/git2/git.yoctoproject.org.BASENAME "\n' % test_data.get('DL_DIR'))
# We need to do this in case we have a minimal SDK
subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % \
@@ -99,6 +99,9 @@ class TestSDKExt(TestSDKBase):
if not result.wasSuccessful():
fail = True
+ # Clean the workspace/sources to avoid `devtool add' failure because of non-empty source directory
+ bb.utils.remove(sdk_dir+'workspace/sources', True)
+
if fail:
bb.fatal("%s - FAILED - check the task log and the commands log" % pn)
diff --git a/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py b/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
index f7c356ad09..7ac03f0cec 100644
--- a/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
+++ b/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
@@ -90,7 +90,7 @@ class RebuildFromSState(SStateBase):
self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate(all others have passed both cleansstate and rebuild from sstate tests): %s" % ' '.join(map(str, failed_cleansstate)))
def test_sstate_relocation(self):
- self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True)
+ self.run_test_sstate_rebuild(['core-image-weston-sdk'], relocate=True, rebuild_dependencies=True)
def test_sstate_rebuild(self):
- self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True)
+ self.run_test_sstate_rebuild(['core-image-weston-sdk'], relocate=False, rebuild_dependencies=True)
diff --git a/meta/lib/oeqa/selftest/cases/archiver.py b/meta/lib/oeqa/selftest/cases/archiver.py
index ddd08ecf84..75195241b7 100644
--- a/meta/lib/oeqa/selftest/cases/archiver.py
+++ b/meta/lib/oeqa/selftest/cases/archiver.py
@@ -35,11 +35,11 @@ class Archiver(OESelftestTestCase):
src_path = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
# Check that include_recipe was included
- included_present = len(glob.glob(src_path + '/%s-*' % include_recipe))
+ included_present = len(glob.glob(src_path + '/%s-*/*' % include_recipe))
self.assertTrue(included_present, 'Recipe %s was not included.' % include_recipe)
# Check that exclude_recipe was excluded
- excluded_present = len(glob.glob(src_path + '/%s-*' % exclude_recipe))
+ excluded_present = len(glob.glob(src_path + '/%s-*/*' % exclude_recipe))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % exclude_recipe)
def test_archiver_filters_by_type(self):
@@ -67,11 +67,11 @@ class Archiver(OESelftestTestCase):
src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
# Check that target_recipe was included
- included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipe))
+ included_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipe))
self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipe)
# Check that native_recipe was excluded
- excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipe))
+ excluded_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipe))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipe)
def test_archiver_filters_by_type_and_name(self):
@@ -104,17 +104,17 @@ class Archiver(OESelftestTestCase):
src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
# Check that target_recipe[0] and native_recipes[1] were included
- included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[0]))
+ included_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipes[0]))
self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipes[0])
- included_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[1]))
+ included_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipes[1]))
self.assertTrue(included_present, 'Recipe %s was not included.' % native_recipes[1])
# Check that native_recipes[0] and target_recipes[1] were excluded
- excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[0]))
+ excluded_present = len(glob.glob(src_path_native + '/%s-*/*' % native_recipes[0]))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipes[0])
- excluded_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[1]))
+ excluded_present = len(glob.glob(src_path_target + '/%s-*/*' % target_recipes[1]))
self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % target_recipes[1])
@@ -163,21 +163,21 @@ class Archiver(OESelftestTestCase):
Test that the archiver works with `ARCHIVER_MODE[src] = "patched"`.
"""
- self._test_archiver_mode('patched', 'selftest-ed-native-1.14.1-r0-patched.tar.gz')
+ self._test_archiver_mode('patched', 'selftest-ed-native-1.14.1-r0-patched.tar.xz')
def test_archiver_mode_configured(self):
"""
Test that the archiver works with `ARCHIVER_MODE[src] = "configured"`.
"""
- self._test_archiver_mode('configured', 'selftest-ed-native-1.14.1-r0-configured.tar.gz')
+ self._test_archiver_mode('configured', 'selftest-ed-native-1.14.1-r0-configured.tar.xz')
def test_archiver_mode_recipe(self):
"""
Test that the archiver works with `ARCHIVER_MODE[recipe] = "1"`.
"""
- self._test_archiver_mode('patched', 'selftest-ed-native-1.14.1-r0-recipe.tar.gz',
+ self._test_archiver_mode('patched', 'selftest-ed-native-1.14.1-r0-recipe.tar.xz',
'ARCHIVER_MODE[recipe] = "1"\n')
def test_archiver_mode_diff(self):
diff --git a/meta/lib/oeqa/selftest/cases/bblayers.py b/meta/lib/oeqa/selftest/cases/bblayers.py
index f131d9856c..7d74833f61 100644
--- a/meta/lib/oeqa/selftest/cases/bblayers.py
+++ b/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -12,6 +12,11 @@ from oeqa.selftest.case import OESelftestTestCase
class BitbakeLayers(OESelftestTestCase):
+ def test_bitbakelayers_layerindexshowdepends(self):
+ result = runCmd('bitbake-layers layerindex-show-depends meta-poky')
+ find_in_contents = re.search("openembedded-core", result.output)
+ self.assertTrue(find_in_contents, msg = "openembedded-core should have been listed at this step. bitbake-layers layerindex-show-depends meta-poky output: %s" % result.output)
+
def test_bitbakelayers_showcrossdepends(self):
result = runCmd('bitbake-layers show-cross-depends')
self.assertIn('aspell', result.output)
diff --git a/meta/lib/oeqa/selftest/cases/bblogging.py b/meta/lib/oeqa/selftest/cases/bblogging.py
new file mode 100644
index 0000000000..317e68b82f
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/bblogging.py
@@ -0,0 +1,186 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class BitBakeLogging(OESelftestTestCase):
+
+ def assertCount(self, item, entry, count):
+ self.assertEqual(item.count(entry), count, msg="Output:\n'''\n%s\n'''\ndoesn't contain %d copies of:\n'''\n%s\n'''\n" % (item, count, entry))
+
+ def test_shell_loggingA(self):
+ # no logs, no verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c shelltest -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertNotIn("This is shell stdout", result.output)
+ self.assertNotIn("This is shell stderr", result.output)
+
+ def test_shell_loggingB(self):
+ # logs, no verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c shelltest -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertCount(result.output, "This is shell stdout", 1)
+ self.assertCount(result.output, "This is shell stderr", 1)
+
+ def test_shell_loggingC(self):
+ # no logs, verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c shelltest -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # two copies due to set +x
+ self.assertCount(result.output, "This is shell stdout", 2)
+ self.assertCount(result.output, "This is shell stderr", 2)
+
+ def test_shell_loggingD(self):
+ # logs, verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c shelltest -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # two copies due to set +x
+ self.assertCount(result.output, "This is shell stdout", 2)
+ self.assertCount(result.output, "This is shell stderr", 2)
+
+ def test_python_exec_func_shell_loggingA(self):
+ # no logs, no verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exec_func_shell -f",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertNotIn("This is shell stdout", result.output)
+ self.assertNotIn("This is shell stderr", result.output)
+
+ def test_python_exec_func_shell_loggingB(self):
+ # logs, no verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exec_func_shell -f",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertCount(result.output, "This is shell stdout", 1)
+ self.assertCount(result.output, "This is shell stderr", 1)
+
+ def test_python_exec_func_shell_loggingC(self):
+ # no logs, verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exec_func_shell -f -v",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # two copies due to set +x
+ self.assertCount(result.output, "This is shell stdout", 2)
+ self.assertCount(result.output, "This is shell stderr", 2)
+
+ def test_python_exec_func_shell_loggingD(self):
+ # logs, verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exec_func_shell -f -v",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # two copies due to set +x
+ self.assertCount(result.output, "This is shell stdout", 2)
+ self.assertCount(result.output, "This is shell stderr", 2)
+
+ def test_python_exit_loggingA(self):
+ # no logs, no verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exit -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertNotIn("This is python stdout", result.output)
+
+ def test_python_exit_loggingB(self):
+ # logs, no verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exit -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # A sys.exit() should include the output
+ self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_exit_loggingC(self):
+ # no logs, verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exit -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_exit_loggingD(self):
+ # logs, verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exit -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_exec_func_python_loggingA(self):
+ # no logs, no verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exec_func_python -f",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertNotIn("This is python stdout", result.output)
+
+ def test_python_exec_func_python_loggingB(self):
+ # logs, no verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exec_func_python -f",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # A sys.exit() should include the output
+ self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_exec_func_python_loggingC(self):
+ # no logs, verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_exec_func_python -f -v",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_exec_func_python_loggingD(self):
+ # logs, verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_exec_func_python -f -v",
+ ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python stdout", 1)
+
+ def test_python_fatal_loggingA(self):
+ # no logs, no verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_fatal -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ self.assertNotIn("This is python fatal test stdout", result.output)
+ self.assertCount(result.output, "This is a fatal error", 1)
+
+ def test_python_fatal_loggingB(self):
+ # logs, no verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_fatal -f", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # A bb.fatal() should not include the output
+ self.assertNotIn("This is python fatal test stdout", result.output)
+ self.assertCount(result.output, "This is a fatal error", 1)
+
+ def test_python_fatal_loggingC(self):
+ # no logs, verbose
+ self.write_config('BBINCLUDELOGS = ""')
+ result = bitbake("logging-test -c pythontest_fatal -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python fatal test stdout", 1)
+ self.assertCount(result.output, "This is a fatal error", 1)
+
+ def test_python_fatal_loggingD(self):
+ # logs, verbose
+ self.write_config('BBINCLUDELOGS = "yes"')
+ result = bitbake("logging-test -c pythontest_fatal -f -v", ignore_status = True)
+ self.assertIn("ERROR: Logfile of failure stored in:", result.output)
+ # python tasks don't log output with -v currently
+ #self.assertCount(result.output, "This is python fatal test stdout", 1)
+ self.assertCount(result.output, "This is a fatal error", 1)
+
diff --git a/meta/lib/oeqa/selftest/cases/bbtests.py b/meta/lib/oeqa/selftest/cases/bbtests.py
index dc423ec439..cfac7afcf4 100644
--- a/meta/lib/oeqa/selftest/cases/bbtests.py
+++ b/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -63,15 +63,15 @@ class BitbakeTests(OESelftestTestCase):
def test_warnings_errors(self):
result = bitbake('-b asdf', ignore_status=True)
- find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
- find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
+ find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages*", result.output)
+ find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages*", result.output)
        self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
        self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)
def test_invalid_patch(self):
# This patch should fail to apply.
- self.write_recipeinc('man-db', 'FILESEXTRAPATHS_prepend := "${THISDIR}/files:"\nSRC_URI += "file://0001-Test-patch-here.patch"')
- self.write_config("INHERIT_remove = \"report-error\"")
+ self.write_recipeinc('man-db', 'FILESEXTRAPATHS:prepend := "${THISDIR}/files:"\nSRC_URI += "file://0001-Test-patch-here.patch"')
+ self.write_config("INHERIT:remove = \"report-error\"")
result = bitbake('man-db -c patch', ignore_status=True)
self.delete_recipeinc('man-db')
bitbake('-cclean man-db')
@@ -83,12 +83,15 @@ class BitbakeTests(OESelftestTestCase):
def test_force_task_1(self):
# test 1 from bug 5875
+ import uuid
test_recipe = 'zlib'
- test_data = "Microsoft Made No Profit From Anyone's Zunes Yo"
+    # Need to use a uuid, otherwise hash equivalence would change the workflow
+ test_data = "Microsoft Made No Profit From Anyone's Zunes Yo %s" % uuid.uuid1()
bb_vars = get_bb_vars(['D', 'PKGDEST', 'mandir'], test_recipe)
image_dir = bb_vars['D']
pkgsplit_dir = bb_vars['PKGDEST']
man_dir = bb_vars['mandir']
+ self.write_config("PACKAGE_CLASSES = \"package_rpm\"")
bitbake('-c clean %s' % test_recipe)
bitbake('-c package -f %s' % test_recipe)
@@ -138,7 +141,7 @@ class BitbakeTests(OESelftestTestCase):
self.write_recipeinc('man-db', data)
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
-INHERIT_remove = \"report-error\"
+INHERIT:remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
@@ -148,9 +151,6 @@ INHERIT_remove = \"report-error\"
self.delete_recipeinc('man-db')
        self.assertEqual(result.status, 1, msg="Command succeeded when it should have failed. bitbake output: %s" % result.output)
self.assertIn('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:', result.output)
- line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
- self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
-doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
def test_rename_downloaded_file(self):
# TODO unique dldir instead of using cleanall
@@ -160,7 +160,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
- data = 'SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
+ data = 'SRC_URI = "https://downloads.yoctoproject.org/mirror/sources/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
self.write_recipeinc('aspell', data)
result = bitbake('-f -c fetch aspell', ignore_status=True)
self.delete_recipeinc('aspell')
@@ -212,7 +212,7 @@ SSTATE_DIR = \"${TOPDIR}/download-selftest\"
def test_continue(self):
self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
SSTATE_DIR = \"${TOPDIR}/download-selftest\"
-INHERIT_remove = \"report-error\"
+INHERIT:remove = \"report-error\"
""")
self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
self.write_recipeinc('man-db',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
@@ -224,12 +224,12 @@ INHERIT_remove = \"report-error\"
self.assertLess(errorpos,continuepos, msg = "bitbake didn't pass do_fail_task. bitbake output: %s" % result.output)
def test_non_gplv3(self):
- self.write_config('INCOMPATIBLE_LICENSE = "GPLv3"')
+ self.write_config('INCOMPATIBLE_LICENSE = "GPL-3.0-or-later"')
result = bitbake('selftest-ed', ignore_status=True)
self.assertEqual(result.status, 0, "Bitbake failed, exit code %s, output %s" % (result.status, result.output))
lic_dir = get_bb_var('LICENSE_DIRECTORY')
- self.assertFalse(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv3')))
- self.assertTrue(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv2')))
+ self.assertFalse(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPL-3.0-or-later')))
+ self.assertTrue(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPL-2.0-or-later')))
def test_setscene_only(self):
""" Bitbake option to restore from sstate only within a build (i.e. execute no real tasks, only setscene)"""
@@ -297,3 +297,57 @@ INHERIT_remove = \"report-error\"
test_recipe_summary_after = get_bb_var('SUMMARY', test_recipe)
self.assertEqual(expected_recipe_summary, test_recipe_summary_after)
+
+ def test_git_patchtool(self):
+ """ PATCHTOOL=git should work with non-git sources like tarballs
+ test recipe for the test must NOT containt git:// repository in SRC_URI
+ """
+ test_recipe = "man-db"
+ self.write_recipeinc(test_recipe, 'PATCHTOOL=\"git\"')
+ src = get_bb_var("SRC_URI",test_recipe)
+ gitscm = re.search("git://", src)
+        self.assertFalse(gitscm, "test_git_patchtool pre-condition failed: {} test recipe contains a git repo!".format(test_recipe))
+ result = bitbake('{} -c patch'.format(test_recipe), ignore_status=False)
+        fatal = re.search(r"fatal: not a git repository \(or any of the parent directories\)", result.output)
+ self.assertFalse(fatal, "Failed to patch using PATCHTOOL=\"git\"")
+ self.delete_recipeinc(test_recipe)
+ bitbake('-cclean {}'.format(test_recipe))
+
+ def test_git_patchtool2(self):
+ """ Test if PATCHTOOL=git works with git repo and doesn't reinitialize it
+ """
+ test_recipe = "gitrepotest"
+ src = get_bb_var("SRC_URI",test_recipe)
+ gitscm = re.search("git://", src)
+        self.assertTrue(gitscm, "test_git_patchtool2 pre-condition failed: {} test recipe doesn't contain a git repo!".format(test_recipe))
+ result = bitbake('{} -c patch'.format(test_recipe), ignore_status=False)
+ srcdir = get_bb_var('S', test_recipe)
+ result = runCmd("git log", cwd = srcdir)
+ self.assertFalse("bitbake_patching_started" in result.output, msg = "Repository has been reinitialized. {}".format(srcdir))
+ self.delete_recipeinc(test_recipe)
+ bitbake('-cclean {}'.format(test_recipe))
+
+
+ def test_git_unpack_nonetwork(self):
+ """
+ Test that a recipe with a floating tag that needs to be resolved upstream doesn't
+        access the network in a patch task run in a separate build invocation
+ """
+
+ # Enable the recipe to float using a distro override
+ self.write_config("DISTROOVERRIDES .= \":gitunpack-enable-recipe\"")
+
+ bitbake('gitunpackoffline -c fetch')
+ bitbake('gitunpackoffline -c patch')
+
+ def test_git_unpack_nonetwork_fail(self):
+ """
+        Test that a recipe with a floating tag whose fetcher doesn't call get_srcrev()
+ raises an error when the fetcher is called.
+ """
+
+ # Enable the recipe to float using a distro override
+ self.write_config("DISTROOVERRIDES .= \":gitunpack-enable-recipe\"")
+
+ result = bitbake('gitunpackoffline-fail -c fetch', ignore_status=True)
+ self.assertTrue("Recipe uses a floating tag/branch without a fixed SRCREV" in result.output, msg = "Recipe without PV set to SRCPV should have failed: %s" % result.output)
diff --git a/meta/lib/oeqa/selftest/cases/buildoptions.py b/meta/lib/oeqa/selftest/cases/buildoptions.py
index e91f0bd18f..bfe613b847 100644
--- a/meta/lib/oeqa/selftest/cases/buildoptions.py
+++ b/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -38,34 +38,35 @@ class ImageOptionsTests(OESelftestTestCase):
p = bb_vars['SYSROOT_DESTDIR'] + bb_vars['bindir'] + "/" + "ccache"
self.assertTrue(os.path.isfile(p), msg = "No ccache found (%s)" % p)
self.write_config('INHERIT += "ccache"')
- self.add_command_to_tearDown('bitbake -c clean m4-native')
- bitbake("m4-native -c clean")
- bitbake("m4-native -f -c compile")
- log_compile = os.path.join(get_bb_var("WORKDIR","m4-native"), "temp/log.do_compile")
+ recipe = "libgcc-initial"
+ self.add_command_to_tearDown('bitbake -c clean %s' % recipe)
+ bitbake("%s -c clean" % recipe)
+ bitbake("%s -f -c compile" % recipe)
+ log_compile = os.path.join(get_bb_var("WORKDIR", recipe), "temp/log.do_compile")
with open(log_compile, "r") as f:
loglines = "".join(f.readlines())
- self.assertIn("ccache", loglines, msg="No match for ccache in m4-native log.do_compile. For further details: %s" % log_compile)
+ self.assertIn("ccache", loglines, msg="No match for ccache in %s log.do_compile. For further details: %s" % (recipe , log_compile))
def test_read_only_image(self):
distro_features = get_bb_var('DISTRO_FEATURES')
if not ('x11' in distro_features and 'opengl' in distro_features):
- self.skipTest('core-image-sato requires x11 and opengl in distro features')
+ self.skipTest('core-image-sato/weston requires x11 and opengl in distro features')
self.write_config('IMAGE_FEATURES += "read-only-rootfs"')
- bitbake("core-image-sato")
+ bitbake("core-image-sato core-image-weston")
# do_image will fail if there are any pending postinsts
class DiskMonTest(OESelftestTestCase):
def test_stoptask_behavior(self):
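+        # BB_HEARTBEAT_EVENT = "1" makes bitbake emit heartbeat events every
+        # second; the disk monitor acts on heartbeats, so this keeps the
+        # short-lived test responsive.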
- self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay", ignore_status = True)
self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output, msg = "Tasks should have stopped. Disk monitor is set to STOPTASK: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
- self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "HALT,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay", ignore_status = True)
- self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output, "Tasks should have been aborted immediatelly. Disk monitor is set to ABORT: %s" % res.output)
+ self.assertTrue('ERROR: Immediately halt since the disk space monitor action is "HALT"!' in res.output, "Tasks should have been halted immediately. Disk monitor is set to HALT: %s" % res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
- self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
+ self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"\nBB_HEARTBEAT_EVENT = "1"')
res = bitbake("delay -c delay")
self.assertTrue('WARNING: The free space' in res.output, msg = "A warning should have been displayed for disk monitor is set to WARN: %s" %res.output)
@@ -77,9 +78,9 @@ class SanityOptionsTest(OESelftestTestCase):
def test_options_warnqa_errorqa_switch(self):
- self.write_config("INHERIT_remove = \"report-error\"")
+ self.write_config("INHERIT:remove = \"report-error\"")
if "packages-list" not in get_bb_var("ERROR_QA"):
- self.append_config("ERROR_QA_append = \" packages-list\"")
+ self.append_config("ERROR_QA:append = \" packages-list\"")
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
self.add_command_to_tearDown('bitbake -c clean xcursor-transparent-theme')
@@ -89,8 +90,8 @@ class SanityOptionsTest(OESelftestTestCase):
self.assertTrue(line and line.startswith("ERROR:"), msg=res.output)
self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
- self.append_config('ERROR_QA_remove = "packages-list"')
- self.append_config('WARN_QA_append = " packages-list"')
+ self.append_config('ERROR_QA:remove = "packages-list"')
+ self.append_config('WARN_QA:append = " packages-list"')
res = bitbake("xcursor-transparent-theme -f -c package")
self.delete_recipeinc('xcursor-transparent-theme')
line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.")
@@ -147,6 +148,30 @@ class BuildhistoryTests(BuildhistoryBase):
self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
+ def test_fileinfo(self):
+ self.config_buildhistory()
+ bitbake('hicolor-icon-theme')
+ history_dir = get_bb_var('BUILDHISTORY_DIR_PACKAGE', 'hicolor-icon-theme')
+ self.assertTrue(os.path.isdir(history_dir), 'buildhistory dir was not created.')
+
+ def load_bh(f):
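+            # Parse a buildhistory 'latest' file of "KEY = value" lines into a dict.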
+ d = {}
+ for line in open(f):
+ split = [s.strip() for s in line.split('=', 1)]
+ if len(split) > 1:
+ d[split[0]] = split[1]
+ return d
+
+ data = load_bh(os.path.join(history_dir, 'hicolor-icon-theme', 'latest'))
+ self.assertIn('FILELIST', data)
+ self.assertEqual(data['FILELIST'], '/usr/share/icons/hicolor/index.theme')
+ self.assertGreater(int(data['PKGSIZE']), 0)
+
+ data = load_bh(os.path.join(history_dir, 'hicolor-icon-theme-dev', 'latest'))
+ if 'FILELIST' in data:
+ self.assertEqual(data['FILELIST'], '')
+ self.assertEqual(int(data['PKGSIZE']), 0)
+
class ArchiverTest(OESelftestTestCase):
def test_arch_work_dir_and_export_source(self):
"""
@@ -158,8 +183,8 @@ class ArchiverTest(OESelftestTestCase):
deploy_dir_src = get_bb_var('DEPLOY_DIR_SRC')
pkgs_path = g.glob(str(deploy_dir_src) + "/allarch*/xcurs*")
src_file_glob = str(pkgs_path[0]) + "/xcursor*.src.rpm"
- tar_file_glob = str(pkgs_path[0]) + "/xcursor*.tar.gz"
- self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.gz files under %s/allarch*/xcursor*" % deploy_dir_src)
+ tar_file_glob = str(pkgs_path[0]) + "/xcursor*.tar.xz"
+ self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.xz files under %s/allarch*/xcursor*" % deploy_dir_src)
class ToolchainOptions(OESelftestTestCase):
def test_toolchain_fortran(self):
@@ -167,7 +192,7 @@ class ToolchainOptions(OESelftestTestCase):
Test that Fortran works by building a Hello, World binary.
"""
- features = 'FORTRAN_forcevariable = ",fortran"\n'
+ features = 'FORTRAN:forcevariable = ",fortran"\n'
self.write_config(features)
bitbake('fortran-helloworld')
@@ -196,3 +221,9 @@ PREMIRRORS = "\\
bitbake("world --runall fetch")
+
+class Poisoning(OESelftestTestCase):
+ def test_poisoning(self):
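+        # The "poison" test recipe references host system paths; the poisoned
+        # cross-compiler should flag them as unsafe for cross-compilation.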
+ res = bitbake("poison", ignore_status=True)
+ self.assertNotEqual(res.status, 0)
+ self.assertTrue("is unsafe for cross-compilation" in res.output)
diff --git a/meta/lib/oeqa/selftest/cases/containerimage.py b/meta/lib/oeqa/selftest/cases/containerimage.py
index c0998e319e..e0aea1a1ef 100644
--- a/meta/lib/oeqa/selftest/cases/containerimage.py
+++ b/meta/lib/oeqa/selftest/cases/containerimage.py
@@ -13,7 +13,7 @@ from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
# The only package added to the image is container_image_testpkg, which
# contains one file. However, due to some other things not cleaning up during
# rootfs creation, there is some cruft. Ideally bugs will be filed and the
-# cruft removed, but for now we whitelist some known set.
+# cruft removed, but for now we ignore some known set.
#
# Also for performance reasons we're only checking the cruft when using ipk.
# When using deb, and rpm it is a bit different and we could test all
@@ -22,7 +22,7 @@ from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
#
class ContainerImageTests(OESelftestTestCase):
- # Verify that when specifying a IMAGE_TYPEDEP_ of the form "foo.bar" that
+    # Verify that when specifying an IMAGE_TYPEDEP: of the form "foo.bar",
# the conversion type bar gets added as a dep as well
def test_expected_files(self):
@@ -42,6 +42,9 @@ IMAGE_FSTYPES = "container"
PACKAGE_CLASSES = "package_ipk"
IMAGE_FEATURES = ""
IMAGE_BUILDINFO_FILE = ""
+INIT_MANAGER = "sysvinit"
+IMAGE_INSTALL:remove = "ssh-pregen-hostkeys"
+
""")
bbvars = get_bb_vars(['bindir', 'sysconfdir', 'localstatedir',
@@ -57,11 +60,7 @@ IMAGE_BUILDINFO_FILE = ""
'.{sysconfdir}/version',
'./run/',
'.{localstatedir}/cache/',
- '.{localstatedir}/cache/ldconfig/',
- '.{localstatedir}/cache/ldconfig/aux-cache',
- '.{localstatedir}/cache/opkg/',
- '.{localstatedir}/lib/',
- '.{localstatedir}/lib/opkg/'
+ '.{localstatedir}/lib/'
]
expected_files = [ x.format(bindir=bbvars['bindir'],
diff --git a/meta/lib/oeqa/selftest/cases/cve_check.py b/meta/lib/oeqa/selftest/cases/cve_check.py
new file mode 100644
index 0000000000..d1947baffc
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/cve_check.py
@@ -0,0 +1,44 @@
+from oe.cve_check import Version
+from oeqa.selftest.case import OESelftestTestCase
+
+class CVECheck(OESelftestTestCase):
+
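+    # oe.cve_check.Version compares package versions for the CVE checker.
+    # The optional second argument selects suffix handling: by default
+    # trailing letter/patch suffixes are ignored, "alphabetical" treats a
+    # trailing letter as a patch level, and "patch" treats "p"/"patch"
+    # suffixes as patch releases.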
+ def test_version_compare(self):
+ result = Version("100") > Version("99")
+ self.assertTrue( result, msg="Failed to compare version '100' > '99'")
+ result = Version("2.3.1") > Version("2.2.3")
+ self.assertTrue( result, msg="Failed to compare version '2.3.1' > '2.2.3'")
+ result = Version("2021-01-21") > Version("2020-12-25")
+ self.assertTrue( result, msg="Failed to compare version '2021-01-21' > '2020-12-25'")
+ result = Version("1.2-20200910") < Version("1.2-20200920")
+ self.assertTrue( result, msg="Failed to compare version '1.2-20200910' < '1.2-20200920'")
+
+ result = Version("1.0") >= Version("1.0beta")
+ self.assertTrue( result, msg="Failed to compare version '1.0' >= '1.0beta'")
+ result = Version("1.0-rc2") > Version("1.0-rc1")
+ self.assertTrue( result, msg="Failed to compare version '1.0-rc2' > '1.0-rc1'")
+ result = Version("1.0.alpha1") < Version("1.0")
+ self.assertTrue( result, msg="Failed to compare version '1.0.alpha1' < '1.0'")
+ result = Version("1.0_dev") <= Version("1.0")
+ self.assertTrue( result, msg="Failed to compare version '1.0_dev' <= '1.0'")
+
+ # ignore "p1" and "p2", so these should be equal
+ result = Version("1.0p2") == Version("1.0p1")
+ self.assertTrue( result ,msg="Failed to compare version '1.0p2' to '1.0p1'")
+ # ignore the "b" and "r"
+ result = Version("1.0b") == Version("1.0r")
+ self.assertTrue( result ,msg="Failed to compare version '1.0b' to '1.0r'")
+
+    # consider a trailing letter to be the patch level when comparing
+ result = Version("1.0b","alphabetical") < Version("1.0r","alphabetical")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0b' < '1.0r'")
+ result = Version("1.0b","alphabetical") > Version("1.0","alphabetical")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0b' > '1.0'")
+
+ # consider the trailing "p" and "patch" as patched released when comparing
+ result = Version("1.0","patch") < Version("1.0p1","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0' < '1.0p1'")
+ result = Version("1.0p2","patch") > Version("1.0p1","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0p2' > '1.0p1'")
+ result = Version("1.0_patch2","patch") < Version("1.0_patch3","patch")
+ self.assertTrue( result ,msg="Failed to compare version with suffix '1.0_patch2' < '1.0_patch3'")
diff --git a/meta/lib/oeqa/selftest/cases/devtool.py b/meta/lib/oeqa/selftest/cases/devtool.py
index b383ed9c50..e910672c31 100644
--- a/meta/lib/oeqa/selftest/cases/devtool.py
+++ b/meta/lib/oeqa/selftest/cases/devtool.py
@@ -56,7 +56,8 @@ def setUpModule():
if pth.startswith(canonical_layerpath):
if relpth.endswith('/'):
destdir = os.path.join(corecopydir, relpth)
- shutil.copytree(pth, destdir)
+ # avoid race condition by not copying .pyc files YPBZ#13421,13803
+ shutil.copytree(pth, destdir, ignore=shutil.ignore_patterns('*.pyc', '__pycache__'))
else:
destdir = os.path.join(corecopydir, os.path.dirname(relpth))
bb.utils.mkdirhier(destdir)
@@ -79,32 +80,15 @@ def tearDownModule():
bb.utils.edit_bblayers_conf(bblayers_conf, None, None, bblayers_edit_cb)
shutil.rmtree(templayerdir)
-class DevtoolBase(OESelftestTestCase):
-
- @classmethod
- def setUpClass(cls):
- super(DevtoolBase, cls).setUpClass()
- bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
- cls.original_sstate = bb_vars['SSTATE_DIR']
- cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
- cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
- cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
- % cls.original_sstate)
-
- @classmethod
- def tearDownClass(cls):
- cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
- runCmd('rm -rf %s' % cls.devtool_sstate)
- super(DevtoolBase, cls).tearDownClass()
+class DevtoolTestCase(OESelftestTestCase):
def setUp(self):
"""Test case setup function"""
- super(DevtoolBase, self).setUp()
+ super(DevtoolTestCase, self).setUp()
self.workspacedir = os.path.join(self.builddir, 'workspace')
self.assertTrue(not os.path.exists(self.workspacedir),
'This test cannot be run with a workspace directory '
'under the build directory')
- self.append_config(self.sstate_conf)
def _check_src_repo(self, repo_dir):
"""Check srctree git repository"""
@@ -235,6 +219,30 @@ class DevtoolBase(OESelftestTestCase):
return filelist
+class DevtoolBase(DevtoolTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(DevtoolBase, cls).setUpClass()
+ bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
+ cls.original_sstate = bb_vars['SSTATE_DIR']
+ cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
+ cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
+ cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
+ % cls.original_sstate)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
+ runCmd('rm -rf %s' % cls.devtool_sstate)
+ super(DevtoolBase, cls).tearDownClass()
+
+ def setUp(self):
+ """Test case setup function"""
+ super(DevtoolBase, self).setUp()
+ self.append_config(self.sstate_conf)
+
+
class DevtoolTests(DevtoolBase):
def test_create_workspace(self):
@@ -268,7 +276,7 @@ class DevtoolAddTests(DevtoolBase):
self.track_for_cleanup(tempdir)
pn = 'pv'
pv = '1.5.3'
- url = 'http://www.ivarch.com/programs/sources/pv-1.5.3.tar.bz2'
+ url = 'http://downloads.yoctoproject.org/mirror/sources/pv-1.5.3.tar.bz2'
result = runCmd('wget %s' % url, cwd=tempdir)
result = runCmd('tar xfv %s' % os.path.basename(url), cwd=tempdir)
srcdir = os.path.join(tempdir, '%s-%s' % (pn, pv))
@@ -335,11 +343,11 @@ class DevtoolAddTests(DevtoolBase):
self.assertIn(srcdir, result.output)
self.assertIn(recipefile, result.output)
checkvars = {}
- checkvars['LICENSE'] = 'GPLv2'
+ checkvars['LICENSE'] = 'GPL-2.0-only'
checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '0.1+git${SRCPV}'
- checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/dbus-wait;protocol=https'
+ checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/dbus-wait;protocol=https;branch=master'
checkvars['SRCREV'] = srcrev
checkvars['DEPENDS'] = set(['dbus'])
self._test_recipe_contents(recipefile, checkvars, [])
@@ -372,7 +380,7 @@ class DevtoolAddTests(DevtoolBase):
recipefile = '%s/recipes/libftdi/libftdi_%s.bb' % (self.workspacedir, version)
result = runCmd('recipetool setvar %s EXTRA_OECMAKE -- \'-DPYTHON_BINDINGS=OFF -DLIBFTDI_CMAKE_CONFIG_DIR=${datadir}/cmake/Modules\'' % recipefile)
with open(recipefile, 'a') as f:
- f.write('\nFILES_${PN}-dev += "${datadir}/cmake/Modules"\n')
+ f.write('\nFILES:${PN}-dev += "${datadir}/cmake/Modules"\n')
# We don't have the ability to pick up this dependency automatically yet...
f.write('\nDEPENDS += "libusb1"\n')
f.write('\nTESTLIBOUTPUT = "${COMPONENTS_DIR}/${TUNE_PKGARCH}/${PN}/${libdir}"\n')
@@ -441,6 +449,7 @@ class DevtoolAddTests(DevtoolBase):
tempdir = tempfile.mkdtemp(prefix='devtoolqa')
self.track_for_cleanup(tempdir)
url = 'gitsm://git.yoctoproject.org/mraa'
+ url_branch = '%s;branch=master' % url
checkrev = 'ae127b19a50aa54255e4330ccfdd9a5d058e581d'
testrecipe = 'mraa'
srcdir = os.path.join(tempdir, testrecipe)
@@ -461,7 +470,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars = {}
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '1.0+git${SRCPV}'
- checkvars['SRC_URI'] = url
+ checkvars['SRC_URI'] = url_branch
checkvars['SRCREV'] = '${AUTOREV}'
self._test_recipe_contents(recipefile, checkvars, [])
# Try with revision and version specified
@@ -480,7 +489,7 @@ class DevtoolAddTests(DevtoolBase):
checkvars = {}
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '1.5+git${SRCPV}'
- checkvars['SRC_URI'] = url
+ checkvars['SRC_URI'] = url_branch
checkvars['SRCREV'] = checkrev
self._test_recipe_contents(recipefile, checkvars, [])
@@ -512,6 +521,10 @@ class DevtoolAddTests(DevtoolBase):
self._test_recipe_contents(recipefile, checkvars, [])
def test_devtool_add_npm(self):
+ collections = get_bb_var('BBFILE_COLLECTIONS').split()
+ if "openembedded-layer" not in collections:
+ self.skipTest("Test needs meta-oe for nodejs")
+
pn = 'savoirfairelinux-node-server-example'
pv = '1.0.0'
url = 'npm://registry.npmjs.org;package=@savoirfairelinux/node-server-example;version=' + pv
@@ -644,7 +657,7 @@ class DevtoolModifyTests(DevtoolBase):
self.track_for_cleanup(self.workspacedir)
self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
- testrecipes = 'perf kernel-devsrc package-index core-image-minimal meta-toolchain packagegroup-core-sdk meta-ide-support'.split()
+ testrecipes = 'perf kernel-devsrc package-index core-image-minimal meta-toolchain packagegroup-core-sdk'.split()
# Find actual name of gcc-source since it now includes the version - crude, but good enough for this purpose
result = runCmd('bitbake-layers show-recipes gcc-source*')
for line in result.output.splitlines():
@@ -692,7 +705,44 @@ class DevtoolModifyTests(DevtoolBase):
self.assertTrue(bbclassextended, 'None of these recipes are BBCLASSEXTENDed to native - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
self.assertTrue(inheritnative, 'None of these recipes do "inherit native" - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
+ def test_devtool_modify_localfiles_only(self):
+ # Check preconditions
+ testrecipe = 'base-files'
+ src_uri = (get_bb_var('SRC_URI', testrecipe) or '').split()
+ foundlocalonly = False
+ correct_symlink = False
+ for item in src_uri:
+ if item.startswith('file://'):
+ if '.patch' not in item:
+ foundlocalonly = True
+ else:
+ foundlocalonly = False
+ break
+ self.assertTrue(foundlocalonly, 'This test expects the %s recipe to fetch local files only and it seems that it no longer does' % testrecipe)
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('%s -c cleansstate' % testrecipe)
+ # Try modifying a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ srcfile = os.path.join(tempdir, 'oe-local-files/share/dot.bashrc')
+ srclink = os.path.join(tempdir, 'share/dot.bashrc')
+ self.assertExists(srcfile, 'Extracted source could not be found')
+ if os.path.islink(srclink) and os.path.exists(srclink) and os.path.samefile(srcfile, srclink):
+ correct_symlink = True
+ self.assertTrue(correct_symlink, 'Source symlink to oe-local-files is broken')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
+ self.assertTrue(matches, 'bbappend not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Try building
+ bitbake(testrecipe)
def test_devtool_modify_git(self):
# Check preconditions
@@ -772,6 +822,26 @@ class DevtoolModifyTests(DevtoolBase):
self._check_src_repo(tempdir)
# This is probably sufficient
+ def test_devtool_modify_overrides(self):
+ # Try modifying a recipe with patches in overrides
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify devtool-patch-overrides -x %s' % (tempdir))
+
+ self._check_src_repo(tempdir)
+ source = os.path.join(tempdir, "source")
+ def check(branch, expected):
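+            # Each devtool-generated branch should contain the source content
+            # as resolved for that override.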
+ runCmd('git -C %s checkout %s' % (tempdir, branch))
+ with open(source, "rt") as f:
+ content = f.read()
+            self.assertEqual(content, expected)
+ check('devtool', 'This is a test for something\n')
+ check('devtool-no-overrides', 'This is a test for something\n')
+ check('devtool-override-qemuarm', 'This is a test for qemuarm\n')
+ check('devtool-override-qemux86', 'This is a test for qemux86\n')
+
class DevtoolUpdateTests(DevtoolBase):
def test_devtool_update_recipe(self):
@@ -842,7 +912,7 @@ class DevtoolUpdateTests(DevtoolBase):
self._check_repo_status(os.path.dirname(recipefile), expected_status)
result = runCmd('git diff %s' % os.path.basename(recipefile), cwd=os.path.dirname(recipefile))
- addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git"']
+ addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git;branch=master"']
srcurilines = src_uri.split()
srcurilines[0] = 'SRC_URI = "' + srcurilines[0]
srcurilines.append('"')
@@ -913,7 +983,7 @@ class DevtoolUpdateTests(DevtoolBase):
self.assertExists(patchfile, 'Patch file not created')
# Check bbappend contents
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://0001-Add-our-custom-version.patch"\n',
'\n']
@@ -928,7 +998,7 @@ class DevtoolUpdateTests(DevtoolBase):
result = runCmd('git reset HEAD^', cwd=tempsrcdir)
result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
self.assertNotExists(patchfile, 'Patch file not deleted')
- expectedlines2 = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines2 = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
with open(bbappendfile, 'r') as f:
self.assertEqual(expectedlines2, f.readlines())
diff --git a/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt b/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt
new file mode 100644
index 0000000000..f70f10e4db
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt
@@ -0,0 +1 @@
+A
diff --git a/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt b/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt
new file mode 100644
index 0000000000..223b7836fb
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt
@@ -0,0 +1 @@
+B
diff --git a/meta/lib/oeqa/selftest/cases/distrodata.py b/meta/lib/oeqa/selftest/cases/distrodata.py
index e1cfc3b621..03f31e9fcb 100644
--- a/meta/lib/oeqa/selftest/cases/distrodata.py
+++ b/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -18,7 +18,7 @@ class Distrodata(OESelftestTestCase):
Product: oe-core
Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
- feature = 'LICENSE_FLAGS_WHITELIST += " commercial"\n'
+ feature = 'LICENSE_FLAGS_ACCEPTED += " commercial"\n'
self.write_config(feature)
pkgs = oe.recipeutils.get_recipe_upgrade_status()
@@ -40,6 +40,42 @@ but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please re
""" + "\n".join(regressed_successes)
self.assertTrue(len(regressed_failures) == 0 and len(regressed_successes) == 0, msg)
+ def test_missing_homepg(self):
+ """
+ Summary: Test for oe-core recipes that don't have a HOMEPAGE or DESCRIPTION
+ Expected: All oe-core recipes should have a DESCRIPTION entry
+ Expected: All oe-core recipes should have a HOMEPAGE entry except for recipes that are not fetched from external sources.
+ Product: oe-core
+ """
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+ no_description = []
+ no_homepage = []
+ for fn in tinfoil.all_recipe_files(variants=False):
+ if not '/meta/recipes-' in fn:
+ # We are only interested in OE-Core
+ continue
+ rd = tinfoil.parse_recipe_file(fn, appends=False)
+ pn = rd.getVar('BPN')
+ srcfile = rd.getVar('SRC_URI').split()
+                # Since DESCRIPTION defaults to SUMMARY if not set, we are only interested in recipes without DESCRIPTION or SUMMARY
+ if not (rd.getVar('SUMMARY') or rd.getVar('DESCRIPTION')):
+ no_description.append((pn, fn))
+ if not rd.getVar('HOMEPAGE'):
+ if srcfile and srcfile[0].startswith('file') or not rd.getVar('SRC_URI'):
+                    # We are only interested in recipes whose SRC_URI is fetched from external sources
+ continue
+ no_homepage.append((pn, fn))
+ if no_homepage:
+ self.fail("""
+The following recipes do not have a HOMEPAGE. Please add an entry for HOMEPAGE in the recipe.
+""" + "\n".join(['%s (%s)' % i for i in no_homepage]))
+
+ if no_description:
+ self.fail("""
+The following recipes do not have a DESCRIPTION. Please add an entry for DESCRIPTION in the recipe.
+""" + "\n".join(['%s (%s)' % i for i in no_description]))
+
def test_maintainers(self):
"""
Summary: Test that oe-core recipes have a maintainer and entries in maintainers list have a recipe
@@ -57,13 +93,13 @@ but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please re
def is_maintainer_exception(entry):
exceptions = ["musl", "newlib", "linux-yocto", "linux-dummy", "mesa-gl", "libgfortran",
- "cve-update-db-native"]
+ "cve-update-db-native", "rust"]
for i in exceptions:
if i in entry:
return True
return False
- feature = 'require conf/distro/include/maintainers.inc\nLICENSE_FLAGS_WHITELIST += " commercial"\nPARSE_ALL_RECIPES = "1"\n'
+ feature = 'require conf/distro/include/maintainers.inc\nLICENSE_FLAGS_ACCEPTED += " commercial"\nPARSE_ALL_RECIPES = "1"\nPACKAGE_CLASSES = "package_ipk package_deb package_rpm"\n'
self.write_config(feature)
with bb.tinfoil.Tinfoil() as tinfoil:
@@ -74,7 +110,7 @@ but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please re
missing_recipes = []
recipes = []
- prefix = "RECIPE_MAINTAINER_pn-"
+ prefix = "RECIPE_MAINTAINER:pn-"
# We could have used all_recipes() here, but this method will find
# every recipe if we ever move to setting RECIPE_MAINTAINER in recipe files
diff --git a/meta/lib/oeqa/selftest/cases/eSDK.py b/meta/lib/oeqa/selftest/cases/eSDK.py
index 862849af35..f7279b3230 100644
--- a/meta/lib/oeqa/selftest/cases/eSDK.py
+++ b/meta/lib/oeqa/selftest/cases/eSDK.py
@@ -63,7 +63,7 @@ class oeSDKExtSelfTest(OESelftestTestCase):
cls.env_eSDK = oeSDKExtSelfTest.get_esdk_environment('', cls.tmpdir_eSDKQA)
sstate_config="""
-SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
+ESDK_LOCALCONF_ALLOW = "SSTATE_MIRRORS"
SSTATE_MIRRORS = "file://.* file://%s/PATH"
CORE_IMAGE_EXTRA_INSTALL = "perl"
""" % sstate_dir
@@ -91,7 +91,7 @@ CORE_IMAGE_EXTRA_INSTALL = "perl"
# Configure eSDK to use sstate mirror from poky
sstate_config="""
-SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
+ESDK_LOCALCONF_ALLOW = "SSTATE_MIRRORS"
SSTATE_MIRRORS = "file://.* file://%s/PATH"
""" % bb_vars["SSTATE_DIR"]
with open(os.path.join(cls.tmpdir_eSDKQA, 'conf', 'local.conf'), 'a+') as f:
@@ -100,7 +100,7 @@ SSTATE_MIRRORS = "file://.* file://%s/PATH"
@classmethod
def tearDownClass(cls):
for i in range(0, 10):
- if os.path.exists(os.path.join(cls.tmpdir_eSDKQA, 'bitbake.lock')):
+ if os.path.exists(os.path.join(cls.tmpdir_eSDKQA, 'bitbake.lock')) or os.path.exists(os.path.join(cls.tmpdir_eSDKQA, 'cache/hashserv.db-wal')):
time.sleep(1)
else:
break
diff --git a/meta/lib/oeqa/selftest/cases/efibootpartition.py b/meta/lib/oeqa/selftest/cases/efibootpartition.py
index a61cf9bcb3..26de3a07c9 100644
--- a/meta/lib/oeqa/selftest/cases/efibootpartition.py
+++ b/meta/lib/oeqa/selftest/cases/efibootpartition.py
@@ -26,11 +26,11 @@ class GenericEFITest(OESelftestTestCase):
self.write_config(self,
"""
EFI_PROVIDER = "%s"
-IMAGE_FSTYPES_pn-%s_append = " wic"
+IMAGE_FSTYPES:pn-%s:append = " wic"
MACHINE = "%s"
-MACHINE_FEATURES_append = " efi"
+MACHINE_FEATURES:append = " efi"
WKS_FILE = "efi-bootdisk.wks.in"
-IMAGE_INSTALL_append = " grub-efi systemd-boot kernel-image-bzimage"
+IMAGE_INSTALL:append = " grub-efi systemd-boot kernel-image-bzimage"
"""
% (self.efi_provider, self.image, self.machine))
if not self.recipes_built:
diff --git a/meta/lib/oeqa/selftest/cases/fetch.py b/meta/lib/oeqa/selftest/cases/fetch.py
index 76cbadf2ff..be14272e63 100644
--- a/meta/lib/oeqa/selftest/cases/fetch.py
+++ b/meta/lib/oeqa/selftest/cases/fetch.py
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: MIT
#
+import tempfile
+import textwrap
+import bb.tinfoil
import oe.path
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
@@ -21,8 +24,8 @@ class Fetch(OESelftestTestCase):
# No mirrors, should use git to fetch successfully
features = """
DL_DIR = "%s"
-MIRRORS_forcevariable = ""
-PREMIRRORS_forcevariable = ""
+MIRRORS:forcevariable = ""
+PREMIRRORS:forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
@@ -32,8 +35,8 @@ PREMIRRORS_forcevariable = ""
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
-MIRRORS_forcevariable = ""
-PREMIRRORS_forcevariable = ""
+MIRRORS:forcevariable = ""
+PREMIRRORS:forcevariable = ""
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
@@ -44,8 +47,60 @@ PREMIRRORS_forcevariable = ""
features = """
DL_DIR = "%s"
GIT_PROXY_COMMAND = "false"
-MIRRORS_forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
+MIRRORS:forcevariable = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/"
""" % dldir
self.write_config(features)
oe.path.remove(dldir, recurse=True)
bitbake("dbus-wait -c fetch -f")
+
+
+class Dependencies(OESelftestTestCase):
+ def write_recipe(self, content, tempdir):
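+        # Write the recipe content to test.bb under tempdir and return its path.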
+ f = os.path.join(tempdir, "test.bb")
+ with open(f, "w") as fd:
+ fd.write(content)
+ return f
+
+ def test_dependencies(self):
+ """
+ Verify that the correct dependencies are generated for specific SRC_URI entries.
+ """
+
+ with bb.tinfoil.Tinfoil() as tinfoil, tempfile.TemporaryDirectory(prefix="selftest-fetch") as tempdir:
+ tinfoil.prepare(config_only=False, quiet=2)
+
+ r = """
+ LICENSE="CLOSED"
+ SRC_URI="http://example.com/tarball.zip"
+ """
+ f = self.write_recipe(textwrap.dedent(r), tempdir)
+ d = tinfoil.parse_recipe_file(f)
+ self.assertIn("wget-native", d.getVarFlag("do_fetch", "depends"))
+ self.assertIn("unzip-native", d.getVarFlag("do_unpack", "depends"))
+
+ # Verify that the downloadfilename overrides the URI
+ r = """
+ LICENSE="CLOSED"
+ SRC_URI="https://example.com/tarball;downloadfilename=something.zip"
+ """
+ f = self.write_recipe(textwrap.dedent(r), tempdir)
+ d = tinfoil.parse_recipe_file(f)
+ self.assertIn("wget-native", d.getVarFlag("do_fetch", "depends"))
+ self.assertIn("unzip-native", d.getVarFlag("do_unpack", "depends") or "")
+
+ r = """
+ LICENSE="CLOSED"
+ SRC_URI="ftp://example.com/tarball.lz"
+ """
+ f = self.write_recipe(textwrap.dedent(r), tempdir)
+ d = tinfoil.parse_recipe_file(f)
+ self.assertIn("wget-native", d.getVarFlag("do_fetch", "depends"))
+ self.assertIn("lzip-native", d.getVarFlag("do_unpack", "depends"))
+
+ r = """
+ LICENSE="CLOSED"
+ SRC_URI="git://example.com/repo;branch=master"
+ """
+ f = self.write_recipe(textwrap.dedent(r), tempdir)
+ d = tinfoil.parse_recipe_file(f)
+ self.assertIn("git-native", d.getVarFlag("do_fetch", "depends"))
diff --git a/meta/lib/oeqa/selftest/cases/fitimage.py b/meta/lib/oeqa/selftest/cases/fitimage.py
new file mode 100644
index 0000000000..f6f6a8e795
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/fitimage.py
@@ -0,0 +1,840 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
+import os
+import json
+import re
+
+class FitImageTests(OESelftestTestCase):
+
+ def test_fit_image(self):
+ """
+ Summary: Check if FIT image and Image Tree Source (its) are built
+ and the Image Tree Source has the correct fields.
+ Expected: 1. fitImage and fitImage-its can be built
+ 2. The type, load address, entrypoint address and
+ default values of kernel and ramdisk are as expected
+ in the Image Tree Source. Not all the fields are tested,
+                     only the key fields that won't vary between different
+ architectures.
+ Product: oe-core
+ Author: Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# Enable creation of fitImage
+KERNEL_IMAGETYPE = "Image"
+KERNEL_IMAGETYPES += " fitImage "
+KERNEL_CLASSES = " kernel-fitimage "
+
+# RAM disk variables including load address and entrypoint for kernel and RAM disk
+IMAGE_FSTYPES += "cpio.gz"
+INITRAMFS_IMAGE = "core-image-minimal"
+UBOOT_RD_LOADADDRESS = "0x88000000"
+UBOOT_RD_ENTRYPOINT = "0x88000000"
+UBOOT_LOADADDRESS = "0x80080000"
+UBOOT_ENTRYPOINT = "0x80080000"
+FIT_DESC = "A model description"
+"""
+ self.write_config(config)
+
+ # fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ image_type = "core-image-minimal"
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "fitImage-its-%s-%s-%s" % (image_type, machine, machine))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "fitImage-%s-%s-%s" % (image_type, machine, machine))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ # Check that the type, load address, entrypoint address and default
+ # values for kernel and ramdisk in Image Tree Source are as expected.
+ # The order of fields in the below array is important. Not all the
+        # fields are tested, only the key fields that won't vary between
+ # different architectures.
+ its_field_check = [
+ 'description = "A model description";',
+ 'type = "kernel";',
+ 'load = <0x80080000>;',
+ 'entry = <0x80080000>;',
+ 'type = "ramdisk";',
+ 'load = <0x88000000>;',
+ 'entry = <0x88000000>;',
+ 'default = "conf-1";',
+ 'kernel = "kernel-1";',
+ 'ramdisk = "ramdisk-1";'
+ ]
+
+ with open(fitimage_its_path) as its_file:
+ field_index = 0
+ for line in its_file:
+ if field_index == len(its_field_check):
+ break
+ if its_field_check[field_index] in line:
+ field_index +=1
+
+        if field_index != len(its_field_check): # if it's equal, the test passed
+ self.assertTrue(field_index == len(its_field_check),
+ "Fields in Image Tree Source File %s did not match, error in finding %s"
+ % (fitimage_its_path, its_field_check[field_index]))
+
+
+ def test_sign_fit_image(self):
+ """
+ Summary: Check if FIT image and Image Tree Source (its) are created
+ and signed correctly.
+ Expected: 1) its and FIT image are built successfully
+ 2) Scanning the its file indicates signing is enabled
+ as requested by UBOOT_SIGN_ENABLE (using keys generated
+ via FIT_GENERATE_KEYS)
+ 3) Dumping the FIT image indicates signature values
+ are present (including for images as enabled via
+ FIT_SIGN_INDIVIDUAL)
+ 4) Examination of the do_assemble_fitimage runfile/logfile
+ indicate that UBOOT_MKIMAGE, UBOOT_MKIMAGE_SIGN and
+ UBOOT_MKIMAGE_SIGN_ARGS are working as expected.
+ Product: oe-core
+ Author: Paul Eggleton <paul.eggleton@microsoft.com> based upon
+ work by Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# Enable creation of fitImage
+MACHINE = "beaglebone-yocto"
+KERNEL_IMAGETYPES += " fitImage "
+KERNEL_CLASSES = " kernel-fitimage test-mkimage-wrapper "
+UBOOT_SIGN_ENABLE = "1"
+FIT_GENERATE_KEYS = "1"
+UBOOT_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_SIGN_IMG_KEYNAME = "img-oe-selftest"
+UBOOT_SIGN_KEYNAME = "cfg-oe-selftest"
+FIT_SIGN_INDIVIDUAL = "1"
+UBOOT_MKIMAGE_SIGN_ARGS = "-c 'a smart comment'"
+"""
+ self.write_config(config)
+
+ # fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ image_type = "core-image-minimal"
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "fitImage-its-%s" % (machine,))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "fitImage-%s.bin" % (machine,))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ req_itspaths = [
+ ['/', 'images', 'kernel-1'],
+ ['/', 'images', 'kernel-1', 'signature-1'],
+ ['/', 'images', 'fdt-am335x-boneblack.dtb'],
+ ['/', 'images', 'fdt-am335x-boneblack.dtb', 'signature-1'],
+ ['/', 'configurations', 'conf-am335x-boneblack.dtb'],
+ ['/', 'configurations', 'conf-am335x-boneblack.dtb', 'signature-1'],
+ ]
+
+ itspath = []
+ itspaths = []
+ linect = 0
+ sigs = {}
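+        # Walk the ITS file, tracking the current node path via brace nesting,
+        # and collect the key/value pairs of every signature-1 node.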
+ with open(fitimage_its_path) as its_file:
+            for line in its_file:
+                linect += 1
+                line = line.strip()
+ if line.endswith('};'):
+ itspath.pop()
+ elif line.endswith('{'):
+ itspath.append(line[:-1].strip())
+ itspaths.append(itspath[:])
+ elif itspath and itspath[-1] == 'signature-1':
+ itsdotpath = '.'.join(itspath)
+ if not itsdotpath in sigs:
+ sigs[itsdotpath] = {}
+ if not '=' in line or not line.endswith(';'):
+ self.fail('Unexpected formatting in %s sigs section line %d:%s' % (fitimage_its_path, linect, line))
+ key, value = line.split('=', 1)
+ sigs[itsdotpath][key.rstrip()] = value.lstrip().rstrip(';')
+
+ for reqpath in req_itspaths:
+ if not reqpath in itspaths:
+ self.fail('Missing section in its file: %s' % reqpath)
+
+ reqsigvalues_image = {
+ 'algo': '"sha256,rsa2048"',
+ 'key-name-hint': '"img-oe-selftest"',
+ }
+ reqsigvalues_config = {
+ 'algo': '"sha256,rsa2048"',
+ 'key-name-hint': '"cfg-oe-selftest"',
+ 'sign-images': '"kernel", "fdt"',
+ }
+
+ for itspath, values in sigs.items():
+ if 'conf-' in itspath:
+ reqsigvalues = reqsigvalues_config
+ else:
+ reqsigvalues = reqsigvalues_image
+ for reqkey, reqvalue in reqsigvalues.items():
+ value = values.get(reqkey, None)
+ if value is None:
+ self.fail('Missing key "%s" in its file signature section %s' % (reqkey, itspath))
+ self.assertEqual(value, reqvalue)
+
+ # Dump the image to see if it really got signed
+ bitbake("u-boot-tools-native -c addto_recipe_sysroot")
+ result = runCmd('bitbake -e u-boot-tools-native | grep ^RECIPE_SYSROOT_NATIVE=')
+ recipe_sysroot_native = result.output.split('=')[1].strip('"')
+ dumpimage_path = os.path.join(recipe_sysroot_native, 'usr', 'bin', 'dumpimage')
+ result = runCmd('%s -l %s' % (dumpimage_path, fitimage_path))
+ in_signed = None
+ signed_sections = {}
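+        # Parse 'dumpimage -l' output into per-section key/value dicts, keyed
+        # by the name in parentheses on each section header line.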
+ for line in result.output.splitlines():
+ if line.startswith((' Configuration', ' Image')):
+                in_signed = re.search(r'\((.*)\)', line).groups()[0]
+            elif re.match('^ *', line).group(0) in (' ', ''):
+ in_signed = None
+ elif in_signed:
+ if not in_signed in signed_sections:
+ signed_sections[in_signed] = {}
+ key, value = line.split(':', 1)
+ signed_sections[in_signed][key.strip()] = value.strip()
+ self.assertIn('kernel-1', signed_sections)
+ self.assertIn('fdt-am335x-boneblack.dtb', signed_sections)
+ self.assertIn('conf-am335x-boneblack.dtb', signed_sections)
+ for signed_section, values in signed_sections.items():
+ value = values.get('Sign algo', None)
+ if signed_section.startswith("conf"):
+ self.assertEqual(value, 'sha256,rsa2048:cfg-oe-selftest', 'Signature algorithm for %s not expected value' % signed_section)
+ else:
+ self.assertEqual(value, 'sha256,rsa2048:img-oe-selftest', 'Signature algorithm for %s not expected value' % signed_section)
+ value = values.get('Sign value', None)
+ self.assertEqual(len(value), 512, 'Signature value for section %s not expected length' % signed_section)
+
+ # Check for UBOOT_MKIMAGE_SIGN_ARGS
+ result = runCmd('bitbake -e virtual/kernel | grep ^T=')
+        tempdir = result.output.split('=', 1)[1].strip().strip('"')
+ result = runCmd('grep "a smart comment" %s/run.do_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE_SIGN_ARGS value did not get used')
+
+ # Check for evidence of test-mkimage-wrapper class
+ result = runCmd('grep "### uboot-mkimage wrapper message" %s/log.do_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE did not work')
+ result = runCmd('grep "### uboot-mkimage signing wrapper message" %s/log.do_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE_SIGN did not work')
+
+ def test_uboot_fit_image(self):
+ """
+        Summary:     Check if U-Boot FIT image and Image Tree Source
+ (its) are built and the Image Tree Source has the
+ correct fields.
+ Expected: 1. u-boot-fitImage and u-boot-its can be built
+ 2. The type, load address, entrypoint address and
+ default values of U-boot image are correct in the
+ Image Tree Source. Not all the fields are tested,
+                     only the key fields that won't vary between
+ different architectures.
+ Product: oe-core
+ Author: Klaus Heinrich Kiwi <klaus@linux.vnet.ibm.com>
+ based on work by Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# We need at least CONFIG_SPL_LOAD_FIT and CONFIG_SPL_OF_CONTROL set
+MACHINE = "qemuarm"
+UBOOT_MACHINE = "am57xx_evm_defconfig"
+SPL_BINARY = "MLO"
+
+# Enable creation of the U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE = "1"
+
+# (U-boot) fitImage properties
+UBOOT_LOADADDRESS = "0x80080000"
+UBOOT_ENTRYPOINT = "0x80080000"
+UBOOT_FIT_DESC = "A model description"
+
+# Enable creation of Kernel fitImage
+KERNEL_IMAGETYPES += " fitImage "
+KERNEL_CLASSES = " kernel-fitimage"
+UBOOT_SIGN_ENABLE = "1"
+FIT_GENERATE_KEYS = "1"
+UBOOT_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_SIGN_IMG_KEYNAME = "img-oe-selftest"
+UBOOT_SIGN_KEYNAME = "cfg-oe-selftest"
+FIT_SIGN_INDIVIDUAL = "1"
+"""
+ self.write_config(config)
+
+ # The U-Boot fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "u-boot-its-%s" % (machine,))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "u-boot-fitImage-%s" % (machine,))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ # Check that the type, load address, entrypoint address and default
+ # values for kernel and ramdisk in Image Tree Source are as expected.
+ # The order of fields in the below array is important. Not all the
+        # fields are tested, only the key fields that won't vary between
+ # different architectures.
+ its_field_check = [
+ 'description = "A model description";',
+ 'type = "standalone";',
+ 'load = <0x80080000>;',
+ 'entry = <0x80080000>;',
+ 'default = "conf";',
+ 'loadables = "uboot";',
+ 'fdt = "fdt";'
+ ]
+
+ with open(fitimage_its_path) as its_file:
+ field_index = 0
+ for line in its_file:
+ if field_index == len(its_field_check):
+ break
+ if its_field_check[field_index] in line:
+ field_index +=1
+
+        if field_index != len(its_field_check): # if it's equal, the test passed
+ self.assertTrue(field_index == len(its_field_check),
+ "Fields in Image Tree Source File %s did not match, error in finding %s"
+ % (fitimage_its_path, its_field_check[field_index]))
+
+ def test_uboot_sign_fit_image(self):
+ """
+        Summary:     Check if the U-Boot FIT image and Image Tree Source
+                     (its) are built and the Image Tree Source has the
+                     correct fields, in the scenario where the kernel
+                     is also creating/signing its own fitImage.
+        Expected:    1. u-boot-fitImage and u-boot-its can be built
+                     2. The type, load address, entrypoint address and
+                        default values of the U-Boot image are correct in
+                        the Image Tree Source. Not all the fields are
+                        tested, only the key fields that won't vary
+                        between different architectures.
+ Product: oe-core
+ Author: Klaus Heinrich Kiwi <klaus@linux.vnet.ibm.com>
+ based on work by Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# We need at least CONFIG_SPL_LOAD_FIT and CONFIG_SPL_OF_CONTROL set
+MACHINE = "qemuarm"
+UBOOT_MACHINE = "am57xx_evm_defconfig"
+SPL_BINARY = "MLO"
+
+# Enable creation of the U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE = "1"
+
+# (U-boot) fitImage properties
+UBOOT_LOADADDRESS = "0x80080000"
+UBOOT_ENTRYPOINT = "0x80080000"
+UBOOT_FIT_DESC = "A model description"
+KERNEL_IMAGETYPES += " fitImage "
+KERNEL_CLASSES = " kernel-fitimage test-mkimage-wrapper "
+UBOOT_SIGN_ENABLE = "1"
+FIT_GENERATE_KEYS = "1"
+UBOOT_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_SIGN_IMG_KEYNAME = "img-oe-selftest"
+UBOOT_SIGN_KEYNAME = "cfg-oe-selftest"
+FIT_SIGN_INDIVIDUAL = "1"
+UBOOT_MKIMAGE_SIGN_ARGS = "-c 'a smart U-Boot comment'"
+"""
+ self.write_config(config)
+
+ # The U-Boot fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "u-boot-its-%s" % (machine,))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "u-boot-fitImage-%s" % (machine,))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+        # Check that the type, load address, entrypoint address and default
+        # values in the Image Tree Source are as expected. The order of fields
+        # in the below array is important. Not all the fields are tested,
+        # only the key fields that won't vary between different architectures.
+ its_field_check = [
+ 'description = "A model description";',
+ 'type = "standalone";',
+ 'load = <0x80080000>;',
+ 'entry = <0x80080000>;',
+ 'default = "conf";',
+ 'loadables = "uboot";',
+ 'fdt = "fdt";'
+ ]
+
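+        # Same in-order field scan as in test_uboot_fit_image above.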
+ with open(fitimage_its_path) as its_file:
+ field_index = 0
+ for line in its_file:
+ if field_index == len(its_field_check):
+ break
+ if its_field_check[field_index] in line:
+                    field_index += 1
+
+        if field_index != len(its_field_check):
+            self.fail("Fields in Image Tree Source File %s did not match, error in finding %s"
+                      % (fitimage_its_path, its_field_check[field_index]))
+
+
+ def test_sign_standalone_uboot_fit_image(self):
+ """
+ Summary: Check if U-Boot FIT image and Image Tree Source (its) are
+ created and signed correctly for the scenario where only
+ the U-Boot proper fitImage is being created and signed.
+ Expected: 1) U-Boot its and FIT image are built successfully
+ 2) Scanning the its file indicates signing is enabled
+ as requested by SPL_SIGN_ENABLE (using keys generated
+ via UBOOT_FIT_GENERATE_KEYS)
+ 3) Dumping the FIT image indicates signature values
+ are present
+ 4) Examination of the do_uboot_assemble_fitimage
+                        runfile/logfile indicates that UBOOT_MKIMAGE, UBOOT_MKIMAGE_SIGN
+ and SPL_MKIMAGE_SIGN_ARGS are working as expected.
+ Product: oe-core
+ Author: Klaus Heinrich Kiwi <klaus@linux.vnet.ibm.com> based upon
+ work by Paul Eggleton <paul.eggleton@microsoft.com> and
+ Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# There's no U-Boot defconfig with CONFIG_FIT_SIGNATURE yet, so we need at
+# least CONFIG_SPL_LOAD_FIT and CONFIG_SPL_OF_CONTROL set
+MACHINE = "qemuarm"
+UBOOT_MACHINE = "am57xx_evm_defconfig"
+SPL_BINARY = "MLO"
+# The kernel-fitimage class is a dependency even if we're only
+# creating/signing the U-Boot fitImage
+KERNEL_CLASSES = " kernel-fitimage test-mkimage-wrapper "
+# Enable creation and signing of the U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE = "1"
+SPL_SIGN_ENABLE = "1"
+SPL_SIGN_KEYNAME = "spl-oe-selftest"
+SPL_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_DTB_BINARY = "u-boot.dtb"
+UBOOT_ENTRYPOINT = "0x80000000"
+UBOOT_LOADADDRESS = "0x80000000"
+UBOOT_DTB_LOADADDRESS = "0x82000000"
+UBOOT_ARCH = "arm"
+SPL_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+SPL_MKIMAGE_SIGN_ARGS = "-c 'a smart U-Boot comment'"
+UBOOT_EXTLINUX = "0"
+UBOOT_FIT_GENERATE_KEYS = "1"
+UBOOT_FIT_HASH_ALG = "sha256"
+"""
+ self.write_config(config)
+
+ # The U-Boot fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ image_type = "core-image-minimal"
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "u-boot-its-%s" % (machine,))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "u-boot-fitImage-%s" % (machine,))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ req_itspaths = [
+ ['/', 'images', 'uboot'],
+ ['/', 'images', 'uboot', 'signature'],
+ ['/', 'images', 'fdt'],
+ ['/', 'images', 'fdt', 'signature'],
+ ]
+
+ itspath = []
+ itspaths = []
+ linect = 0
+ sigs = {}
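+        # Minimal brace-tracking scan of the ITS: push a node name on '{',
+        # pop it on '};', and record the key/value pairs found inside any
+        # 'signature' node.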
+ with open(fitimage_its_path) as its_file:
+            for line in its_file:
+                linect += 1
+ line = line.strip()
+ if line.endswith('};'):
+ itspath.pop()
+ elif line.endswith('{'):
+ itspath.append(line[:-1].strip())
+ itspaths.append(itspath[:])
+ elif itspath and itspath[-1] == 'signature':
+ itsdotpath = '.'.join(itspath)
+                    if itsdotpath not in sigs:
+                        sigs[itsdotpath] = {}
+                    if '=' not in line or not line.endswith(';'):
+ self.fail('Unexpected formatting in %s sigs section line %d:%s' % (fitimage_its_path, linect, line))
+ key, value = line.split('=', 1)
+ sigs[itsdotpath][key.rstrip()] = value.lstrip().rstrip(';')
+
+ for reqpath in req_itspaths:
+            if reqpath not in itspaths:
+ self.fail('Missing section in its file: %s' % reqpath)
+
+ reqsigvalues_image = {
+ 'algo': '"sha256,rsa2048"',
+ 'key-name-hint': '"spl-oe-selftest"',
+ }
+
+ for itspath, values in sigs.items():
+ reqsigvalues = reqsigvalues_image
+ for reqkey, reqvalue in reqsigvalues.items():
+ value = values.get(reqkey, None)
+ if value is None:
+ self.fail('Missing key "%s" in its file signature section %s' % (reqkey, itspath))
+ self.assertEqual(value, reqvalue)
+
+ # Dump the image to see if it really got signed
+ bitbake("u-boot-tools-native -c addto_recipe_sysroot")
+ result = runCmd('bitbake -e u-boot-tools-native | grep ^RECIPE_SYSROOT_NATIVE=')
+ recipe_sysroot_native = result.output.split('=')[1].strip('"')
+ dumpimage_path = os.path.join(recipe_sysroot_native, 'usr', 'bin', 'dumpimage')
+ result = runCmd('%s -l %s' % (dumpimage_path, fitimage_path))
+ in_signed = None
+ signed_sections = {}
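+        # dumpimage lists each section as ' Image N (name)' followed by
+        # indented attribute lines; group the attributes by section name.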
+ for line in result.output.splitlines():
+            if line.startswith(' Image'):
+                in_signed = re.search(r'\((.*)\)', line).groups()[0]
+            elif re.match(r' \w', line):
+ in_signed = None
+ elif in_signed:
+                if in_signed not in signed_sections:
+ signed_sections[in_signed] = {}
+ key, value = line.split(':', 1)
+ signed_sections[in_signed][key.strip()] = value.strip()
+ self.assertIn('uboot', signed_sections)
+ self.assertIn('fdt', signed_sections)
+ for signed_section, values in signed_sections.items():
+ value = values.get('Sign algo', None)
+ self.assertEqual(value, 'sha256,rsa2048:spl-oe-selftest', 'Signature algorithm for %s not expected value' % signed_section)
+ value = values.get('Sign value', None)
+ self.assertEqual(len(value), 512, 'Signature value for section %s not expected length' % signed_section)
+
+ # Check for SPL_MKIMAGE_SIGN_ARGS
+ result = runCmd('bitbake -e virtual/kernel | grep ^T=')
+        tempdir = result.output.split('=', 1)[1].strip().strip('"')
+ result = runCmd('grep "a smart U-Boot comment" %s/run.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'SPL_MKIMAGE_SIGN_ARGS value did not get used')
+
+ # Check for evidence of test-mkimage-wrapper class
+ result = runCmd('grep "### uboot-mkimage wrapper message" %s/log.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE did not work')
+ result = runCmd('grep "### uboot-mkimage signing wrapper message" %s/log.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE_SIGN did not work')
+
+ def test_sign_cascaded_uboot_fit_image(self):
+ """
+ Summary: Check if U-Boot FIT image and Image Tree Source (its) are
+ created and signed correctly for the scenario where both
+ U-Boot proper and Kernel fitImages are being created and
+ signed.
+ Expected: 1) U-Boot its and FIT image are built successfully
+ 2) Scanning the its file indicates signing is enabled
+ as requested by SPL_SIGN_ENABLE (using keys generated
+ via UBOOT_FIT_GENERATE_KEYS)
+ 3) Dumping the FIT image indicates signature values
+ are present
+ 4) Examination of the do_uboot_assemble_fitimage
+                        runfile/logfile indicates that UBOOT_MKIMAGE, UBOOT_MKIMAGE_SIGN
+ and SPL_MKIMAGE_SIGN_ARGS are working as expected.
+ Product: oe-core
+ Author: Klaus Heinrich Kiwi <klaus@linux.vnet.ibm.com> based upon
+ work by Paul Eggleton <paul.eggleton@microsoft.com> and
+ Usama Arif <usama.arif@arm.com>
+ """
+ config = """
+# There's no U-Boot defconfig with CONFIG_FIT_SIGNATURE yet, so we need at
+# least CONFIG_SPL_LOAD_FIT and CONFIG_SPL_OF_CONTROL set
+MACHINE = "qemuarm"
+UBOOT_MACHINE = "am57xx_evm_defconfig"
+SPL_BINARY = "MLO"
+# Enable creation and signing of the U-Boot fitImage
+UBOOT_FITIMAGE_ENABLE = "1"
+SPL_SIGN_ENABLE = "1"
+SPL_SIGN_KEYNAME = "spl-cascaded-oe-selftest"
+SPL_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_DTB_BINARY = "u-boot.dtb"
+UBOOT_ENTRYPOINT = "0x80000000"
+UBOOT_LOADADDRESS = "0x80000000"
+UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+UBOOT_MKIMAGE_SIGN_ARGS = "-c 'a smart cascaded Kernel comment'"
+UBOOT_DTB_LOADADDRESS = "0x82000000"
+UBOOT_ARCH = "arm"
+SPL_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+SPL_MKIMAGE_SIGN_ARGS = "-c 'a smart cascaded U-Boot comment'"
+UBOOT_EXTLINUX = "0"
+UBOOT_FIT_GENERATE_KEYS = "1"
+UBOOT_FIT_HASH_ALG = "sha256"
+KERNEL_IMAGETYPES += " fitImage "
+KERNEL_CLASSES = " kernel-fitimage test-mkimage-wrapper "
+UBOOT_SIGN_ENABLE = "1"
+FIT_GENERATE_KEYS = "1"
+UBOOT_SIGN_KEYDIR = "${TOPDIR}/signing-keys"
+UBOOT_SIGN_IMG_KEYNAME = "img-oe-selftest"
+UBOOT_SIGN_KEYNAME = "cfg-oe-selftest"
+FIT_SIGN_INDIVIDUAL = "1"
+"""
+ self.write_config(config)
+
+ # The U-Boot fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ image_type = "core-image-minimal"
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "u-boot-its-%s" % (machine,))
+ fitimage_path = os.path.join(deploy_dir_image,
+ "u-boot-fitImage-%s" % (machine,))
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ req_itspaths = [
+ ['/', 'images', 'uboot'],
+ ['/', 'images', 'uboot', 'signature'],
+ ['/', 'images', 'fdt'],
+ ['/', 'images', 'fdt', 'signature'],
+ ]
+
+ itspath = []
+ itspaths = []
+ linect = 0
+ sigs = {}
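+        # Same brace-tracking ITS scan as in test_sign_standalone_uboot_fit_image.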
+ with open(fitimage_its_path) as its_file:
+            for line in its_file:
+                linect += 1
+ line = line.strip()
+ if line.endswith('};'):
+ itspath.pop()
+ elif line.endswith('{'):
+ itspath.append(line[:-1].strip())
+ itspaths.append(itspath[:])
+ elif itspath and itspath[-1] == 'signature':
+ itsdotpath = '.'.join(itspath)
+                    if itsdotpath not in sigs:
+                        sigs[itsdotpath] = {}
+                    if '=' not in line or not line.endswith(';'):
+ self.fail('Unexpected formatting in %s sigs section line %d:%s' % (fitimage_its_path, linect, line))
+ key, value = line.split('=', 1)
+ sigs[itsdotpath][key.rstrip()] = value.lstrip().rstrip(';')
+
+ for reqpath in req_itspaths:
+            if reqpath not in itspaths:
+ self.fail('Missing section in its file: %s' % reqpath)
+
+ reqsigvalues_image = {
+ 'algo': '"sha256,rsa2048"',
+ 'key-name-hint': '"spl-cascaded-oe-selftest"',
+ }
+
+ for itspath, values in sigs.items():
+ reqsigvalues = reqsigvalues_image
+ for reqkey, reqvalue in reqsigvalues.items():
+ value = values.get(reqkey, None)
+ if value is None:
+ self.fail('Missing key "%s" in its file signature section %s' % (reqkey, itspath))
+ self.assertEqual(value, reqvalue)
+
+ # Dump the image to see if it really got signed
+ bitbake("u-boot-tools-native -c addto_recipe_sysroot")
+ result = runCmd('bitbake -e u-boot-tools-native | grep ^RECIPE_SYSROOT_NATIVE=')
+ recipe_sysroot_native = result.output.split('=')[1].strip('"')
+ dumpimage_path = os.path.join(recipe_sysroot_native, 'usr', 'bin', 'dumpimage')
+ result = runCmd('%s -l %s' % (dumpimage_path, fitimage_path))
+ in_signed = None
+ signed_sections = {}
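+        # Group the dumpimage output by image section, as in the standalone test.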
+ for line in result.output.splitlines():
+            if line.startswith(' Image'):
+                in_signed = re.search(r'\((.*)\)', line).groups()[0]
+            elif re.match(r' \w', line):
+ in_signed = None
+ elif in_signed:
+                if in_signed not in signed_sections:
+ signed_sections[in_signed] = {}
+ key, value = line.split(':', 1)
+ signed_sections[in_signed][key.strip()] = value.strip()
+ self.assertIn('uboot', signed_sections)
+ self.assertIn('fdt', signed_sections)
+ for signed_section, values in signed_sections.items():
+ value = values.get('Sign algo', None)
+ self.assertEqual(value, 'sha256,rsa2048:spl-cascaded-oe-selftest', 'Signature algorithm for %s not expected value' % signed_section)
+ value = values.get('Sign value', None)
+ self.assertEqual(len(value), 512, 'Signature value for section %s not expected length' % signed_section)
+
+ # Check for SPL_MKIMAGE_SIGN_ARGS
+ result = runCmd('bitbake -e virtual/kernel | grep ^T=')
+        tempdir = result.output.split('=', 1)[1].strip().strip('"')
+ result = runCmd('grep "a smart cascaded U-Boot comment" %s/run.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'SPL_MKIMAGE_SIGN_ARGS value did not get used')
+
+ # Check for evidence of test-mkimage-wrapper class
+ result = runCmd('grep "### uboot-mkimage wrapper message" %s/log.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE did not work')
+ result = runCmd('grep "### uboot-mkimage signing wrapper message" %s/log.do_uboot_assemble_fitimage' % tempdir, ignore_status=True)
+ self.assertEqual(result.status, 0, 'UBOOT_MKIMAGE_SIGN did not work')
+
+
+
+ def test_initramfs_bundle(self):
+ """
+ Summary: Verifies the content of the initramfs bundle node in the FIT Image Tree Source (its)
+ The FIT settings are set by the test case.
+ The machine used is beaglebone-yocto.
+ Expected: 1. The ITS is generated with initramfs bundle support
+ 2. All the fields in the kernel node are as expected (matching the
+ conf settings)
+ 3. The kernel is included in all the available configurations and
+ its hash is included in the configuration signature
+
+ Product: oe-core
+ Author: Abdellatif El Khlifi <abdellatif.elkhlifi@arm.com>
+ """
+
+ config = """
+DISTRO="poky"
+MACHINE = "beaglebone-yocto"
+INITRAMFS_IMAGE_BUNDLE = "1"
+INITRAMFS_IMAGE = "core-image-minimal-initramfs"
+INITRAMFS_SCRIPTS = ""
+UBOOT_MACHINE = "am335x_evm_defconfig"
+KERNEL_CLASSES = " kernel-fitimage "
+KERNEL_IMAGETYPES = "fitImage"
+UBOOT_SIGN_ENABLE = "1"
+UBOOT_SIGN_KEYNAME = "beaglebonekey"
+UBOOT_SIGN_KEYDIR ?= "${DEPLOY_DIR_IMAGE}"
+UBOOT_DTB_BINARY = "u-boot.dtb"
+UBOOT_ENTRYPOINT = "0x80000000"
+UBOOT_LOADADDRESS = "0x80000000"
+UBOOT_DTB_LOADADDRESS = "0x82000000"
+UBOOT_ARCH = "arm"
+UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"
+UBOOT_EXTLINUX = "0"
+FIT_GENERATE_KEYS = "1"
+KERNEL_IMAGETYPE_REPLACEMENT = "zImage"
+FIT_KERNEL_COMP_ALG = "none"
+FIT_HASH_ALG = "sha256"
+"""
+ self.write_config(config)
+
+ # fitImage is created as part of linux recipe
+ bitbake("virtual/kernel")
+
+ image_type = get_bb_var('INITRAMFS_IMAGE')
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ machine = get_bb_var('MACHINE')
+ fitimage_its_path = os.path.join(deploy_dir_image,
+ "fitImage-its-%s-%s-%s" % (image_type, machine, machine))
+ fitimage_path = os.path.join(deploy_dir_image,"fitImage")
+
+ self.assertTrue(os.path.exists(fitimage_its_path),
+ "%s image tree source doesn't exist" % (fitimage_its_path))
+ self.assertTrue(os.path.exists(fitimage_path),
+ "%s FIT image doesn't exist" % (fitimage_path))
+
+ kernel_load = str(get_bb_var('UBOOT_LOADADDRESS'))
+ kernel_entry = str(get_bb_var('UBOOT_ENTRYPOINT'))
+ kernel_compression = str(get_bb_var('FIT_KERNEL_COMP_ALG'))
+ uboot_arch = str(get_bb_var('UBOOT_ARCH'))
+ fit_hash_alg = str(get_bb_var('FIT_HASH_ALG'))
+
+        with open(fitimage_its_path) as its_file:
+            its_lines = [line.strip() for line in its_file]
+
+ exp_node_lines = [
+ 'kernel-1 {',
+ 'description = "Linux kernel";',
+ 'data = /incbin/("linux.bin");',
+ 'type = "kernel";',
+ 'arch = "' + uboot_arch + '";',
+ 'os = "linux";',
+ 'compression = "' + kernel_compression + '";',
+ 'load = <' + kernel_load + '>;',
+ 'entry = <' + kernel_entry + '>;',
+ 'hash-1 {',
+            'algo = "' + fit_hash_alg + '";',
+ '};',
+ '};'
+ ]
+
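+        # The expected lines must appear in the ITS verbatim and in exactly
+        # this order, starting at the 'kernel-1 {' line.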
+ node_str = exp_node_lines[0]
+
+ test_passed = False
+
+ print ("checking kernel node\n")
+
+ if node_str in its_lines:
+ node_start_idx = its_lines.index(node_str)
+ node = its_lines[node_start_idx:(node_start_idx + len(exp_node_lines))]
+ if node == exp_node_lines:
+ print("kernel node verified")
+ else:
+ self.assertTrue(test_passed == True,"kernel node does not match expectation")
+
+ rx_configs = re.compile("^conf-.*")
+ its_configs = list(filter(rx_configs.match, its_lines))
+
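+        # Walk each configuration node up to its double closing brace and
+        # check the description, kernel reference and signed-images entries.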
+ for cfg_str in its_configs:
+ cfg_start_idx = its_lines.index(cfg_str)
+ line_idx = cfg_start_idx + 2
+ node_end = False
+            while not node_end:
+                if its_lines[line_idx] == "};" and its_lines[line_idx - 1] == "};":
+                    node_end = True
+                line_idx += 1
+
+ node = its_lines[cfg_start_idx:line_idx]
+ print("checking configuration " + cfg_str.rstrip(" {"))
+ rx_desc_line = re.compile("^description.*1 Linux kernel.*")
+            if len(list(filter(rx_desc_line.match, node))) != 1:
+                self.fail("kernel keyword not found in the description line")
+            print("kernel keyword found in the description line")
+
+            if 'kernel = "kernel-1";' not in node:
+                self.fail("kernel line not found")
+            print("kernel line found")
+
+            rx_sign_line = re.compile("^sign-images.*kernel.*")
+            if len(list(filter(rx_sign_line.match, node))) != 1:
+                self.fail("kernel hash not signed")
+            print("kernel hash signed")
+
+ test_passed = True
+ self.assertTrue(test_passed == True,"Initramfs bundle test success")
diff --git a/meta/lib/oeqa/selftest/cases/glibc.py b/meta/lib/oeqa/selftest/cases/glibc.py
index c687f6ef93..6f96281ea5 100644
--- a/meta/lib/oeqa/selftest/cases/glibc.py
+++ b/meta/lib/oeqa/selftest/cases/glibc.py
@@ -24,7 +24,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
features.append('TOOLCHAIN_TEST_HOST_USER = "root"')
features.append('TOOLCHAIN_TEST_HOST_PORT = "22"')
# force single threaded test execution
- features.append('EGLIBCPARALLELISM_task-check_pn-glibc-testsuite = "PARALLELMFLAGS="-j1""')
+ features.append('EGLIBCPARALLELISM_task-check:pn-glibc-testsuite = "PARALLELMFLAGS="-j1""')
self.write_config("\n".join(features))
bitbake("glibc-testsuite -c check")
@@ -33,7 +33,7 @@ class GlibcSelfTestBase(OESelftestTestCase, OEPTestResultTestCase):
ptestsuite = "glibc-user" if ssh is None else "glibc"
self.ptest_section(ptestsuite)
- with open(os.path.join(builddir, "tests.sum"), "r") as f:
+ with open(os.path.join(builddir, "tests.sum"), "r", errors='replace') as f:
for test, result in parse_values(f):
self.ptest_result(ptestsuite, test, result)
diff --git a/meta/lib/oeqa/selftest/cases/gotoolchain.py b/meta/lib/oeqa/selftest/cases/gotoolchain.py
index 3119520f0d..c809d7c9b1 100644
--- a/meta/lib/oeqa/selftest/cases/gotoolchain.py
+++ b/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -43,29 +43,34 @@ class oeGoToolchainSelfTest(OESelftestTestCase):
@classmethod
def tearDownClass(cls):
+        # Go creates files which are read-only; make them writable so the
+        # tree can be removed
+ for dirpath, dirnames, filenames in os.walk(cls.tmpdir_SDKQA):
+ for filename in filenames + dirnames:
+ f = os.path.join(dirpath, filename)
+ if not os.path.islink(f):
+ os.chmod(f, 0o775)
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
- def run_sdk_go_command(self, gocmd):
- cmd = "cd %s; " % self.tmpdir_SDKQA
+ def run_sdk_go_command(self, gocmd, proj, name):
+ cmd = "cd %s/src/%s/%s; " % (self.go_path, proj, name)
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
def test_go_dep_build(self):
- proj = "github.com/golang"
- name = "dep"
- ver = "v0.3.1"
+ proj = "github.com/direnv"
+ name = "direnv"
+ ver = "v2.27.0"
archive = ".tar.gz"
url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
- runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
+ runCmd("mv %s/direnv-2.27.0 %s/src/%s/%s"
% (self.tmpdir_SDKQA, self.go_path, proj, name))
- retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
- % (proj, name))
+ retv = self.run_sdk_go_command('build', proj, name)
self.assertEqual(retv, 0,
msg="Running go build failed for %s" % name)
diff --git a/meta/lib/oeqa/selftest/cases/image_typedep.py b/meta/lib/oeqa/selftest/cases/image_typedep.py
index 52e1080f13..5b182a8f94 100644
--- a/meta/lib/oeqa/selftest/cases/image_typedep.py
+++ b/meta/lib/oeqa/selftest/cases/image_typedep.py
@@ -9,7 +9,7 @@ from oeqa.utils.commands import bitbake
class ImageTypeDepTests(OESelftestTestCase):
- # Verify that when specifying a IMAGE_TYPEDEP_ of the form "foo.bar" that
+    # Verify that when specifying an IMAGE_TYPEDEP: of the form "foo.bar" that
# the conversion type bar gets added as a dep as well
def test_conversion_typedep_added(self):
@@ -22,7 +22,7 @@ LICENSE = "MIT"
IMAGE_FSTYPES = "testfstype"
IMAGE_TYPES_MASKED += "testfstype"
-IMAGE_TYPEDEP_testfstype = "tar.bz2"
+IMAGE_TYPEDEP:testfstype = "tar.bz2"
inherit image
diff --git a/meta/lib/oeqa/selftest/cases/imagefeatures.py b/meta/lib/oeqa/selftest/cases/imagefeatures.py
index dea519e6df..d36d45c551 100644
--- a/meta/lib/oeqa/selftest/cases/imagefeatures.py
+++ b/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -5,6 +5,7 @@
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
from oeqa.utils.sshcontrol import SSHControl
+import glob
import os
import json
@@ -67,18 +68,6 @@ class ImageFeatures(OESelftestTestCase):
self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
- def test_clutter_image_can_be_built(self):
- """
- Summary: Check if clutter image can be built
- Expected: 1. core-image-clutter can be built
- Product: oe-core
- Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
- AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
- """
-
- # Build a core-image-clutter
- bitbake('core-image-clutter')
-
def test_wayland_support_in_image(self):
"""
Summary: Check Wayland support in image
@@ -209,8 +198,8 @@ class ImageFeatures(OESelftestTestCase):
image_name = 'core-image-minimal'
all_image_types = set(get_bb_var("IMAGE_TYPES", image_name).split())
- blacklist = set(('container', 'elf', 'f2fs', 'multiubi', 'tar.zst', 'wic.zst'))
- img_types = all_image_types - blacklist
+ skip_image_types = set(('container', 'elf', 'f2fs', 'multiubi', 'tar.zst', 'wic.zst'))
+ img_types = all_image_types - skip_image_types
config = 'IMAGE_FSTYPES += "%s"\n'\
'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
@@ -239,8 +228,8 @@ USERADD_GID_TABLES += "files/static-group"
def test_no_busybox_base_utils(self):
config = """
-# Enable x11
-DISTRO_FEATURES_append += "x11"
+# Enable wayland
+DISTRO_FEATURES:append = " pam opengl wayland"
# Switch to systemd
DISTRO_FEATURES += "systemd"
@@ -256,12 +245,12 @@ VIRTUAL-RUNTIME_base-utils = "packagegroup-core-base-utils"
VIRTUAL-RUNTIME_base-utils-hwclock = "util-linux-hwclock"
VIRTUAL-RUNTIME_base-utils-syslog = ""
-# Blacklist busybox
-PNBLACKLIST[busybox] = "Don't build this"
+# Skip busybox
+SKIP_RECIPE[busybox] = "Don't build this"
"""
self.write_config(config)
- bitbake("--graphviz core-image-sato")
+ bitbake("--graphviz core-image-weston")
def test_image_gen_debugfs(self):
"""
@@ -273,7 +262,7 @@ PNBLACKLIST[busybox] = "Don't build this"
Author: Humberto Ibarra <humberto.ibarra.lopez@intel.com>
Yeoh Ee Peng <ee.peng.yeoh@intel.com>
"""
- import glob
+
image_name = 'core-image-minimal'
features = 'IMAGE_GEN_DEBUGFS = "1"\n'
features += 'IMAGE_FSTYPES_DEBUGFS = "tar.bz2"\n'
@@ -294,3 +283,12 @@ PNBLACKLIST[busybox] = "Don't build this"
for t in dbg_symbols_targets:
result = runCmd('objdump --syms %s | grep debug' % t)
self.assertTrue("debug" in result.output, msg='Failed to find debug symbol: %s' % result.output)
+
+ def test_empty_image(self):
+ """Test creation of image with no packages"""
+ bitbake('test-empty-image')
+ res_dir = get_bb_var('DEPLOY_DIR_IMAGE')
+ images = os.path.join(res_dir, "test-empty-image-*.manifest")
+ result = glob.glob(images)
+        # glob order is arbitrary, but the symlink and the versioned manifest
+        # have identical content, so checking the first match is enough
+        with open(result[0], "r") as f:
+            self.assertEqual(len(f.read().strip()), 0)
diff --git a/meta/lib/oeqa/selftest/cases/incompatible_lic.py b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
index 3eabd79097..0794d46e6d 100644
--- a/meta/lib/oeqa/selftest/cases/incompatible_lic.py
+++ b/meta/lib/oeqa/selftest/cases/incompatible_lic.py
@@ -1,10 +1,11 @@
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import bitbake
-class IncompatibleLicenseTests(OESelftestTestCase):
+class IncompatibleLicenseTestObsolete(OESelftestTestCase):
- def lic_test(self, pn, pn_lic, lic):
- error_msg = 'ERROR: Nothing PROVIDES \'%s\'\n%s was skipped: it has incompatible license(s): %s' % (pn, pn, pn_lic)
+ def lic_test(self, pn, pn_lic, lic, error_msg=None):
+ if not error_msg:
+ error_msg = 'ERROR: Nothing PROVIDES \'%s\'\n%s was skipped: it has incompatible license(s): %s' % (pn, pn, pn_lic)
self.write_config("INCOMPATIBLE_LICENSE += \"%s\"" % (lic))
@@ -13,68 +14,81 @@ class IncompatibleLicenseTests(OESelftestTestCase):
raise AssertionError(result.output)
# Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
- # cannot be built when INCOMPATIBLE_LICENSE contains this SPDX license
- def test_incompatible_spdx_license(self):
- self.lic_test('incompatible-license', 'GPL-3.0', 'GPL-3.0')
-
- # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
# cannot be built when INCOMPATIBLE_LICENSE contains an alias (in
# SPDXLICENSEMAP) of this SPDX license
def test_incompatible_alias_spdx_license(self):
- self.lic_test('incompatible-license', 'GPL-3.0', 'GPLv3')
-
- # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
- # cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded license
- # matching this SPDX license
- def test_incompatible_spdx_license_wildcard(self):
- self.lic_test('incompatible-license', 'GPL-3.0', '*GPL-3.0')
+ self.lic_test('incompatible-license', 'GPL-3.0-only', 'GPLv3', "is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE")
# Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
# cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded alias
# license matching this SPDX license
def test_incompatible_alias_spdx_license_wildcard(self):
- self.lic_test('incompatible-license', 'GPL-3.0', '*GPLv3')
-
- # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
- # license cannot be built when INCOMPATIBLE_LICENSE contains this SPDX
- # license
- def test_incompatible_spdx_license_alias(self):
- self.lic_test('incompatible-license-alias', 'GPL-3.0', 'GPL-3.0')
+ self.lic_test('incompatible-license', 'GPL-3.0-only', '*GPLv3', "*GPLv3 is an invalid license wildcard entry")
# Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
# license cannot be built when INCOMPATIBLE_LICENSE contains this alias
def test_incompatible_alias_spdx_license_alias(self):
- self.lic_test('incompatible-license-alias', 'GPL-3.0', 'GPLv3')
+ self.lic_test('incompatible-license-alias', 'GPL-3.0-only', 'GPLv3', "is an obsolete license, please use an SPDX reference in INCOMPATIBLE_LICENSE")
# Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
# license cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded
# license matching this SPDX license
def test_incompatible_spdx_license_alias_wildcard(self):
- self.lic_test('incompatible-license-alias', 'GPL-3.0', '*GPL-3.0')
+ self.lic_test('incompatible-license-alias', 'GPL-3.0-only', '*GPL-3.0', "*GPL-3.0 is an invalid license wildcard entry")
# Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
# license cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded
# alias license matching the SPDX license
def test_incompatible_alias_spdx_license_alias_wildcard(self):
- self.lic_test('incompatible-license-alias', 'GPL-3.0', '*GPLv3')
+ self.lic_test('incompatible-license-alias', 'GPL-3.0-only', '*GPLv3', "*GPLv3 is an invalid license wildcard entry")
- # Verify that a package with multiple SPDX licenses (from
- # AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains
- # some of them
- def test_incompatible_spdx_licenses(self):
- self.lic_test('incompatible-licenses', 'GPL-3.0 LGPL-3.0', 'GPL-3.0 LGPL-3.0')
# Verify that a package with multiple SPDX licenses (from
# AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains a
# wildcard to some of them
def test_incompatible_spdx_licenses_wildcard(self):
- self.lic_test('incompatible-licenses', 'GPL-3.0 LGPL-3.0', '*GPL-3.0')
+ self.lic_test('incompatible-licenses', 'GPL-3.0-only LGPL-3.0-only', '*GPL-3.0-only', "*GPL-3.0-only is an invalid license wildcard entry")
+
# Verify that a package with multiple SPDX licenses (from
# AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains a
# wildcard matching all licenses
def test_incompatible_all_licenses_wildcard(self):
- self.lic_test('incompatible-licenses', 'GPL-2.0 GPL-3.0 LGPL-3.0', '*')
+ self.lic_test('incompatible-licenses', 'GPL-2.0-only GPL-3.0-only LGPL-3.0-only', '*', "* is an invalid license wildcard entry")
+
+class IncompatibleLicenseTests(OESelftestTestCase):
+
+ def lic_test(self, pn, pn_lic, lic):
+ error_msg = 'ERROR: Nothing PROVIDES \'%s\'\n%s was skipped: it has incompatible license(s): %s' % (pn, pn, pn_lic)
+
+ self.write_config("INCOMPATIBLE_LICENSE += \"%s\"" % (lic))
+
+ result = bitbake('%s --dry-run' % (pn), ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains this SPDX license
+ def test_incompatible_spdx_license(self):
+ self.lic_test('incompatible-license', 'GPL-3.0-only', 'GPL-3.0-only')
+
+ # Verify that a package with an SPDX license (from AVAILABLE_LICENSES)
+ # cannot be built when INCOMPATIBLE_LICENSE contains a wildcarded license
+ # matching this SPDX license
+ def test_incompatible_spdx_license_wildcard(self):
+ self.lic_test('incompatible-license', 'GPL-3.0-only', 'GPL-3.0*')
+
+ # Verify that a package with an alias (from SPDXLICENSEMAP) to an SPDX
+ # license cannot be built when INCOMPATIBLE_LICENSE contains this SPDX
+ # license
+ def test_incompatible_spdx_license_alias(self):
+ self.lic_test('incompatible-license-alias', 'GPL-3.0-only', 'GPL-3.0-only')
+
+ # Verify that a package with multiple SPDX licenses (from
+ # AVAILABLE_LICENSES) cannot be built when INCOMPATIBLE_LICENSE contains
+ # some of them
+ def test_incompatible_spdx_licenses(self):
+ self.lic_test('incompatible-licenses', 'GPL-3.0-only LGPL-3.0-only', 'GPL-3.0-only LGPL-3.0-only')
# Verify that a package with a non-SPDX license (neither in
# AVAILABLE_LICENSES nor in SPDXLICENSEMAP) cannot be built when
@@ -85,51 +99,63 @@ class IncompatibleLicenseTests(OESelftestTestCase):
class IncompatibleLicensePerImageTests(OESelftestTestCase):
def default_config(self):
return """
-IMAGE_INSTALL_append = "bash"
-INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+IMAGE_INSTALL:append = " bash"
+INCOMPATIBLE_LICENSE:pn-core-image-minimal = "GPL-3.0* LGPL-3.0*"
"""
def test_bash_default(self):
self.write_config(self.default_config())
- error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0+"
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0-or-later"
result = bitbake('core-image-minimal', ignore_status=True)
if error_msg not in result.output:
raise AssertionError(result.output)
def test_bash_and_license(self):
- self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " & SomeLicense"')
- error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0+"
+ self.write_config(self.default_config() + '\nLICENSE:append:pn-bash = " & SomeLicense"')
+ error_msg = "ERROR: core-image-minimal-1.0-r0 do_rootfs: Package bash cannot be installed into the image because it has incompatible license(s): GPL-3.0-or-later"
result = bitbake('core-image-minimal', ignore_status=True)
if error_msg not in result.output:
raise AssertionError(result.output)
def test_bash_or_license(self):
- self.write_config(self.default_config() + '\nLICENSE_append_pn-bash = " | SomeLicense"')
+ self.write_config(self.default_config() + '\nLICENSE:append:pn-bash = " | SomeLicense"')
bitbake('core-image-minimal')
- def test_bash_whitelist(self):
- self.write_config(self.default_config() + '\nWHITELIST_GPL-3.0_pn-core-image-minimal = "bash"')
+ def test_bash_license_exceptions(self):
+ self.write_config(self.default_config() + '\nINCOMPATIBLE_LICENSE_EXCEPTIONS:pn-core-image-minimal = "bash:GPL-3.0-or-later"')
bitbake('core-image-minimal')
class NoGPL3InImagesTests(OESelftestTestCase):
def test_core_image_minimal(self):
self.write_config("""
-INCOMPATIBLE_LICENSE_pn-core-image-minimal = "GPL-3.0 LGPL-3.0"
+INCOMPATIBLE_LICENSE:pn-core-image-minimal = "GPL-3.0* LGPL-3.0*"
""")
bitbake('core-image-minimal')
- def test_core_image_full_cmdline(self):
+ def test_core_image_full_cmdline_weston(self):
self.write_config("""
-INHERIT += "testimage"\n
-INCOMPATIBLE_LICENSE_pn-core-image-full-cmdline = "GPL-3.0 LGPL-3.0"\n
-RDEPENDS_packagegroup-core-full-cmdline-utils_remove = "bash bc coreutils cpio ed findutils gawk grep mc mc-fish mc-helpers mc-helpers-perl sed tar time"\n
-RDEPENDS_packagegroup-core-full-cmdline-dev-utils_remove = "diffutils m4 make patch"\n
-RDEPENDS_packagegroup-core-full-cmdline-multiuser_remove = "gzip"\n
+INHERIT += "testimage"
+INCOMPATIBLE_LICENSE:pn-core-image-full-cmdline = "GPL-3.0* LGPL-3.0*"
+INCOMPATIBLE_LICENSE:pn-core-image-weston = "GPL-3.0* LGPL-3.0*"
+# Settings for full-cmdline
+RDEPENDS:packagegroup-core-full-cmdline-utils:remove = "bash bc coreutils cpio ed findutils gawk grep mc mc-fish mc-helpers mc-helpers-perl sed tar time"
+RDEPENDS:packagegroup-core-full-cmdline-dev-utils:remove = "diffutils m4 make patch"
+RDEPENDS:packagegroup-core-full-cmdline-multiuser:remove = "gzip"
+# Settings for weston
+# direct gpl3 dependencies
+RRECOMMENDS:packagegroup-base-vfat:remove = "dosfstools"
+PACKAGECONFIG:remove:pn-bluez5 = "readline"
+# dnf pulls in gpg which is gpl3; it also pulls in python3-rpm which pulls in rpm-build which pulls in bash
+# so install rpm but not dnf
+IMAGE_FEATURES:remove:pn-core-image-weston = "package-management"
+CORE_IMAGE_EXTRA_INSTALL:pn-core-image-weston += "rpm"
+# matchbox-terminal depends on vte, which is gpl3
+CORE_IMAGE_BASE_INSTALL:remove:pn-core-image-weston = "matchbox-terminal"
""")
- bitbake('core-image-full-cmdline')
- bitbake('-c testimage core-image-full-cmdline')
+ bitbake('core-image-full-cmdline core-image-weston')
+ bitbake('-c testimage core-image-full-cmdline core-image-weston')
diff --git a/meta/lib/oeqa/selftest/cases/kerneldevelopment.py b/meta/lib/oeqa/selftest/cases/kerneldevelopment.py
index a61876ee61..b1623a1885 100644
--- a/meta/lib/oeqa/selftest/cases/kerneldevelopment.py
+++ b/meta/lib/oeqa/selftest/cases/kerneldevelopment.py
@@ -58,7 +58,7 @@ class KernelDev(OESelftestTestCase):
recipe_append = os.path.join(self.recipeskernel_dir, 'linux-yocto_%.bbappend')
with open(recipe_append, 'w+') as fh:
fh.write('SRC_URI += "file://%s"\n' % patch_name)
- fh.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"')
+ fh.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"')
runCmd('bitbake virtual/kernel -c clean')
runCmd('bitbake virtual/kernel -c patch')
diff --git a/meta/lib/oeqa/selftest/cases/layerappend.py b/meta/lib/oeqa/selftest/cases/layerappend.py
index 05e9426fc6..dadc7c5d28 100644
--- a/meta/lib/oeqa/selftest/cases/layerappend.py
+++ b/meta/lib/oeqa/selftest/cases/layerappend.py
@@ -30,20 +30,20 @@ python do_build() {
addtask build
"""
append = """
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
-SRC_URI_append = " file://appendtest.txt"
+SRC_URI:append = " file://appendtest.txt"
-sysroot_stage_all_append() {
+sysroot_stage_all:append() {
install -m 644 ${WORKDIR}/appendtest.txt ${SYSROOT_DESTDIR}/
}
"""
append2 = """
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
-SRC_URI_append = " file://appendtest.txt"
+SRC_URI:append = " file://appendtest.txt"
"""
layerappend = ''
diff --git a/meta/lib/oeqa/selftest/cases/lic_checksum.py b/meta/lib/oeqa/selftest/cases/lic_checksum.py
index bae935d697..91021ac335 100644
--- a/meta/lib/oeqa/selftest/cases/lic_checksum.py
+++ b/meta/lib/oeqa/selftest/cases/lic_checksum.py
@@ -21,7 +21,7 @@ class LicenseTests(OESelftestTestCase):
os.close(lic_file)
self.track_for_cleanup(lic_path)
- self.write_config("INHERIT_remove = \"report-error\"")
+ self.write_config("INHERIT:remove = \"report-error\"")
self.write_recipeinc('emptytest', """
INHIBIT_DEFAULT_DEPS = "1"
diff --git a/meta/lib/oeqa/selftest/cases/meta_ide.py b/meta/lib/oeqa/selftest/cases/meta_ide.py
index 809142559a..6f10d30dc9 100644
--- a/meta/lib/oeqa/selftest/cases/meta_ide.py
+++ b/meta/lib/oeqa/selftest/cases/meta_ide.py
@@ -43,7 +43,7 @@ class MetaIDE(OESelftestTestCase):
"https://ftp.gnu.org/gnu/cpio/cpio-2.13.tar.gz",
self.tmpdir_metaideQA, self.td['DATETIME'], dl_dir=dl_dir)
self.project.download_archive()
- self.assertEqual(self.project.run_configure(), 0,
+ self.assertEqual(self.project.run_configure('$CONFIGURE_FLAGS --disable-maintainer-mode','sed -i -e "/char \*program_name/d" src/global.c;'), 0,
msg="Running configure failed")
self.assertEqual(self.project.run_make(), 0,
msg="Running make failed")
diff --git a/meta/lib/oeqa/selftest/cases/multiconfig.py b/meta/lib/oeqa/selftest/cases/multiconfig.py
index 39b92f2439..baae9b456f 100644
--- a/meta/lib/oeqa/selftest/cases/multiconfig.py
+++ b/meta/lib/oeqa/selftest/cases/multiconfig.py
@@ -17,7 +17,7 @@ class MultiConfig(OESelftestTestCase):
"""
config = """
-IMAGE_INSTALL_append_pn-core-image-full-cmdline = " multiconfig-image-packager-tiny multiconfig-image-packager-musl"
+IMAGE_INSTALL:append:pn-core-image-full-cmdline = " multiconfig-image-packager-tiny multiconfig-image-packager-musl"
BBMULTICONFIG = "tiny musl"
"""
self.write_config(config)
@@ -52,7 +52,7 @@ TMPDIR = "${TOPDIR}/tmp-mc-tiny"
self.write_config(config)
testconfig = textwrap.dedent('''\
- MCTESTVAR_append = "1"
+ MCTESTVAR:append = "1"
''')
self.write_config(testconfig, 'test')
@@ -64,7 +64,7 @@ TMPDIR = "${TOPDIR}/tmp-mc-tiny"
self.assertIn('MCTESTVAR=test1', result.output.splitlines())
testconfig = textwrap.dedent('''\
- MCTESTVAR_append = "2"
+ MCTESTVAR:append = "2"
''')
self.write_config(testconfig, 'test')
diff --git a/meta/lib/oeqa/selftest/cases/newlib.py b/meta/lib/oeqa/selftest/cases/newlib.py
new file mode 100644
index 0000000000..999e3e78b0
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/newlib.py
@@ -0,0 +1,11 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+
+class NewlibTest(OESelftestTestCase):
+ def test_newlib(self):
+ self.write_config('TCLIBC = "newlib"')
+ bitbake("newlib libgloss")
diff --git a/meta/lib/oeqa/selftest/cases/oelib/elf.py b/meta/lib/oeqa/selftest/cases/oelib/elf.py
index d0a28090f2..5a5f9b4fdf 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/elf.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/elf.py
@@ -21,6 +21,6 @@ class TestElf(TestCase):
self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
self.assertEqual(oe.qa.elf_machine_to_string(0xF7), "BPF")
- self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unset")
self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
self.assertEqual(oe.qa.elf_machine_to_string("foobar"), "Unknown ('foobar')")
diff --git a/meta/lib/oeqa/selftest/cases/oelib/license.py b/meta/lib/oeqa/selftest/cases/oelib/license.py
index 6ebbee589f..3b359396b6 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/license.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/license.py
@@ -15,11 +15,11 @@ class SeenVisitor(oe.license.LicenseVisitor):
class TestSingleLicense(TestCase):
licenses = [
- "GPLv2",
- "LGPL-2.0",
- "Artistic",
+ "GPL-2.0-only",
+ "LGPL-2.0-only",
+ "Artistic-1.0",
"MIT",
- "GPLv3+",
+ "GPL-3.0-or-later",
"FOO_BAR",
]
invalid_licenses = ["GPL/BSD"]
@@ -67,9 +67,9 @@ class TestComplexCombinations(TestSimpleCombinations):
"FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
"(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
"((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
- "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
+ "(GPL-2.0-only|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0-only", "BSD-4-clause", "MIT"],
}
- preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
+ preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0-only"]
class TestIsIncluded(TestCase):
tests = {
@@ -87,12 +87,12 @@ class TestIsIncluded(TestCase):
[True, ["BAR", "FOOBAR"]],
("(FOO | BAR) & FOOBAR | BAZ & MOO & BARFOO", None, "FOO"):
[True, ["BAZ", "MOO", "BARFOO"]],
- ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, None):
- [True, ["GPL-3.0", "GPL-2.0", "LGPL-2.1"]],
- ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0"):
+ ("GPL-3.0-or-later & GPL-2.0-only & LGPL-2.1-only | Proprietary", None, None):
+ [True, ["GPL-3.0-or-later", "GPL-2.0-only", "LGPL-2.1-only"]],
+ ("GPL-3.0-or-later & GPL-2.0-only & LGPL-2.1-only | Proprietary", None, "GPL-3.0-or-later"):
[True, ["Proprietary"]],
- ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0 Proprietary"):
- [False, ["GPL-3.0"]]
+ ("GPL-3.0-or-later & GPL-2.0-only & LGPL-2.1-only | Proprietary", None, "GPL-3.0-or-later Proprietary"):
+ [False, ["GPL-3.0-or-later"]]
}
def test_tests(self):
diff --git a/meta/lib/oeqa/selftest/cases/oelib/utils.py b/meta/lib/oeqa/selftest/cases/oelib/utils.py
index a7214beb4c..bbf67bf9c9 100644
--- a/meta/lib/oeqa/selftest/cases/oelib/utils.py
+++ b/meta/lib/oeqa/selftest/cases/oelib/utils.py
@@ -64,7 +64,7 @@ class TestMultiprocessLaunch(TestCase):
import bb
def testfunction(item, d):
- if item == "2" or item == "1":
+ if item == "2":
raise KeyError("Invalid number %s" % item)
return "Found %s" % item
@@ -99,5 +99,4 @@ class TestMultiprocessLaunch(TestCase):
# Assert the function prints exceptions
with captured_output() as (out, err):
self.assertRaises(bb.BBHandledException, multiprocess_launch, testfunction, ["1", "2", "3", "4", "5", "6"], d, extraargs=(d,))
- self.assertIn("KeyError: 'Invalid number 1'", out.getvalue())
self.assertIn("KeyError: 'Invalid number 2'", out.getvalue())
diff --git a/meta/lib/oeqa/selftest/cases/oescripts.py b/meta/lib/oeqa/selftest/cases/oescripts.py
index 726daff7c6..91abf9654a 100644
--- a/meta/lib/oeqa/selftest/cases/oescripts.py
+++ b/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -150,7 +150,7 @@ class OEListPackageconfigTests(OEScriptTests):
expected_endlines = []
expected_endlines.append("RECIPE NAME PACKAGECONFIG FLAGS")
expected_endlines.append("pinentry gtk2 libcap ncurses qt secret")
- expected_endlines.append("tar acl")
+ expected_endlines.append("tar acl selinux")
self.check_endlines(results, expected_endlines)
@@ -167,7 +167,7 @@ class OEListPackageconfigTests(OEScriptTests):
def test_packageconfig_flags_option_all(self):
results = runCmd('%s/contrib/list-packageconfig-flags.py -a' % self.scripts_dir)
expected_endlines = []
- expected_endlines.append("pinentry-1.1.0")
+ expected_endlines.append("pinentry-1.2.0")
expected_endlines.append("PACKAGECONFIG ncurses libcap")
expected_endlines.append("PACKAGECONFIG[qt] --enable-pinentry-qt, --disable-pinentry-qt, qtbase-native qtbase")
expected_endlines.append("PACKAGECONFIG[gtk2] --enable-pinentry-gtk2, --disable-pinentry-gtk2, gtk+ glib-2.0")
diff --git a/meta/lib/oeqa/selftest/cases/overlayfs.py b/meta/lib/oeqa/selftest/cases/overlayfs.py
new file mode 100644
index 0000000000..56ae48ce64
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/overlayfs.py
@@ -0,0 +1,423 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
+
+def getline_qemu(out, line):
+ for l in out.split('\n'):
+ if line in l:
+ return l
+
+def getline(res, line):
+ return getline_qemu(res.output, line)
+
+class OverlayFSTests(OESelftestTestCase):
+ """Overlayfs class usage tests"""
+
+ def add_overlay_conf_to_machine(self):
+ machine_inc = """
+OVERLAYFS_MOUNT_POINT[mnt-overlay] = "/mnt/overlay"
+"""
+ self.set_machine_config(machine_inc)
+
+ def test_distro_features_missing(self):
+ """
+ Summary: Check that required DISTRO_FEATURES are set
+ Expected: Fail when either systemd or overlayfs are not in DISTRO_FEATURES
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+IMAGE_INSTALL:append = " overlayfs-user"
+"""
+ overlayfs_recipe_append = """
+inherit overlayfs
+"""
+ self.write_config(config)
+ self.add_overlay_conf_to_machine()
+ self.write_recipeinc('overlayfs-user', overlayfs_recipe_append)
+
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "overlayfs-user was skipped: missing required distro features")
+ self.assertTrue("overlayfs" in res.output, msg=res.output)
+ self.assertTrue("systemd" in res.output, msg=res.output)
+ self.assertTrue("ERROR: Required build target 'core-image-minimal' has no buildable providers." in res.output, msg=res.output)
+
+ def test_not_all_units_installed(self):
+ """
+ Summary: Test QA check that we have required mount units in the image
+ Expected: Fail because mount unit for overlay partition is not installed
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+IMAGE_INSTALL:append = " overlayfs-user"
+DISTRO_FEATURES += "systemd overlayfs"
+"""
+
+ self.write_config(config)
+ self.add_overlay_conf_to_machine()
+
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, " Mount path /mnt/overlay not found in fstat and unit mnt-overlay.mount not found in systemd unit directories")
+ self.assertTrue(line and line.startswith("WARNING:"), msg=res.output)
+ line = getline(res, "Not all mount paths and units are installed in the image")
+ self.assertTrue(line and line.startswith("ERROR:"), msg=res.output)
+
+ def test_mount_unit_not_set(self):
+ """
+ Summary: Test whether mount unit was set properly
+ Expected: Fail because mount unit was not set
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+IMAGE_INSTALL:append = " overlayfs-user"
+DISTRO_FEATURES += "systemd overlayfs"
+"""
+
+ self.write_config(config)
+
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "A recipe uses overlayfs class but there is no OVERLAYFS_MOUNT_POINT set in your MACHINE configuration")
+ self.assertTrue(line and line.startswith("Parsing recipes...ERROR:"), msg=res.output)
+
+ def test_wrong_mount_unit_set(self):
+ """
+ Summary: Test whether mount unit was set properly
+ Expected: Fail because not the correct flag used for mount unit
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+IMAGE_INSTALL:append = " overlayfs-user"
+DISTRO_FEATURES += "systemd overlayfs"
+"""
+
+ wrong_machine_config = """
+OVERLAYFS_MOUNT_POINT[usr-share-overlay] = "/usr/share/overlay"
+"""
+
+ self.write_config(config)
+ self.set_machine_config(wrong_machine_config)
+
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "Missing required mount point for OVERLAYFS_MOUNT_POINT[mnt-overlay] in your MACHINE configuration")
+ self.assertTrue(line and line.startswith("Parsing recipes...ERROR:"), msg=res.output)
+
+ def _test_correct_image(self, recipe, data):
+ """
+ Summary: Check that we can create an image when all parameters are
+ set correctly
+ Expected: Image is created successfully
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+IMAGE_INSTALL:append = " overlayfs-user systemd-machine-units"
+DISTRO_FEATURES += "systemd overlayfs"
+
+# Use systemd as init manager
+VIRTUAL-RUNTIME_init_manager = "systemd"
+
+# enable overlayfs in the kernel
+KERNEL_EXTRA_FEATURES:append = " features/overlayfs/overlayfs.scc"
+"""
+
+ overlayfs_recipe_append = """
+OVERLAYFS_WRITABLE_PATHS[mnt-overlay] += "/usr/share/another-overlay-mount"
+
+SYSTEMD_SERVICE:${PN} += " \
+ my-application.service \
+"
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ cat <<EOT > ${D}${systemd_system_unitdir}/my-application.service
+[Unit]
+Description=Sample application start-up unit
+After=overlayfs-user-overlays.service
+Requires=overlayfs-user-overlays.service
+
+[Service]
+Type=oneshot
+ExecStart=/bin/true
+RemainAfterExit=true
+
+[Install]
+WantedBy=multi-user.target
+EOT
+}
+"""
+
+ self.write_config(config)
+ self.add_overlay_conf_to_machine()
+ self.write_recipeinc(recipe, data)
+ self.write_recipeinc('overlayfs-user', overlayfs_recipe_append)
+
+ bitbake('core-image-minimal')
+
+ with runqemu('core-image-minimal') as qemu:
+ # Check that application service started
+ status, output = qemu.run_serial("systemctl status my-application")
+ self.assertTrue("active (exited)" in output, msg=output)
+
+ # Check that overlay mounts are dependencies of our application unit
+ status, output = qemu.run_serial("systemctl list-dependencies my-application")
+ self.assertTrue("overlayfs-user-overlays.service" in output, msg=output)
+
+ status, output = qemu.run_serial("systemctl list-dependencies overlayfs-user-overlays")
+ self.assertTrue("usr-share-another\\x2doverlay\\x2dmount.mount" in output, msg=output)
+ self.assertTrue("usr-share-my\\x2dapplication.mount" in output, msg=output)
+
+ # Check that we have /mnt/overlay fs mounted as tmpfs and
+ # /usr/share/my-application as an overlay (see overlayfs-user recipe)
+ status, output = qemu.run_serial("/bin/mount -t tmpfs,overlay")
+
+ line = getline_qemu(output, "on /mnt/overlay")
+ self.assertTrue(line and line.startswith("tmpfs"), msg=output)
+
+ line = getline_qemu(output, "upperdir=/mnt/overlay/upper/usr/share/my-application")
+ self.assertTrue(line and line.startswith("overlay"), msg=output)
+
+ line = getline_qemu(output, "upperdir=/mnt/overlay/upper/usr/share/another-overlay-mount")
+ self.assertTrue(line and line.startswith("overlay"), msg=output)
+
+ def test_correct_image_fstab(self):
+ """
+ Summary: Check that we can create an image when all parameters are
+ set correctly via fstab
+ Expected: Image is created successfully
+ Author: Stefan Herbrechtsmeier <stefan.herbrechtsmeier@weidmueller.com>
+ """
+
+ base_files_append = """
+do_install:append() {
+ cat <<EOT >> ${D}${sysconfdir}/fstab
+tmpfs /mnt/overlay tmpfs mode=1777,strictatime,nosuid,nodev 0 0
+EOT
+}
+"""
+
+ self._test_correct_image('base-files', base_files_append)
+
+ def test_correct_image_unit(self):
+ """
+ Summary: Check that we can create an image when all parameters are
+ set correctly via mount unit
+ Expected: Image is created successfully
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ systemd_machine_unit_append = """
+SYSTEMD_SERVICE:${PN} += " \
+ mnt-overlay.mount \
+"
+
+do_install:append() {
+ install -d ${D}${systemd_system_unitdir}
+ cat <<EOT > ${D}${systemd_system_unitdir}/mnt-overlay.mount
+[Unit]
+Description=Tmpfs directory
+DefaultDependencies=no
+
+[Mount]
+What=tmpfs
+Where=/mnt/overlay
+Type=tmpfs
+Options=mode=1777,strictatime,nosuid,nodev
+
+[Install]
+WantedBy=multi-user.target
+EOT
+}
+
+"""
+
+ self._test_correct_image('systemd-machine-units', systemd_machine_unit_append)
+
+class OverlayFSEtcRunTimeTests(OESelftestTestCase):
+ """overlayfs-etc class tests"""
+
+ def test_all_required_variables_set(self):
+ """
+ Summary: Check that required variables are set
+ Expected: Fail when any of required variables is missing
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ configBase = """
+DISTRO_FEATURES += "systemd"
+
+# Use systemd as init manager
+VIRTUAL-RUNTIME_init_manager = "systemd"
+
+# enable overlayfs in the kernel
+KERNEL_EXTRA_FEATURES:append = " features/overlayfs/overlayfs.scc"
+
+# Image configuration for overlayfs-etc
+EXTRA_IMAGE_FEATURES += "overlayfs-etc"
+IMAGE_FEATURES:remove = "package-management"
+"""
+ configMountPoint = """
+OVERLAYFS_ETC_MOUNT_POINT = "/data"
+"""
+ configDevice = """
+OVERLAYFS_ETC_DEVICE = "/dev/mmcblk0p1"
+"""
+
+ self.write_config(configBase)
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "OVERLAYFS_ETC_MOUNT_POINT must be set in your MACHINE configuration")
+ self.assertTrue(line, msg=res.output)
+
+ self.append_config(configMountPoint)
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "OVERLAYFS_ETC_DEVICE must be set in your MACHINE configuration")
+ self.assertTrue(line, msg=res.output)
+
+ self.append_config(configDevice)
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "OVERLAYFS_ETC_FSTYPE should contain a valid file system type on /dev/mmcblk0p1")
+ self.assertTrue(line, msg=res.output)
+
+ def test_image_feature_conflict(self):
+ """
+ Summary: Overlayfs-etc is not allowed to be used with package-management
+ Expected: Feature conflict
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+DISTRO_FEATURES += "systemd"
+
+# Use systemd as init manager
+VIRTUAL-RUNTIME_init_manager = "systemd"
+
+# enable overlayfs in the kernel
+KERNEL_EXTRA_FEATURES:append = " features/overlayfs/overlayfs.scc"
+EXTRA_IMAGE_FEATURES += "overlayfs-etc"
+EXTRA_IMAGE_FEATURES += "package-management"
+"""
+
+ self.write_config(config)
+
+ res = bitbake('core-image-minimal', ignore_status=True)
+ line = getline(res, "contains conflicting IMAGE_FEATURES")
+ self.assertTrue("overlayfs-etc" in res.output, msg=res.output)
+ self.assertTrue("package-management" in res.output, msg=res.output)
+
+ def test_image_feature_is_missing_class_included(self):
+ configAppend = """
+INHERIT += "overlayfs-etc"
+"""
+ self.run_check_image_feature(configAppend)
+
+ def test_image_feature_is_missing(self):
+ self.run_check_image_feature()
+
+ def run_check_image_feature(self, appendToConfig=""):
+ """
+ Summary: Overlayfs-etc class is not applied when image feature is not set
+                     even if we inherit it directly.
+ Expected: Image is created successfully but /etc is not an overlay
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = f"""
+DISTRO_FEATURES += "systemd"
+
+# Use systemd as init manager
+VIRTUAL-RUNTIME_init_manager = "systemd"
+
+# enable overlayfs in the kernel
+KERNEL_EXTRA_FEATURES:append = " features/overlayfs/overlayfs.scc"
+
+IMAGE_FSTYPES += "wic"
+WKS_FILE = "overlayfs_etc.wks.in"
+
+EXTRA_IMAGE_FEATURES += "read-only-rootfs"
+# Image configuration for overlayfs-etc
+OVERLAYFS_ETC_MOUNT_POINT = "/data"
+OVERLAYFS_ETC_DEVICE = "/dev/sda3"
+{appendToConfig}
+"""
+
+ self.write_config(config)
+
+ bitbake('core-image-minimal')
+
+ with runqemu('core-image-minimal', image_fstype='wic') as qemu:
+ status, output = qemu.run_serial("/bin/mount")
+
+ line = getline_qemu(output, "upperdir=/data/overlay-etc/upper")
+ self.assertFalse(line, msg=output)
+
+ def test_sbin_init_preinit(self):
+ self.run_sbin_init(False)
+
+ def test_sbin_init_original(self):
+ self.run_sbin_init(True)
+
+ def run_sbin_init(self, origInit):
+ """
+ Summary: Confirm we can replace original init and mount overlay on top of /etc
+ Expected: Image is created successfully and /etc is mounted as an overlay
+ Author: Vyacheslav Yurkov <uvv.mail@gmail.com>
+ """
+
+ config = """
+DISTRO_FEATURES += "systemd"
+
+# Use systemd as init manager
+VIRTUAL-RUNTIME_init_manager = "systemd"
+
+# enable overlayfs in the kernel
+KERNEL_EXTRA_FEATURES:append = " features/overlayfs/overlayfs.scc"
+
+IMAGE_FSTYPES += "wic"
+OVERLAYFS_INIT_OPTION = "{OVERLAYFS_INIT_OPTION}"
+WKS_FILE = "overlayfs_etc.wks.in"
+
+EXTRA_IMAGE_FEATURES += "read-only-rootfs"
+# Image configuration for overlayfs-etc
+EXTRA_IMAGE_FEATURES += "overlayfs-etc"
+IMAGE_FEATURES:remove = "package-management"
+OVERLAYFS_ETC_MOUNT_POINT = "/data"
+OVERLAYFS_ETC_FSTYPE = "ext4"
+OVERLAYFS_ETC_DEVICE = "/dev/sda3"
+OVERLAYFS_ETC_USE_ORIG_INIT_NAME = "{OVERLAYFS_ETC_USE_ORIG_INIT_NAME}"
+"""
+
+ args = {
+ 'OVERLAYFS_INIT_OPTION': "" if origInit else "init=/sbin/preinit",
+ 'OVERLAYFS_ETC_USE_ORIG_INIT_NAME': int(origInit)
+ }
+
+ self.write_config(config.format(**args))
+
+ bitbake('core-image-minimal')
+ testFile = "/etc/my-test-data"
+
+ with runqemu('core-image-minimal', image_fstype='wic', discard_writes=False) as qemu:
+ status, output = qemu.run_serial("/bin/mount")
+
+ line = getline_qemu(output, "/dev/sda3")
+ self.assertTrue(line and "/data" in line, msg=output)
+
+ line = getline_qemu(output, "upperdir=/data/overlay-etc/upper")
+ self.assertTrue(line and line.startswith("/data/overlay-etc/upper on /etc type overlay"), msg=output)
+
+ status, output = qemu.run_serial("touch " + testFile)
+ status, output = qemu.run_serial("sync")
+ status, output = qemu.run_serial("ls -1 " + testFile)
+ line = getline_qemu(output, testFile)
+ self.assertTrue(line and line.startswith(testFile), msg=output)
+
+ # Check that file exists in /etc after reboot
+ with runqemu('core-image-minimal', image_fstype='wic') as qemu:
+ status, output = qemu.run_serial("ls -1 " + testFile)
+ line = getline_qemu(output, testFile)
+ self.assertTrue(line and line.startswith(testFile), msg=output)
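These runtime checks all funnel serial output through getline_qemu(). A minimal sketch of its assumed behaviour (illustration only; the real helper lives in the oeqa selftest utilities):

    def getline_qemu(output, pattern):
        # Scan the captured serial output and return the first line
        # containing the pattern, or None when nothing matches
        # (assumed behaviour, mirroring how the assertions above use it).
        for line in output.splitlines():
            if pattern in line:
                return line
        return None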
diff --git a/meta/lib/oeqa/selftest/cases/package.py b/meta/lib/oeqa/selftest/cases/package.py
index 3010b1af49..cebbb4f3f4 100644
--- a/meta/lib/oeqa/selftest/cases/package.py
+++ b/meta/lib/oeqa/selftest/cases/package.py
@@ -116,9 +116,9 @@ class PackageTests(OESelftestTestCase):
# Verify gdb to read symbols from separated debug hardlink file correctly
def test_gdb_hardlink_debug(self):
- features = 'IMAGE_INSTALL_append = " selftest-hardlink"\n'
- features += 'IMAGE_INSTALL_append = " selftest-hardlink-dbg"\n'
- features += 'IMAGE_INSTALL_append = " selftest-hardlink-gdb"\n'
+ features = 'IMAGE_INSTALL:append = " selftest-hardlink"\n'
+ features += 'IMAGE_INSTALL:append = " selftest-hardlink-dbg"\n'
+ features += 'IMAGE_INSTALL:append = " selftest-hardlink-gdb"\n'
self.write_config(features)
bitbake("core-image-minimal")
@@ -151,7 +151,7 @@ class PackageTests(OESelftestTestCase):
def test_preserve_ownership(self):
import os, stat, oe.cachedpath
- features = 'IMAGE_INSTALL_append = " selftest-chown"\n'
+ features = 'IMAGE_INSTALL:append = " selftest-chown"\n'
self.write_config(features)
bitbake("core-image-minimal")
@@ -168,6 +168,7 @@ class PackageTests(OESelftestTestCase):
with runqemu('core-image-minimal') as qemu:
for path in [ sysconfdir + "/selftest-chown/file",
sysconfdir + "/selftest-chown/dir",
- sysconfdir + "/selftest-chown/symlink"]:
+ sysconfdir + "/selftest-chown/symlink",
+ sysconfdir + "/selftest-chown/fifotest/fifo"]:
if not check_ownership(qemu, "test", "test", path):
self.fail('Test ownership %s failed' % path)
diff --git a/meta/lib/oeqa/selftest/cases/pkgdata.py b/meta/lib/oeqa/selftest/cases/pkgdata.py
index 833a1803ba..254abc40c6 100644
--- a/meta/lib/oeqa/selftest/cases/pkgdata.py
+++ b/meta/lib/oeqa/selftest/cases/pkgdata.py
@@ -218,3 +218,9 @@ class OePkgdataUtilTests(OESelftestTestCase):
def test_specify_pkgdatadir(self):
result = runCmd('oe-pkgdata-util -p %s lookup-pkg zlib' % get_bb_var('PKGDATA_DIR'))
self.assertEqual(result.output, 'libz1')
+
+ def test_no_param(self):
+ result = runCmd('oe-pkgdata-util', ignore_status=True)
+ self.assertEqual(result.status, 2, "Expected exit status 2; output: %s" % result.output)
+ currpos = result.output.find('usage: oe-pkgdata-util')
+ self.assertTrue(currpos != -1, msg="Usage help was not displayed in output: %s" % result.output)
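test_no_param leans on the argparse convention that a usage error exits with status 2 and prints usage text. A self-contained illustration under that assumption (hypothetical parser, not oe-pkgdata-util itself):

    import argparse

    parser = argparse.ArgumentParser(prog='oe-pkgdata-util')
    parser.add_argument('command')   # stand-in for the tool's required subcommand
    try:
        parser.parse_args([])        # no arguments, as in test_no_param
    except SystemExit as e:
        assert e.code == 2           # argparse signals usage errors with exit code 2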
diff --git a/meta/lib/oeqa/selftest/cases/prservice.py b/meta/lib/oeqa/selftest/cases/prservice.py
index fe1f24ea6d..10158ca7c2 100644
--- a/meta/lib/oeqa/selftest/cases/prservice.py
+++ b/meta/lib/oeqa/selftest/cases/prservice.py
@@ -23,7 +23,7 @@ class BitbakePrTests(OESelftestTestCase):
package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name)
package_data = ftools.read_file(package_data_file)
find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data)
- self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file)
+ self.assertTrue(find_pr, "No PKG revision found via regex 'PKGR: r[0-9]+\\.([0-9]+)' in %s" % package_data_file)
return int(find_pr.group(1))
def get_task_stamp(self, package_name, recipe_task):
@@ -40,7 +40,7 @@ class BitbakePrTests(OESelftestTestCase):
return str(stamps[0])
def increment_package_pr(self, package_name):
- inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
+ inc_data = "do_package:append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\" > ${PKGDESTWORK}/${PN}.datestamp\n}" % datetime.datetime.now()
self.write_recipeinc(package_name, inc_data)
res = bitbake(package_name, ignore_status=True)
self.delete_recipeinc(package_name)
@@ -63,7 +63,7 @@ class BitbakePrTests(OESelftestTestCase):
pr_2 = self.get_pr_version(package_name)
stamp_2 = self.get_task_stamp(package_name, track_task)
- self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
+ self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1))
self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1)
def run_test_pr_export_import(self, package_name, replace_current_db=True):
@@ -89,7 +89,7 @@ class BitbakePrTests(OESelftestTestCase):
self.increment_package_pr(package_name)
pr_2 = self.get_pr_version(package_name)
- self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1")
+ self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1))
def test_import_export_replace_db(self):
self.run_test_pr_export_import('m4')
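The PR assertions above all reduce to get_pr_version()'s PKGR parsing; stripped to a standalone check with illustrative pkgdata content:

    import re

    package_data = 'PKGR: r0.5\n'    # illustrative; real content comes from pkgdata files
    find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data)
    assert find_pr and int(find_pr.group(1)) == 5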
diff --git a/meta/lib/oeqa/selftest/cases/pseudo.py b/meta/lib/oeqa/selftest/cases/pseudo.py
new file mode 100644
index 0000000000..33593d5ce9
--- /dev/null
+++ b/meta/lib/oeqa/selftest/cases/pseudo.py
@@ -0,0 +1,27 @@
+#
+# SPDX-License-Identifier: MIT
+#
+
+import glob
+import os
+import shutil
+from oeqa.utils.commands import bitbake, get_test_layer
+from oeqa.selftest.case import OESelftestTestCase
+
+class Pseudo(OESelftestTestCase):
+
+ def test_pseudo_pyc_creation(self):
+ self.write_config("")
+
+ metaselftestpath = get_test_layer()
+ pycache_path = os.path.join(metaselftestpath, 'lib/__pycache__')
+ if os.path.exists(pycache_path):
+ shutil.rmtree(pycache_path)
+
+ bitbake('pseudo-pyc-test -c install')
+
+ test1_pyc_present = len(glob.glob(os.path.join(pycache_path, 'pseudo_pyc_test1.*.pyc')))
+ self.assertTrue(test1_pyc_present, 'test1 pyc file missing, should be created outside of pseudo context.')
+
+ test2_pyc_present = len(glob.glob(os.path.join(pycache_path, 'pseudo_pyc_test2.*.pyc')))
+ self.assertFalse(test2_pyc_present, 'test2 pyc file present, should not be created in pseudo context.')
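Both assertions hinge on glob.glob() returning an empty (falsy) list when no compiled module exists; in isolation, with the same illustrative paths:

    import glob
    import os

    # An empty result list is falsy, so its length drives
    # assertTrue/assertFalse in the test directly.
    pycs = glob.glob(os.path.join('lib/__pycache__', 'pseudo_pyc_test1.*.pyc'))
    print(bool(len(pycs)))   # True only if compilation happened outside pseudo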
diff --git a/meta/lib/oeqa/selftest/cases/recipetool.py b/meta/lib/oeqa/selftest/cases/recipetool.py
index c2ade2543a..510dae6bad 100644
--- a/meta/lib/oeqa/selftest/cases/recipetool.py
+++ b/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -25,7 +25,7 @@ def tearDownModule():
runCmd('rm -rf %s' % templayerdir)
-class RecipetoolBase(devtool.DevtoolBase):
+class RecipetoolBase(devtool.DevtoolTestCase):
def setUpLocal(self):
super(RecipetoolBase, self).setUpLocal()
@@ -68,17 +68,16 @@ class RecipetoolBase(devtool.DevtoolBase):
return bbappendfile, result.output
-class RecipetoolTests(RecipetoolBase):
+class RecipetoolAppendTests(RecipetoolBase):
@classmethod
def setUpClass(cls):
- super(RecipetoolTests, cls).setUpClass()
+ super(RecipetoolAppendTests, cls).setUpClass()
# Ensure we have the right data in shlibs/pkgdata
cls.logger.info('Running bitbake to generate pkgdata')
bitbake('-c packagedata base-files coreutils busybox selftest-recipetool-appendfile')
- bb_vars = get_bb_vars(['COREBASE', 'BBPATH'])
+ bb_vars = get_bb_vars(['COREBASE'])
cls.corebase = bb_vars['COREBASE']
- cls.bbpath = bb_vars['BBPATH']
def _try_recipetool_appendfile(self, testrecipe, destfile, newfile, options, expectedlines, expectedfiles):
cmd = 'recipetool appendfile %s %s %s %s' % (self.templayerdir, destfile, newfile, options)
@@ -94,7 +93,7 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_basic(self):
# Basic test
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('base-files', '/etc/motd', self.testfile, '', expectedlines, ['motd'])
self.assertNotIn('WARNING: ', output)
@@ -112,11 +111,11 @@ class RecipetoolTests(RecipetoolBase):
# Need a test file - should be executable
testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
testfile2name = os.path.basename(testfile2)
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://%s"\n' % testfile2name,
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${base_bindir}\n',
' install -m 0755 ${WORKDIR}/%s ${D}${base_bindir}/ls\n' % testfile2name,
'}\n']
@@ -138,11 +137,11 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_add(self):
# Try arbitrary file add to a recipe
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
'}\n']
@@ -151,13 +150,13 @@ class RecipetoolTests(RecipetoolBase):
# (so we're testing that, plus modifying an existing bbappend)
testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
testfile2name = os.path.basename(testfile2)
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile \\\n',
' file://%s \\\n' % testfile2name,
' "\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
' install -m 0755 ${WORKDIR}/%s ${D}${datadir}/scriptname\n' % testfile2name,
@@ -166,11 +165,11 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_add_bindir(self):
# Try arbitrary file add to a recipe, this time to a location such that it should be installed as executable
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${bindir}\n',
' install -m 0755 ${WORKDIR}/testfile ${D}${bindir}/selftest-recipetool-testbin\n',
'}\n']
@@ -179,13 +178,13 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_add_machine(self):
# Try arbitrary file add to a recipe, this time with a machine-specific override
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'PACKAGE_ARCH = "${MACHINE_ARCH}"\n',
'\n',
- 'SRC_URI_append_mymachine = " file://testfile"\n',
+ 'SRC_URI:append:mymachine = " file://testfile"\n',
'\n',
- 'do_install_append_mymachine() {\n',
+ 'do_install:append:mymachine() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
'}\n']
@@ -194,72 +193,59 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_orig(self):
# A file that's in SRC_URI and in do_install with the same name
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-orig', self.testfile, '', expectedlines, ['selftest-replaceme-orig'])
self.assertNotIn('WARNING: ', output)
def test_recipetool_appendfile_todir(self):
# A file that's in SRC_URI and in do_install with destination directory rather than file
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-todir', self.testfile, '', expectedlines, ['selftest-replaceme-todir'])
self.assertNotIn('WARNING: ', output)
def test_recipetool_appendfile_renamed(self):
# A file that's in SRC_URI with a different name to the destination file
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-renamed', self.testfile, '', expectedlines, ['file1'])
self.assertNotIn('WARNING: ', output)
def test_recipetool_appendfile_subdir(self):
# A file that's in SRC_URI in a subdir
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-subdir\n',
'}\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-subdir', self.testfile, '', expectedlines, ['testfile'])
self.assertNotIn('WARNING: ', output)
- def test_recipetool_appendfile_src_glob(self):
- # A file that's in SRC_URI as a glob
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
- '\n',
- 'SRC_URI += "file://testfile"\n',
- '\n',
- 'do_install_append() {\n',
- ' install -d ${D}${datadir}\n',
- ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-src-globfile\n',
- '}\n']
- _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-src-globfile', self.testfile, '', expectedlines, ['testfile'])
- self.assertNotIn('WARNING: ', output)
-
def test_recipetool_appendfile_inst_glob(self):
# A file that's in do_install as a glob
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-globfile'])
self.assertNotIn('WARNING: ', output)
def test_recipetool_appendfile_inst_todir_glob(self):
# A file that's in do_install as a glob with destination as a directory
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-todir-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-todir-globfile'])
self.assertNotIn('WARNING: ', output)
def test_recipetool_appendfile_patch(self):
# A file that's added by a patch in SRC_URI
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${sysconfdir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${sysconfdir}/selftest-replaceme-patched\n',
'}\n']
@@ -273,11 +259,11 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_script(self):
# Now, a file that's in SRC_URI but installed by a script (so no mention in do_install)
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-scripted\n',
'}\n']
@@ -286,7 +272,7 @@ class RecipetoolTests(RecipetoolBase):
def test_recipetool_appendfile_inst_func(self):
# A file that's installed from a function called by do_install
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
_, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-func', self.testfile, '', expectedlines, ['selftest-replaceme-inst-func'])
self.assertNotIn('WARNING: ', output)
@@ -296,11 +282,11 @@ class RecipetoolTests(RecipetoolBase):
# First try without specifying recipe
self._try_recipetool_appendfile_fail('/usr/share/selftest-replaceme-postinst', self.testfile, ['File /usr/share/selftest-replaceme-postinst may be written out in a pre/postinstall script of the following recipes:', 'selftest-recipetool-appendfile'])
# Now specify recipe
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n',
'SRC_URI += "file://testfile"\n',
'\n',
- 'do_install_append() {\n',
+ 'do_install:append() {\n',
' install -d ${D}${datadir}\n',
' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-postinst\n',
'}\n']
@@ -345,6 +331,9 @@ class RecipetoolTests(RecipetoolBase):
filename = try_appendfile_wc('-w')
self.assertEqual(filename, recipefn.split('_')[0] + '_%.bbappend')
+
+class RecipetoolCreateTests(RecipetoolBase):
+
def test_recipetool_create(self):
# Try adding a recipe
tempsrc = os.path.join(self.tempdir, 'srctree')
@@ -354,14 +343,14 @@ class RecipetoolTests(RecipetoolBase):
result = runCmd('recipetool create -o %s %s -x %s' % (recipefile, srcuri, tempsrc))
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
- checkvars['LICENSE'] = 'GPLv2'
+ checkvars['LICENSE'] = 'GPL-2.0-only'
checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
checkvars['SRC_URI'] = 'https://github.com/logrotate/logrotate/releases/download/${PV}/logrotate-${PV}.tar.xz'
checkvars['SRC_URI[md5sum]'] = 'a560c57fac87c45b2fc17406cdf79288'
checkvars['SRC_URI[sha256sum]'] = '2e6a401cac9024db2288297e3be1a8ab60e7401ba8e91225218aaf4a27e82a07'
self._test_recipe_contents(recipefile, checkvars, [])
- def test_recipetool_create_git(self):
+ def test_recipetool_create_autotools(self):
if 'x11' not in get_bb_var('DISTRO_FEATURES'):
self.skipTest('Test requires x11 as distro feature')
# Ensure we have the right data in shlibs/pkgdata
@@ -374,11 +363,11 @@ class RecipetoolTests(RecipetoolBase):
result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri + ";rev=9f7cf8895ae2d39c465c04cc78e918c157420269", '-x', tempsrc])
self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
checkvars = {}
- checkvars['LICENSE'] = 'LGPLv2.1'
+ checkvars['LICENSE'] = 'LGPL-2.1-only'
checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=7fbc338309ac38fefcd64b04bb903e34'
checkvars['S'] = '${WORKDIR}/git'
checkvars['PV'] = '1.11+git${SRCPV}'
- checkvars['SRC_URI'] = srcuri
+ checkvars['SRC_URI'] = srcuri + ';branch=master'
checkvars['DEPENDS'] = set(['libcheck', 'libjpeg-turbo', 'libpng', 'libx11', 'libxext', 'pango'])
inherits = ['autotools', 'pkgconfig']
self._test_recipe_contents(recipefile, checkvars, inherits)
@@ -387,8 +376,8 @@ class RecipetoolTests(RecipetoolBase):
# Try adding a recipe
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
- pv = '1.7.3.0'
- srcuri = 'http://www.dest-unreach.org/socat/download/socat-%s.tar.bz2' % pv
+ pv = '1.7.4.1'
+ srcuri = 'http://www.dest-unreach.org/socat/download/Archive/socat-%s.tar.bz2' % pv
result = runCmd('recipetool create %s -o %s' % (srcuri, temprecipe))
dirlist = os.listdir(temprecipe)
if len(dirlist) > 1:
@@ -397,7 +386,7 @@ class RecipetoolTests(RecipetoolBase):
self.fail('recipetool did not create recipe file; output:\n%s\ndirlist:\n%s' % (result.output, str(dirlist)))
self.assertEqual(dirlist[0], 'socat_%s.bb' % pv, 'Recipe file incorrectly named')
checkvars = {}
- checkvars['LICENSE'] = set(['Unknown', 'GPLv2'])
+ checkvars['LICENSE'] = set(['Unknown', 'GPL-2.0-only'])
checkvars['LIC_FILES_CHKSUM'] = set(['file://COPYING.OpenSSL;md5=5c9bccc77f67a8328ef4ebaf468116f4', 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'])
# We don't check DEPENDS since they are variable for this recipe depending on what's in the sysroot
checkvars['S'] = None
@@ -413,7 +402,7 @@ class RecipetoolTests(RecipetoolBase):
result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
- checkvars['LICENSE'] = set(['LGPLv2.1', 'MPL-1.1'])
+ checkvars['LICENSE'] = set(['LGPL-2.1-only', 'MPL-1.1-only'])
checkvars['SRC_URI'] = 'http://taglib.github.io/releases/taglib-${PV}.tar.gz'
checkvars['SRC_URI[md5sum]'] = 'cee7be0ccfc892fa433d6c837df9522a'
checkvars['SRC_URI[sha256sum]'] = 'b6d1a5a610aae6ff39d93de5efd0fdc787aa9e9dc1e7026fa4c961b26563526b'
@@ -422,6 +411,10 @@ class RecipetoolTests(RecipetoolBase):
self._test_recipe_contents(recipefile, checkvars, inherits)
def test_recipetool_create_npm(self):
+ collections = get_bb_var('BBFILE_COLLECTIONS').split()
+ if "openembedded-layer" not in collections:
+ self.skipTest("Test needs meta-oe for nodejs")
+
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
recipefile = os.path.join(temprecipe, 'savoirfairelinux-node-server-example_1.0.0.bb')
@@ -433,16 +426,16 @@ class RecipetoolTests(RecipetoolBase):
checkvars = {}
checkvars['SUMMARY'] = 'Node Server Example'
checkvars['HOMEPAGE'] = 'https://github.com/savoirfairelinux/node-server-example#readme'
- checkvars['LICENSE'] = set(['MIT', 'ISC', 'Unknown'])
+ checkvars['LICENSE'] = 'BSD-3-Clause & ISC & MIT & Unknown'
urls = []
urls.append('npm://registry.npmjs.org/;package=@savoirfairelinux/node-server-example;version=${PV}')
urls.append('npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json')
checkvars['SRC_URI'] = set(urls)
checkvars['S'] = '${WORKDIR}/npm'
- checkvars['LICENSE_${PN}'] = 'MIT'
- checkvars['LICENSE_${PN}-base64'] = 'Unknown'
- checkvars['LICENSE_${PN}-accepts'] = 'MIT'
- checkvars['LICENSE_${PN}-inherits'] = 'ISC'
+ checkvars['LICENSE:${PN}'] = 'MIT'
+ checkvars['LICENSE:${PN}-base64'] = 'Unknown'
+ checkvars['LICENSE:${PN}-accepts'] = 'MIT'
+ checkvars['LICENSE:${PN}-inherits'] = 'ISC'
inherits = ['npm']
self._test_recipe_contents(recipefile, checkvars, inherits)
@@ -456,7 +449,7 @@ class RecipetoolTests(RecipetoolBase):
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
checkvars['LICENSE'] = set(['Apache-2.0'])
- checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https'
+ checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https;branch=master'
inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
@@ -479,25 +472,6 @@ class RecipetoolTests(RecipetoolBase):
inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
- def test_recipetool_create_python3_distutils(self):
- # Test creating python3 package from tarball (using distutils3 class)
- temprecipe = os.path.join(self.tempdir, 'recipe')
- os.makedirs(temprecipe)
- pn = 'docutils'
- pv = '0.14'
- recipefile = os.path.join(temprecipe, '%s_%s.bb' % (pn, pv))
- srcuri = 'https://files.pythonhosted.org/packages/84/f4/5771e41fdf52aabebbadecc9381d11dea0fa34e4759b4071244fa094804c/docutils-%s.tar.gz' % pv
- result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
- self.assertTrue(os.path.isfile(recipefile))
- checkvars = {}
- checkvars['LICENSE'] = set(['PSF', '&', 'BSD', 'GPL'])
- checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING.txt;md5=35a23d42b615470583563132872c97d6'
- checkvars['SRC_URI'] = 'https://files.pythonhosted.org/packages/84/f4/5771e41fdf52aabebbadecc9381d11dea0fa34e4759b4071244fa094804c/docutils-${PV}.tar.gz'
- checkvars['SRC_URI[md5sum]'] = 'c53768d63db3873b7d452833553469de'
- checkvars['SRC_URI[sha256sum]'] = '51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274'
- inherits = ['distutils3']
- self._test_recipe_contents(recipefile, checkvars, inherits)
-
def test_recipetool_create_github_tarball(self):
# Basic test to ensure github URL mangling doesn't apply to release tarballs
temprecipe = os.path.join(self.tempdir, 'recipe')
@@ -513,19 +487,48 @@ class RecipetoolTests(RecipetoolBase):
inherits = ['setuptools3']
self._test_recipe_contents(recipefile, checkvars, inherits)
- def test_recipetool_create_git_http(self):
+ def _test_recipetool_create_git(self, srcuri, branch=None):
# Shared helper to check that git URL mangling works
temprecipe = os.path.join(self.tempdir, 'recipe')
os.makedirs(temprecipe)
- recipefile = os.path.join(temprecipe, 'matchbox-terminal_git.bb')
- srcuri = 'http://git.yoctoproject.org/git/matchbox-terminal'
- result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ name = srcuri.split(';')[0].split('/')[-1]
+ recipefile = os.path.join(temprecipe, name + '_git.bb')
+ options = ' -B %s' % branch if branch else ''
+ result = runCmd('recipetool create -o %s%s "%s"' % (temprecipe, options, srcuri))
self.assertTrue(os.path.isfile(recipefile))
checkvars = {}
- checkvars['LICENSE'] = set(['GPLv2'])
- checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/matchbox-terminal;protocol=http'
- inherits = ['pkgconfig', 'autotools']
- self._test_recipe_contents(recipefile, checkvars, inherits)
+ checkvars['SRC_URI'] = srcuri
+ for scheme in ['http', 'https']:
+ if srcuri.startswith(scheme + ":"):
+ checkvars['SRC_URI'] = 'git%s;protocol=%s' % (srcuri[len(scheme):], scheme)
+ if ';branch=' not in srcuri:
+ checkvars['SRC_URI'] += ';branch=' + (branch or 'master')
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ def test_recipetool_create_git_http(self):
+ self._test_recipetool_create_git('http://git.yoctoproject.org/git/matchbox-keyboard')
+
+ def test_recipetool_create_git_srcuri_master(self):
+ self._test_recipetool_create_git('git://git.yoctoproject.org/matchbox-keyboard;branch=master')
+
+ def test_recipetool_create_git_srcuri_branch(self):
+ self._test_recipetool_create_git('git://git.yoctoproject.org/matchbox-keyboard;branch=matchbox-keyboard-0-1')
+
+ def test_recipetool_create_git_srcbranch(self):
+ self._test_recipetool_create_git('git://git.yoctoproject.org/matchbox-keyboard', 'matchbox-keyboard-0-1')
+
+
+class RecipetoolTests(RecipetoolBase):
+
+ @classmethod
+ def setUpClass(cls):
+ import sys
+
+ super(RecipetoolTests, cls).setUpClass()
+ bb_vars = get_bb_vars(['BBPATH'])
+ cls.bbpath = bb_vars['BBPATH']
+ libpath = os.path.join(get_bb_var('COREBASE'), 'scripts', 'lib', 'recipetool')
+ sys.path.insert(0, libpath)
def _copy_file_with_cleanup(self, srcfile, basedstdir, *paths):
dstdir = basedstdir
@@ -570,6 +573,128 @@ class RecipetoolTests(RecipetoolBase):
with open(srcfile, 'w') as fh:
fh.writelines(plugincontent)
+ def test_recipetool_handle_license_vars(self):
+ from create import handle_license_vars
+ from unittest.mock import Mock
+
+ commonlicdir = get_bb_var('COMMON_LICENSE_DIR')
+
+ d = bb.tinfoil.TinfoilDataStoreConnector
+ d.getVar = Mock(return_value=commonlicdir)
+
+ srctree = tempfile.mkdtemp(prefix='recipetoolqa')
+ self.track_for_cleanup(srctree)
+
+ # Multiple licenses
+ licenses = ['MIT', 'ISC', 'BSD-3-Clause', 'Apache-2.0']
+ for licence in licenses:
+ shutil.copy(os.path.join(commonlicdir, licence), os.path.join(srctree, 'LICENSE.' + licence))
+ # Duplicate license
+ shutil.copy(os.path.join(commonlicdir, 'MIT'), os.path.join(srctree, 'LICENSE'))
+
+ extravalues = {
+ # Duplicate and missing licenses
+ 'LICENSE': 'Zlib & BSD-2-Clause & Zlib',
+ 'LIC_FILES_CHKSUM': [
+ 'file://README.md;md5=0123456789abcdef0123456789abcd'
+ ]
+ }
+ lines_before = []
+ handled = []
+ licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
+ expected_lines_before = [
+ '# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is',
+ '# your responsibility to verify that the values are complete and correct.',
+ '# NOTE: Original package / source metadata indicates license is: BSD-2-Clause & Zlib',
+ '#',
+ '# NOTE: multiple licenses have been detected; they have been separated with &',
+ '# in the LICENSE value for now since it is a reasonable assumption that all',
+ '# of the licenses apply. If instead there is a choice between the multiple',
+ '# licenses then you should change the value to separate the licenses with |',
+ '# instead of &. If there is any doubt, check the accompanying documentation',
+ '# to determine which situation is applicable.',
+ 'LICENSE = "Apache-2.0 & BSD-2-Clause & BSD-3-Clause & ISC & MIT & Zlib"',
+ 'LIC_FILES_CHKSUM = "file://LICENSE;md5=0835ade698e0bcf8506ecda2f7b4f302 \\\n'
+ ' file://LICENSE.Apache-2.0;md5=89aea4e17d99a7cacdbeed46a0096b10 \\\n'
+ ' file://LICENSE.BSD-3-Clause;md5=550794465ba0ec5312d6919e203a55f9 \\\n'
+ ' file://LICENSE.ISC;md5=f3b90e78ea0cffb20bf5cca7947a896d \\\n'
+ ' file://LICENSE.MIT;md5=0835ade698e0bcf8506ecda2f7b4f302 \\\n'
+ ' file://README.md;md5=0123456789abcdef0123456789abcd"',
+ ''
+ ]
+ self.assertEqual(lines_before, expected_lines_before)
+ expected_licvalues = [
+ ('MIT', 'LICENSE', '0835ade698e0bcf8506ecda2f7b4f302'),
+ ('Apache-2.0', 'LICENSE.Apache-2.0', '89aea4e17d99a7cacdbeed46a0096b10'),
+ ('BSD-3-Clause', 'LICENSE.BSD-3-Clause', '550794465ba0ec5312d6919e203a55f9'),
+ ('ISC', 'LICENSE.ISC', 'f3b90e78ea0cffb20bf5cca7947a896d'),
+ ('MIT', 'LICENSE.MIT', '0835ade698e0bcf8506ecda2f7b4f302')
+ ]
+ self.assertEqual(handled, [('license', expected_licvalues)])
+ self.assertEqual(extravalues, {})
+ self.assertEqual(licvalues, expected_licvalues)
+
+
+ def test_recipetool_split_pkg_licenses(self):
+ from create import split_pkg_licenses
+ licvalues = [
+ # Duplicate licenses
+ ('BSD-2-Clause', 'x/COPYING', None),
+ ('BSD-2-Clause', 'x/LICENSE', None),
+ # Multiple licenses
+ ('MIT', 'x/a/LICENSE.MIT', None),
+ ('ISC', 'x/a/LICENSE.ISC', None),
+ # Alternative licenses
+ ('(MIT | ISC)', 'x/b/LICENSE', None),
+ # Alternative licenses without brackets
+ ('MIT | BSD-2-Clause', 'x/c/LICENSE', None),
+ # Multi licenses with alternatives
+ ('MIT', 'x/d/COPYING', None),
+ ('MIT | BSD-2-Clause', 'x/d/LICENSE', None),
+ # Multi licenses with alternatives and brackets
+ ('Apache-2.0 & ((MIT | ISC) & BSD-3-Clause)', 'x/e/LICENSE', None)
+ ]
+ packages = {
+ '${PN}': '',
+ 'a': 'x/a',
+ 'b': 'x/b',
+ 'c': 'x/c',
+ 'd': 'x/d',
+ 'e': 'x/e',
+ 'f': 'x/f',
+ 'g': 'x/g',
+ }
+ fallback_licenses = {
+ # Ignored
+ 'a': 'BSD-3-Clause',
+ # Used
+ 'f': 'BSD-3-Clause'
+ }
+ outlines = []
+ outlicenses = split_pkg_licenses(licvalues, packages, outlines, fallback_licenses)
+ expected_outlicenses = {
+ '${PN}': ['BSD-2-Clause'],
+ 'a': ['ISC', 'MIT'],
+ 'b': ['(ISC | MIT)'],
+ 'c': ['(BSD-2-Clause | MIT)'],
+ 'd': ['(BSD-2-Clause | MIT)', 'MIT'],
+ 'e': ['(ISC | MIT)', 'Apache-2.0', 'BSD-3-Clause'],
+ 'f': ['BSD-3-Clause'],
+ 'g': ['Unknown']
+ }
+ self.assertEqual(outlicenses, expected_outlicenses)
+ expected_outlines = [
+ 'LICENSE:${PN} = "BSD-2-Clause"',
+ 'LICENSE:a = "ISC & MIT"',
+ 'LICENSE:b = "(ISC | MIT)"',
+ 'LICENSE:c = "(BSD-2-Clause | MIT)"',
+ 'LICENSE:d = "(BSD-2-Clause | MIT) & MIT"',
+ 'LICENSE:e = "(ISC | MIT) & Apache-2.0 & BSD-3-Clause"',
+ 'LICENSE:f = "BSD-3-Clause"',
+ 'LICENSE:g = "Unknown"'
+ ]
+ self.assertEqual(outlines, expected_outlines)
+
class RecipetoolAppendsrcBase(RecipetoolBase):
def _try_recipetool_appendsrcfile(self, testrecipe, newfile, destfile, options, expectedlines, expectedfiles):
@@ -629,7 +754,7 @@ class RecipetoolAppendsrcBase(RecipetoolBase):
else:
destpath = '.' + os.sep
- expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ expectedlines = ['FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n',
'\n']
if has_src_uri:
uri = 'file://%s' % filename
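_test_recipetool_create_git() encodes the expected URL mangling inline; the same rules as a standalone function, purely as an illustration of what the test checks (not the recipetool source):

    def expected_src_uri(srcuri, branch=None):
        # http(s):// URLs are rewritten to git:// plus an explicit protocol,
        # and a branch parameter is appended when the URL lacks one.
        result = srcuri
        for scheme in ('http', 'https'):
            if srcuri.startswith(scheme + ':'):
                result = 'git%s;protocol=%s' % (srcuri[len(scheme):], scheme)
        if ';branch=' not in srcuri:
            result += ';branch=' + (branch or 'master')
        return result

    assert expected_src_uri('http://git.yoctoproject.org/git/matchbox-keyboard') == \
        'git://git.yoctoproject.org/git/matchbox-keyboard;protocol=http;branch=master'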
diff --git a/meta/lib/oeqa/selftest/cases/recipeutils.py b/meta/lib/oeqa/selftest/cases/recipeutils.py
index 747870383b..f1dd63f65b 100644
--- a/meta/lib/oeqa/selftest/cases/recipeutils.py
+++ b/meta/lib/oeqa/selftest/cases/recipeutils.py
@@ -40,7 +40,7 @@ class RecipeUtilsTests(OESelftestTestCase):
SUMMARY = "Python framework to process interdependent tasks in a pool of workers"
HOMEPAGE = "http://github.com/gitpython-developers/async"
SECTION = "devel/python"
--LICENSE = "BSD"
+-LICENSE = "BSD-3-Clause"
+LICENSE = "something"
LIC_FILES_CHKSUM = "file://PKG-INFO;beginline=8;endline=8;md5=88df8e78b9edfd744953862179f2d14e"
@@ -52,7 +52,7 @@ class RecipeUtilsTests(OESelftestTestCase):
+SRC_URI[md5sum] = "aaaaaa"
SRC_URI[sha256sum] = "ac6894d876e45878faae493b0cf61d0e28ec417334448ac0a6ea2229d8343051"
- RDEPENDS_${PN} += "${PYTHON_PN}-threading"
+ RDEPENDS:${PN} += "${PYTHON_PN}-threading"
"""
patchlines = []
for f in patches:
@@ -80,7 +80,7 @@ class RecipeUtilsTests(OESelftestTestCase):
-SRC_URI += "file://somefile"
-
- SRC_URI_append = " file://anotherfile"
+ SRC_URI:append = " file://anotherfile"
"""
patchlines = []
for f in patches:
@@ -105,7 +105,7 @@ class RecipeUtilsTests(OESelftestTestCase):
-SRC_URI += "file://somefile"
-
--SRC_URI_append = " file://anotherfile"
+-SRC_URI:append = " file://anotherfile"
"""
patchlines = []
for f in patches:
diff --git a/meta/lib/oeqa/selftest/cases/reproducible.py b/meta/lib/oeqa/selftest/cases/reproducible.py
index 5d3959be77..7caf8c3e7d 100644
--- a/meta/lib/oeqa/selftest/cases/reproducible.py
+++ b/meta/lib/oeqa/selftest/cases/reproducible.py
@@ -17,6 +17,16 @@ import stat
import os
import datetime
+exclude_packages = [
+ ]
+
+def is_excluded(package):
+ package_name = os.path.basename(package)
+ for i in exclude_packages:
+ if package_name.startswith(i):
+ return i
+ return None
+
MISSING = 'MISSING'
DIFFERENT = 'DIFFERENT'
SAME = 'SAME'
@@ -39,14 +49,21 @@ class PackageCompareResults(object):
self.total = []
self.missing = []
self.different = []
+ self.different_excluded = []
self.same = []
+ self.active_exclusions = set()
def add_result(self, r):
self.total.append(r)
if r.status == MISSING:
self.missing.append(r)
elif r.status == DIFFERENT:
- self.different.append(r)
+ exclusion = is_excluded(r.reference)
+ if exclusion:
+ self.different_excluded.append(r)
+ self.active_exclusions.add(exclusion)
+ else:
+ self.different.append(r)
else:
self.same.append(r)
@@ -54,10 +71,14 @@ class PackageCompareResults(object):
self.total.sort()
self.missing.sort()
self.different.sort()
+ self.different_excluded.sort()
self.same.sort()
def __str__(self):
- return 'same=%i different=%i missing=%i total=%i' % (len(self.same), len(self.different), len(self.missing), len(self.total))
+ return 'same=%i different=%i different_excluded=%i missing=%i total=%i\nunused_exclusions=%s' % (len(self.same), len(self.different), len(self.different_excluded), len(self.missing), len(self.total), self.unused_exclusions())
+
+ def unused_exclusions(self):
+ return sorted(set(exclude_packages) - self.active_exclusions)
def compare_file(reference, test, diffutils_sysroot):
result = CompareResult()
@@ -68,7 +89,7 @@ def compare_file(reference, test, diffutils_sysroot):
result.status = MISSING
return result
- r = runCmd(['cmp', '--quiet', reference, test], native_sysroot=diffutils_sysroot, ignore_status=True)
+ r = runCmd(['cmp', '--quiet', reference, test], native_sysroot=diffutils_sysroot, ignore_status=True, sync=False)
if r.status:
result.status = DIFFERENT
@@ -77,9 +98,45 @@ def compare_file(reference, test, diffutils_sysroot):
result.status = SAME
return result
+def run_diffoscope(a_dir, b_dir, html_dir, max_report_size=0, **kwargs):
+ return runCmd(['diffoscope', '--no-default-limits', '--max-report-size', str(max_report_size),
+ '--exclude-directory-metadata', 'yes', '--html-dir', html_dir, a_dir, b_dir],
+ **kwargs)
+
+class DiffoscopeTests(OESelftestTestCase):
+ diffoscope_test_files = os.path.join(os.path.dirname(os.path.abspath(__file__)), "diffoscope")
+
+ def test_diffoscope(self):
+ bitbake("diffoscope-native -c addto_recipe_sysroot")
+ diffoscope_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "diffoscope-native")
+
+ # Check that diffoscope doesn't return an error when the files compare
+ # the same (a general check that diffoscope is working)
+ with tempfile.TemporaryDirectory() as tmpdir:
+ run_diffoscope('A', 'A', tmpdir,
+ native_sysroot=diffoscope_sysroot, cwd=self.diffoscope_test_files)
+
+ # Check that diffoscope generates an index.html file when the files are
+ # different
+ with tempfile.TemporaryDirectory() as tmpdir:
+ r = run_diffoscope('A', 'B', tmpdir,
+ native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=self.diffoscope_test_files)
+
+ self.assertNotEqual(r.status, 0, msg="diffoscope was successful when an error was expected")
+ self.assertTrue(os.path.exists(os.path.join(tmpdir, 'index.html')), "HTML index not found!")
+
class ReproducibleTests(OESelftestTestCase):
- package_classes = ['deb', 'ipk']
- images = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline']
+ # Test the reproducibility of whatever is built between sstate_targets and targets
+
+ package_classes = ['deb', 'ipk', 'rpm']
+
+ # Maximum report size, in bytes
+ max_report_size = 250 * 1024 * 1024
+
+ # targets are the things we want to test the reproducibility of
+ targets = ['core-image-minimal', 'core-image-sato', 'core-image-full-cmdline', 'core-image-weston', 'world']
+ # sstate targets are things to pull from sstate to potentially cut build/debugging time
+ sstate_targets = []
save_results = False
if 'OEQA_DEBUGGING_SAVED_OUTPUT' in os.environ:
save_results = os.environ['OEQA_DEBUGGING_SAVED_OUTPUT']
@@ -146,24 +203,36 @@ class ReproducibleTests(OESelftestTestCase):
bb.utils.remove(tmpdir, recurse=True)
config = textwrap.dedent('''\
- INHERIT += "reproducible_build"
PACKAGE_CLASSES = "{package_classes}"
INHIBIT_PACKAGE_STRIP = "1"
TMPDIR = "{tmpdir}"
+ LICENSE_FLAGS_ACCEPTED = "commercial"
+ DISTRO_FEATURES:append = ' systemd pam'
+ USERADDEXTENSION = "useradd-staticids"
+ USERADD_ERROR_DYNAMIC = "skip"
+ USERADD_UID_TABLES += "files/static-passwd"
+ USERADD_GID_TABLES += "files/static-group"
''').format(package_classes=' '.join('package_%s' % c for c in self.package_classes),
tmpdir=tmpdir)
if not use_sstate:
+ if self.sstate_targets:
+ self.logger.info("Building prebuild for %s (sstate allowed)..." % (name))
+ self.write_config(config)
+ bitbake(' '.join(self.sstate_targets))
+
# This config fragment will disable using shared and the sstate
# mirror, forcing a complete build from scratch
config += textwrap.dedent('''\
SSTATE_DIR = "${TMPDIR}/sstate"
- SSTATE_MIRROR = ""
+ SSTATE_MIRRORS = ""
''')
+ self.logger.info("Building %s (sstate%s allowed)..." % (name, '' if use_sstate else ' NOT'))
self.write_config(config)
d = get_bb_vars(capture_vars)
- bitbake(' '.join(self.images))
+ # targets used to be called images
+ bitbake(' '.join(getattr(self, 'images', self.targets)))
return d
def test_reproducible_builds(self):
@@ -187,6 +256,7 @@ class ReproducibleTests(OESelftestTestCase):
self.logger.info('Non-reproducible packages will be copied to %s', save_dir)
vars_A = self.do_test_build('reproducibleA', self.build_from_sstate)
+
vars_B = self.do_test_build('reproducibleB', False)
# NOTE: The temp directories from the reproducible build are purposely
@@ -201,6 +271,7 @@ class ReproducibleTests(OESelftestTestCase):
deploy_A = vars_A['DEPLOY_DIR_' + c.upper()]
deploy_B = vars_B['DEPLOY_DIR_' + c.upper()]
+ self.logger.info('Checking %s packages for differences...' % c)
result = self.compare_packages(deploy_A, deploy_B, diffutils_sysroot)
self.logger.info('Reproducibility summary for %s: %s' % (c, result))
@@ -209,6 +280,7 @@ class ReproducibleTests(OESelftestTestCase):
self.write_package_list(package_class, 'missing', result.missing)
self.write_package_list(package_class, 'different', result.different)
+ self.write_package_list(package_class, 'different_excluded', result.different_excluded)
self.write_package_list(package_class, 'same', result.same)
if self.save_results:
@@ -216,8 +288,12 @@ class ReproducibleTests(OESelftestTestCase):
self.copy_file(d.reference, '/'.join([save_dir, 'packages', strip_topdir(d.reference)]))
self.copy_file(d.test, '/'.join([save_dir, 'packages', strip_topdir(d.test)]))
+ for d in result.different_excluded:
+ self.copy_file(d.reference, '/'.join([save_dir, 'packages-excluded', strip_topdir(d.reference)]))
+ self.copy_file(d.test, '/'.join([save_dir, 'packages-excluded', strip_topdir(d.test)]))
+
if result.missing or result.different:
- fails.append("The following %s packages are missing or different: %s" %
+ fails.append("The following %s packages are missing or different and not in exclusion list: %s" %
(c, '\n'.join(r.test for r in (result.missing + result.different))))
# Clean up empty directories
@@ -232,7 +308,7 @@ class ReproducibleTests(OESelftestTestCase):
# Copy jquery to improve the diffoscope output usability
self.copy_file(os.path.join(jquery_sysroot, 'usr/share/javascript/jquery/jquery.min.js'), os.path.join(package_html_dir, 'jquery.js'))
- runCmd(['diffoscope', '--no-default-limits', '--exclude-directory-metadata', '--html-dir', package_html_dir, 'reproducibleA', 'reproducibleB'],
+ run_diffoscope('reproducibleA', 'reproducibleB', package_html_dir, max_report_size=self.max_report_size,
native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=package_dir)
if fails:
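Once a prefix lands in exclude_packages, is_excluded() steers a differing package into different_excluded instead of a test failure. A self-contained example with a hypothetical entry and paths (the shipped list is empty):

    import os

    exclude_packages = ['bash']      # hypothetical; upstream ships this list empty

    def is_excluded(package):        # same logic as the hunk above
        package_name = os.path.basename(package)
        for i in exclude_packages:
            if package_name.startswith(i):
                return i
        return None

    assert is_excluded('deploy/deb/core2-64/bash_5.1-r0_amd64.deb') == 'bash'
    assert is_excluded('deploy/deb/core2-64/zlib_1.2-r0_amd64.deb') is None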
diff --git a/meta/lib/oeqa/selftest/cases/runcmd.py b/meta/lib/oeqa/selftest/cases/runcmd.py
index a5ef1ea95f..e9612389fe 100644
--- a/meta/lib/oeqa/selftest/cases/runcmd.py
+++ b/meta/lib/oeqa/selftest/cases/runcmd.py
@@ -27,8 +27,8 @@ class RunCmdTests(OESelftestTestCase):
# The delta is intentionally smaller than the timeout, to detect cases where
# we incorrectly apply the timeout more than once.
- TIMEOUT = 5
- DELTA = 3
+ TIMEOUT = 10
+ DELTA = 8
def test_result_okay(self):
result = runCmd("true")
@@ -64,12 +64,12 @@ class RunCmdTests(OESelftestTestCase):
runCmd, "echo foobar >&2; false", shell=True, assert_error=False)
def test_output(self):
- result = runCmd("echo stdout; echo stderr >&2", shell=True)
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, sync=False)
self.assertEqual("stdout\nstderr", result.output)
self.assertEqual("", result.error)
def test_output_split(self):
- result = runCmd("echo stdout; echo stderr >&2", shell=True, stderr=subprocess.PIPE)
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, stderr=subprocess.PIPE, sync=False)
self.assertEqual("stdout", result.output)
self.assertEqual("stderr", result.error)
@@ -77,7 +77,7 @@ class RunCmdTests(OESelftestTestCase):
numthreads = threading.active_count()
start = time.time()
# Killing a hanging process only works when not using a shell?!
- result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True)
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True, sync=False)
self.assertEqual(result.status, -signal.SIGTERM)
end = time.time()
self.assertLess(end - start, self.TIMEOUT + self.DELTA)
@@ -87,7 +87,7 @@ class RunCmdTests(OESelftestTestCase):
numthreads = threading.active_count()
start = time.time()
# Killing a hanging process only works when not using a shell?!
- result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True, stderr=subprocess.PIPE)
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True, stderr=subprocess.PIPE, sync=False)
self.assertEqual(result.status, -signal.SIGTERM)
end = time.time()
self.assertLess(end - start, self.TIMEOUT + self.DELTA)
@@ -95,7 +95,7 @@ class RunCmdTests(OESelftestTestCase):
def test_stdin(self):
numthreads = threading.active_count()
- result = runCmd("cat", data=b"hello world", timeout=self.TIMEOUT)
+ result = runCmd("cat", data=b"hello world", timeout=self.TIMEOUT, sync=False)
self.assertEqual("hello world", result.output)
self.assertEqual(numthreads, threading.active_count(), msg="Thread counts were not equal before (%s) and after (%s), active threads: %s" % (numthreads, threading.active_count(), threading.enumerate()))
self.assertEqual(numthreads, 1)
@@ -103,7 +103,7 @@ class RunCmdTests(OESelftestTestCase):
def test_stdin_timeout(self):
numthreads = threading.active_count()
start = time.time()
- result = runCmd(['sleep', '60'], data=b"hello world", timeout=self.TIMEOUT, ignore_status=True)
+ result = runCmd(['sleep', '60'], data=b"hello world", timeout=self.TIMEOUT, ignore_status=True, sync=False)
self.assertEqual(result.status, -signal.SIGTERM)
end = time.time()
self.assertLess(end - start, self.TIMEOUT + self.DELTA)
@@ -111,12 +111,12 @@ class RunCmdTests(OESelftestTestCase):
def test_log(self):
log = MemLogger()
- result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log)
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log, sync=False)
self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout", "stderr"], log.info_msgs)
self.assertEqual([], log.error_msgs)
def test_log_split(self):
log = MemLogger()
- result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log, stderr=subprocess.PIPE)
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log, stderr=subprocess.PIPE, sync=False)
self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout"], log.info_msgs)
self.assertEqual(["stderr"], log.error_msgs)
diff --git a/meta/lib/oeqa/selftest/cases/runqemu.py b/meta/lib/oeqa/selftest/cases/runqemu.py
index 7e676bcb41..da22f77b27 100644
--- a/meta/lib/oeqa/selftest/cases/runqemu.py
+++ b/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -163,12 +163,11 @@ class QemuTest(OESelftestTestCase):
bitbake(cls.recipe)
def _start_qemu_shutdown_check_if_shutdown_succeeded(self, qemu, timeout):
+ # Allow the runner's LoggingThread instance to exit without errors
+ # (such as the exception "Console connection closed unexpectedly")
+ # as qemu will disappear when we shut it down
+ qemu.runner.allowexit()
qemu.run_serial("shutdown -h now")
- # Stop thread will stop the LoggingThread instance used for logging
- # qemu through serial console, stop thread will prevent this code
- # from facing exception (Console connection closed unexpectedly)
- # when qemu was shutdown by the above shutdown command
- qemu.runner.stop_thread()
time_track = 0
try:
while True:
diff --git a/meta/lib/oeqa/selftest/cases/runtime_test.py b/meta/lib/oeqa/selftest/cases/runtime_test.py
index 793c98a335..642f0eb637 100644
--- a/meta/lib/oeqa/selftest/cases/runtime_test.py
+++ b/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -14,11 +14,6 @@ from oeqa.core.decorator.data import skipIfNotQemu
class TestExport(OESelftestTestCase):
- @classmethod
- def tearDownClass(cls):
- runCmd("rm -rf /tmp/sdk")
- super(TestExport, cls).tearDownClass()
-
def test_testexport_basic(self):
"""
Summary: Check basic testexport functionality with only ping test enabled.
@@ -95,19 +90,20 @@ class TestExport(OESelftestTestCase):
msg = "Couldn't find SDK tarball: %s" % tarball_path
self.assertEqual(os.path.isfile(tarball_path), True, msg)
- # Extract SDK and run tar from SDK
- result = runCmd("%s -y -d /tmp/sdk" % tarball_path)
- self.assertEqual(0, result.status, "Couldn't extract SDK")
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ # Extract SDK and run tar from SDK
+ result = runCmd("%s -y -d %s" % (tarball_path, tmpdirname))
+ self.assertEqual(0, result.status, "Couldn't extract SDK")
- env_script = result.output.split()[-1]
- result = runCmd(". %s; which tar" % env_script, shell=True)
- self.assertEqual(0, result.status, "Couldn't setup SDK environment")
- is_sdk_tar = True if "/tmp/sdk" in result.output else False
- self.assertTrue(is_sdk_tar, "Couldn't setup SDK environment")
+ env_script = result.output.split()[-1]
+ result = runCmd(". %s; which tar" % env_script, shell=True)
+ self.assertEqual(0, result.status, "Couldn't setup SDK environment")
+ is_sdk_tar = True if tmpdirname in result.output else False
+ self.assertTrue(is_sdk_tar, "Couldn't setup SDK environment")
- tar_sdk = result.output
- result = runCmd("%s --version" % tar_sdk)
- self.assertEqual(0, result.status, "Couldn't run tar from SDK")
+ tar_sdk = result.output
+ result = runCmd("%s --version" % tar_sdk)
+ self.assertEqual(0, result.status, "Couldn't run tar from SDK")
class TestImage(OESelftestTestCase):
@@ -124,11 +120,10 @@ class TestImage(OESelftestTestCase):
self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
features = 'INHERIT += "testimage"\n'
- features += 'IMAGE_INSTALL_append = " libssl"\n'
+ features += 'IMAGE_INSTALL:append = " libssl"\n'
features += 'TEST_SUITES = "ping ssh selftest"\n'
self.write_config(features)
- # Build core-image-sato and testimage
bitbake('core-image-full-cmdline socat')
bitbake('-c testimage core-image-full-cmdline')
@@ -156,14 +151,14 @@ class TestImage(OESelftestTestCase):
self.gpg_home = tempfile.mkdtemp(prefix="oeqa-feed-sign-")
self.track_for_cleanup(self.gpg_home)
signing_key_dir = os.path.join(self.testlayer_path, 'files', 'signing')
- runCmd('gpg --batch --homedir %s --import %s' % (self.gpg_home, os.path.join(signing_key_dir, 'key.secret')), native_sysroot=get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native"))
+ runCmd('gpgconf --list-dirs --homedir %s; gpg -v --batch --homedir %s --import %s' % (self.gpg_home, self.gpg_home, os.path.join(signing_key_dir, 'key.secret')), native_sysroot=get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native"), shell=True)
features += 'INHERIT += "sign_package_feed"\n'
features += 'PACKAGE_FEED_GPG_NAME = "testuser"\n'
features += 'PACKAGE_FEED_GPG_PASSPHRASE_FILE = "%s"\n' % os.path.join(signing_key_dir, 'key.passphrase')
features += 'GPG_PATH = "%s"\n' % self.gpg_home
+ features += 'PSEUDO_IGNORE_PATHS .= ",%s"\n' % self.gpg_home
self.write_config(features)
- # Build core-image-sato and testimage
bitbake('core-image-full-cmdline socat')
bitbake('-c testimage core-image-full-cmdline')
@@ -191,14 +186,14 @@ class TestImage(OESelftestTestCase):
qemu_distrofeatures = get_bb_var('DISTRO_FEATURES', 'qemu-system-native')
features = 'INHERIT += "testimage"\n'
if 'gtk+' not in qemu_packageconfig:
- features += 'PACKAGECONFIG_append_pn-qemu-system-native = " gtk+"\n'
+ features += 'PACKAGECONFIG:append:pn-qemu-system-native = " gtk+"\n'
if 'sdl' not in qemu_packageconfig:
- features += 'PACKAGECONFIG_append_pn-qemu-system-native = " sdl"\n'
+ features += 'PACKAGECONFIG:append:pn-qemu-system-native = " sdl"\n'
if 'opengl' not in qemu_distrofeatures:
- features += 'DISTRO_FEATURES_append = " opengl"\n'
+ features += 'DISTRO_FEATURES:append = " opengl"\n'
features += 'TEST_SUITES = "ping ssh virgl"\n'
- features += 'IMAGE_FEATURES_append = " ssh-server-dropbear"\n'
- features += 'IMAGE_INSTALL_append = " kmscube"\n'
+ features += 'IMAGE_FEATURES:append = " ssh-server-dropbear"\n'
+ features += 'IMAGE_INSTALL:append = " kmscube"\n'
features_gtk = features + 'TEST_RUNQEMUPARAMS = "gtk gl"\n'
self.write_config(features_gtk)
bitbake('core-image-minimal')
@@ -217,23 +212,29 @@ class TestImage(OESelftestTestCase):
Author: Alexander Kanavin <alex.kanavin@gmail.com>
"""
import subprocess, os
+
+ distro = oe.lsb.distro_identifier()
+ if distro and distro in ['debian-9', 'debian-10', 'centos-7', 'centos-8', 'ubuntu-16.04', 'ubuntu-18.04', 'almalinux-8.5']:
+ self.skipTest('virgl headless cannot be tested with %s' %(distro))
+
+ render_hint = """If /dev/dri/renderD* is absent due to the lack of a suitable GPU, 'modprobe vgem' will create one suitable for the mesa llvmpipe software renderer."""
try:
content = os.listdir("/dev/dri")
if len([i for i in content if i.startswith('render')]) == 0:
- self.skipTest("No render nodes found in /dev/dri: %s" %(content))
+ self.fail("No render nodes found in /dev/dri: %s. %s" %(content, render_hint))
except FileNotFoundError:
- self.skipTest("/dev/dri directory does not exist; no render nodes available on this machine.")
+ self.fail("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
try:
dripath = subprocess.check_output("pkg-config --variable=dridriverdir dri", shell=True)
except subprocess.CalledProcessError as e:
- self.skipTest("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
+ self.fail("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
qemu_distrofeatures = get_bb_var('DISTRO_FEATURES', 'qemu-system-native')
features = 'INHERIT += "testimage"\n'
if 'opengl' not in qemu_distrofeatures:
- features += 'DISTRO_FEATURES_append = " opengl"\n'
+ features += 'DISTRO_FEATURES:append = " opengl"\n'
features += 'TEST_SUITES = "ping ssh virgl"\n'
- features += 'IMAGE_FEATURES_append = " ssh-server-dropbear"\n'
- features += 'IMAGE_INSTALL_append = " kmscube"\n'
+ features += 'IMAGE_FEATURES:append = " ssh-server-dropbear"\n'
+ features += 'IMAGE_INSTALL:append = " kmscube"\n'
features += 'TEST_RUNQEMUPARAMS = "egl-headless"\n'
self.write_config(features)
bitbake('core-image-minimal')
@@ -259,7 +260,7 @@ class Postinst(OESelftestTestCase):
features += 'IMAGE_FEATURES += "package-management empty-root-password"\n'
features += 'PACKAGE_CLASSES = "%s"\n' % classes
if init_manager == "systemd":
- features += 'DISTRO_FEATURES_append = " systemd"\n'
+ features += 'DISTRO_FEATURES:append = " systemd"\n'
features += 'VIRTUAL-RUNTIME_init_manager = "systemd"\n'
features += 'DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"\n'
features += 'VIRTUAL-RUNTIME_initscripts = ""\n'
@@ -275,7 +276,7 @@ class Postinst(OESelftestTestCase):
# run_serial()'s status code is useless.
for filename in ("rootfs", "delayed-a", "delayed-b"):
status, output = qemu.run_serial("test -f %s && echo found" % os.path.join(targettestdir, filename))
- self.assertEqual(output, "found", "%s was not present on boot" % filename)
+ self.assertIn("found", output, "%s was not present on boot" % filename)
@@ -376,14 +377,14 @@ TEST_SERVER_IP = "192.168.7.1"
TEST_TARGET_IP = "192.168.7.2"
EXTRA_IMAGE_FEATURES += "tools-profile dbg-pkgs"
-IMAGE_FEATURES_append = " ssh-server-dropbear"
+IMAGE_FEATURES:append = " ssh-server-dropbear"
# enables kernel debug symbols
-KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
-KERNEL_EXTRA_FEATURES_append = " features/systemtap/systemtap.scc"
+KERNEL_EXTRA_FEATURES:append = " features/debug/debug-kernel.scc"
+KERNEL_EXTRA_FEATURES:append = " features/systemtap/systemtap.scc"
# add systemtap run-time into target image if it is not there yet
-IMAGE_INSTALL_append = " systemtap"
+IMAGE_INSTALL:append = " systemtap-runtime"
"""
def test_crosstap_helloworld(self):
diff --git a/meta/lib/oeqa/selftest/cases/signing.py b/meta/lib/oeqa/selftest/cases/signing.py
index 202d54994b..6f3d4aeae9 100644
--- a/meta/lib/oeqa/selftest/cases/signing.py
+++ b/meta/lib/oeqa/selftest/cases/signing.py
@@ -44,7 +44,9 @@ class Signing(OESelftestTestCase):
origenv = os.environ.copy()
for e in os.environ:
- if builddir in os.environ[e]:
+ if builddir + "/" in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir + "/", newbuilddir + "/")
+ if os.environ[e].endswith(builddir):
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
os.chdir(newbuilddir)
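The two-step replacement matters: a plain substring test would also rewrite environment values that merely share builddir as a string prefix (for instance a sibling "build-st" directory). A minimal sketch of the rule, with illustrative paths:

    def rewrite(value, builddir, newbuilddir):
        # Rewrite builddir only where it is a real path component: either
        # followed by a separator or sitting at the very end of the value.
        if builddir + "/" in value:
            value = value.replace(builddir + "/", newbuilddir + "/")
        if value.endswith(builddir):
            value = value.replace(builddir, newbuilddir)
        return value

    assert rewrite("/home/u/build/tmp", "/home/u/build", "/tmp/b") == "/tmp/b/tmp"
    assert rewrite("/home/u/build-other", "/home/u/build", "/tmp/b") == "/home/u/build-other"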
@@ -143,7 +145,7 @@ class Signing(OESelftestTestCase):
feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
feature += 'SSTATE_DIR = "%s"\n' % sstatedir
# Any mirror might have partial sstate without .sig files, triggering failures
- feature += 'SSTATE_MIRRORS_forcevariable = ""\n'
+ feature += 'SSTATE_MIRRORS:forcevariable = ""\n'
self.write_config(feature)
@@ -157,13 +159,13 @@ class Signing(OESelftestTestCase):
bitbake('-c clean %s' % test_recipe)
bitbake('-c populate_lic %s' % test_recipe)
- recipe_sig = glob.glob(sstatedir + '/*/*/*:ed:*_populate_lic.tgz.sig')
- recipe_tgz = glob.glob(sstatedir + '/*/*/*:ed:*_populate_lic.tgz')
+ recipe_sig = glob.glob(sstatedir + '/*/*/*:ed:*_populate_lic.tar.zst.sig')
+ recipe_archive = glob.glob(sstatedir + '/*/*/*:ed:*_populate_lic.tar.zst')
self.assertEqual(len(recipe_sig), 1, 'Failed to find .sig file.')
- self.assertEqual(len(recipe_tgz), 1, 'Failed to find .tgz file.')
+ self.assertEqual(len(recipe_archive), 1, 'Failed to find .tar.zst file.')
- ret = runCmd('gpg --homedir %s --verify %s %s' % (self.gpg_dir, recipe_sig[0], recipe_tgz[0]))
+ ret = runCmd('gpg --homedir %s --verify %s %s' % (self.gpg_dir, recipe_sig[0], recipe_archive[0]))
# gpg: Signature made Thu 22 Oct 2015 01:45:09 PM EEST using RSA key ID 61EEFB30
# gpg: Good signature from "testuser (nocomment) <testuser@email.com>"
self.assertIn('gpg: Good signature from', ret.output, 'Package signed incorrectly.')
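The verification step generalizes to any recipe's sstate archive/signature pair; a hedged sketch (the recipe name, paths and GPG homedir are illustrative placeholders, and note that gpg reports verification results on stderr):

    import glob
    import subprocess

    def verify_sstate_sig(sstatedir, recipe, gpg_home):
        # Locate the populate_lic archive and its detached signature,
        # then check the signature against the keyring in gpg_home.
        sigs = glob.glob(sstatedir + '/*/*/*:%s:*_populate_lic.tar.zst.sig' % recipe)
        archives = glob.glob(sstatedir + '/*/*/*:%s:*_populate_lic.tar.zst' % recipe)
        assert len(sigs) == 1 and len(archives) == 1
        ret = subprocess.run(['gpg', '--homedir', gpg_home, '--verify',
                              sigs[0], archives[0]],
                             capture_output=True, text=True)
        return 'gpg: Good signature from' in ret.stderr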
@@ -204,7 +206,7 @@ class LockedSignatures(OESelftestTestCase):
# Use uuid so the hash equivalence server isn't triggered
recipe_append_file = test_recipe + '_' + get_bb_var('PV', test_recipe) + '.bbappend'
recipe_append_path = os.path.join(templayerdir, 'recipes-test', test_recipe, recipe_append_file)
- feature = 'SUMMARY_${PN} = "test locked signature%s"\n' % uuid.uuid4()
+ feature = 'SUMMARY:${PN} = "test locked signature%s"\n' % uuid.uuid4()
os.mkdir(os.path.join(templayerdir, 'recipes-test'))
os.mkdir(os.path.join(templayerdir, 'recipes-test', test_recipe))
diff --git a/meta/lib/oeqa/selftest/cases/sstatetests.py b/meta/lib/oeqa/selftest/cases/sstatetests.py
index 9adb511960..3038b40021 100644
--- a/meta/lib/oeqa/selftest/cases/sstatetests.py
+++ b/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -11,6 +11,7 @@ import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer, create_temp_layer
from oeqa.selftest.cases.sstate import SStateBase
+import oe
import bb.siggen
@@ -19,10 +20,13 @@ class SStateTests(SStateBase):
# Test that a git repository which changes is correctly handled by SRCREV = ${AUTOREV}
# when PV does not contain SRCPV
- tempdir = tempfile.mkdtemp(prefix='oeqa')
+ tempdir = tempfile.mkdtemp(prefix='sstate_autorev')
+ tempdldir = tempfile.mkdtemp(prefix='sstate_autorev_dldir')
self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(tempdldir)
create_temp_layer(tempdir, 'selftestrecipetool')
self.add_command_to_tearDown('bitbake-layers remove-layer %s' % tempdir)
+ self.append_config("DL_DIR = \"%s\"" % tempdldir)
runCmd('bitbake-layers add-layer %s' % tempdir)
# Use dbus-wait as a local git repo we can add a commit between two builds in
@@ -36,7 +40,7 @@ class SStateTests(SStateBase):
recipefile = os.path.join(tempdir, "recipes-test", "dbus-wait-test", 'dbus-wait-test_git.bb')
os.makedirs(os.path.dirname(recipefile))
- srcuri = 'git://' + srcdir + ';protocol=file'
+ srcuri = 'git://' + srcdir + ';protocol=file;branch=master'
result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
@@ -65,7 +69,7 @@ class SStateTests(SStateBase):
results = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific)
if distro_nonspecific:
for r in results:
- if r.endswith(("_populate_lic.tgz", "_populate_lic.tgz.siginfo", "_fetch.tgz.siginfo", "_unpack.tgz.siginfo", "_patch.tgz.siginfo")):
+ if r.endswith(("_populate_lic.tar.zst", "_populate_lic.tar.zst.siginfo", "_fetch.tar.zst.siginfo", "_unpack.tar.zst.siginfo", "_patch.tar.zst.siginfo")):
continue
file_tracker.append(r)
else:
@@ -95,15 +99,15 @@ class SStateTests(SStateBase):
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
- tgz_created = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
- self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_created)))
+ archives_created = self.search_sstate('|'.join(map(str, [s + r'.*?\.tar.zst$' for s in targets])), distro_specific, distro_nonspecific)
+ self.assertTrue(archives_created, msg="Could not find sstate .tar.zst files for: %s (%s)" % (', '.join(map(str, targets)), str(archives_created)))
siginfo_created = self.search_sstate('|'.join(map(str, [s + r'.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s (%s)" % (', '.join(map(str, targets)), str(siginfo_created)))
bitbake(['-ccleansstate'] + targets)
- tgz_removed = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
- self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_removed)))
+ archives_removed = self.search_sstate('|'.join(map(str, [s + r'.*?\.tar.zst$' for s in targets])), distro_specific, distro_nonspecific)
+ self.assertTrue(not archives_removed, msg="do_cleansstate didn't remove .tar.zst sstate files for: %s (%s)" % (', '.join(map(str, targets)), str(archives_removed)))
def test_cleansstate_task_distro_specific_nonspecific(self):
targets = ['binutils-cross-'+ self.tune_arch, 'binutils-native']
@@ -126,15 +130,15 @@ class SStateTests(SStateBase):
bitbake(['-ccleansstate'] + targets)
bitbake(targets)
- results = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True)
+ results = self.search_sstate('|'.join(map(str, [s + r'.*?\.tar.zst$' for s in targets])), distro_specific=False, distro_nonspecific=True)
filtered_results = []
for r in results:
- if r.endswith(("_populate_lic.tgz", "_populate_lic.tgz.siginfo")):
+ if r.endswith(("_populate_lic.tar.zst", "_populate_lic.tar.zst.siginfo")):
continue
filtered_results.append(r)
self.assertTrue(filtered_results == [], msg="Found distro non-specific sstate for: %s (%s)" % (', '.join(map(str, targets)), str(filtered_results)))
- file_tracker_1 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
- self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets)))
+ file_tracker_1 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tar.zst$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+ self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
self.track_for_cleanup(self.distro_specific_sstate + "_old")
shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old")
@@ -142,14 +146,14 @@ class SStateTests(SStateBase):
bitbake(['-cclean'] + targets)
bitbake(targets)
- file_tracker_2 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
- self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files ware created for: %s" % ', '.join(map(str, targets)))
+ file_tracker_2 = self.search_sstate('|'.join(map(str, [s + r'.*?\.tar.zst$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+ self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
- self.assertTrue(not_recreated == [], msg="The following sstate files ware not recreated: %s" % ', '.join(map(str, not_recreated)))
+ self.assertTrue(not_recreated == [], msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated)))
created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
- self.assertTrue(created_once == [], msg="The following sstate files ware created only in the second run: %s" % ', '.join(map(str, created_once)))
+ self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
def test_rebuild_distro_specific_sstate_cross_native_targets(self):
self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch, 'binutils-native'], temp_sstate_location=True)
@@ -171,9 +175,9 @@ class SStateTests(SStateBase):
# If buildhistory is enabled, we need to disable version-going-backwards
# QA checks for this test. It may report errors otherwise.
- self.append_config('ERROR_QA_remove = "version-going-backwards"')
+ self.append_config('ERROR_QA:remove = "version-going-backwards"')
- # For not this only checks if random sstate tasks are handled correctly as a group.
+ # For now this only checks if random sstate tasks are handled correctly as a group.
# In the future we should add control over what tasks we check for.
sstate_archs_list = []
@@ -185,23 +189,23 @@ class SStateTests(SStateBase):
if not sstate_arch in sstate_archs_list:
sstate_archs_list.append(sstate_arch)
if target_config[idx] == target_config[-1]:
- target_sstate_before_build = self.search_sstate(target + r'.*?\.tgz$')
+ target_sstate_before_build = self.search_sstate(target + r'.*?\.tar.zst$')
bitbake("-cclean %s" % target)
result = bitbake(target, ignore_status=True)
if target_config[idx] == target_config[-1]:
- target_sstate_after_build = self.search_sstate(target + r'.*?\.tgz$')
+ target_sstate_after_build = self.search_sstate(target + r'.*?\.tar.zst$')
expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)]
self.remove_config(global_config[idx])
self.remove_recipeinc(target, target_config[idx])
self.assertEqual(result.status, 0, msg = "build of %s failed with %s" % (target, result.output))
runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list))))
- actual_remaining_sstate = [x for x in self.search_sstate(target + r'.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
+ actual_remaining_sstate = [x for x in self.search_sstate(target + r'.*?\.tar.zst$') if not any(pattern in x for pattern in ignore_patterns)]
actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
- self.assertFalse(actual_not_expected, msg="Files should have been removed but ware not: %s" % ', '.join(map(str, actual_not_expected)))
+ self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
- self.assertFalse(expected_not_actual, msg="Extra files ware removed: %s" ', '.join(map(str, expected_not_actual)))
+        self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
def test_sstate_cache_management_script_using_pr_1(self):
global_config = []
@@ -258,7 +262,7 @@ PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
- bitbake("core-image-sato -S none")
+ bitbake("core-image-weston -S none")
self.write_config("""
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
@@ -270,12 +274,12 @@ PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
- bitbake("core-image-sato -S none")
+ bitbake("core-image-weston -S none")
def get_files(d):
f = []
for root, dirs, files in os.walk(d):
- if "core-image-sato" in root:
+ if "core-image-weston" in root:
# SDKMACHINE changing will change
# do_rootfs/do_testimage/do_build stamps of images which
# is safe to ignore.
@@ -303,7 +307,7 @@ NATIVELSBSTRING = \"DistroA\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
- bitbake("core-image-sato -S none")
+ bitbake("core-image-weston -S none")
self.write_config("""
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
TCLIBCAPPEND = \"\"
@@ -311,7 +315,7 @@ NATIVELSBSTRING = \"DistroB\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
- bitbake("core-image-sato -S none")
+ bitbake("core-image-weston -S none")
def get_files(d):
f = []
@@ -338,13 +342,15 @@ TCLIBCAPPEND = \"\"
MACHINE = \"qemux86-64\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
"""
+ #OLDEST_KERNEL is arch specific so set to a different value here for testing
configB = """
TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
TCLIBCAPPEND = \"\"
MACHINE = \"qemuarm\"
+OLDEST_KERNEL = \"3.3.0\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
"""
- self.sstate_allarch_samesigs(configA, configB)
+ self.sstate_common_samesigs(configA, configB, allarch=True)
def test_sstate_nativesdk_samesigs_multilib(self):
"""
@@ -357,7 +363,7 @@ TCLIBCAPPEND = \"\"
MACHINE = \"qemux86-64\"
require conf/multilib.conf
MULTILIBS = \"multilib:lib32\"
-DEFAULTTUNE_virtclass-multilib-lib32 = \"x86\"
+DEFAULTTUNE:virtclass-multilib-lib32 = \"x86\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
"""
configB = """
@@ -368,9 +374,9 @@ require conf/multilib.conf
MULTILIBS = \"\"
BB_SIGNATURE_HANDLER = "OEBasicHash"
"""
- self.sstate_allarch_samesigs(configA, configB)
+ self.sstate_common_samesigs(configA, configB)
- def sstate_allarch_samesigs(self, configA, configB):
+ def sstate_common_samesigs(self, configA, configB, allarch=False):
self.write_config(configA)
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
@@ -398,6 +404,13 @@ BB_SIGNATURE_HANDLER = "OEBasicHash"
self.maxDiff = None
self.assertEqual(files1, files2)
+ if allarch:
+ allarchdir = os.path.basename(glob.glob(self.topdir + "/tmp-sstatesamehash/stamps/all-*-linux")[0])
+
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/" + allarchdir)
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/" + allarchdir)
+ self.assertEqual(files1, files2)
+
def test_sstate_sametune_samesigs(self):
"""
The sstate checksums of two identical machines (using the same tune) should be the
@@ -411,7 +424,7 @@ TCLIBCAPPEND = \"\"
MACHINE = \"qemux86\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
-DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+DEFAULTTUNE:virtclass-multilib-lib32 = "x86"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
@@ -422,7 +435,7 @@ TCLIBCAPPEND = \"\"
MACHINE = \"qemux86copy\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
-DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+DEFAULTTUNE:virtclass-multilib-lib32 = "x86"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
@@ -459,7 +472,7 @@ TCLIBCAPPEND = \"\"
MACHINE = \"qemux86\"
require conf/multilib.conf
MULTILIBS = "multilib:lib32"
-DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+DEFAULTTUNE:virtclass-multilib-lib32 = "x86"
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
@@ -500,7 +513,7 @@ PARALLEL_MAKE = "-j 1"
DL_DIR = "${TOPDIR}/download1"
TIME = "111111"
DATE = "20161111"
-INHERIT_remove = "buildstats-summary buildhistory uninative"
+INHERIT:remove = "buildstats-summary buildhistory uninative"
http_proxy = ""
BB_SIGNATURE_HANDLER = "OEBasicHash"
""")
@@ -516,7 +529,7 @@ DL_DIR = "${TOPDIR}/download2"
TIME = "222222"
DATE = "20161212"
# Always remove uninative as we're changing proxies
-INHERIT_remove = "uninative"
+INHERIT:remove = "uninative"
INHERIT += "buildstats-summary buildhistory"
http_proxy = "http://example.com/"
BB_SIGNATURE_HANDLER = "OEBasicHash"
@@ -570,3 +583,44 @@ BB_SIGNATURE_HANDLER = "OEBasicHash"
compare_sigfiles(rest, files1, files2, compare=False)
self.fail("sstate hashes not identical.")
+
+ def test_sstate_movelayer_samesigs(self):
+ """
+ The sstate checksums of two builds with the same oe-core layer in two
+ different locations should be the same.
+ """
+ core_layer = os.path.join(
+ self.tc.td["COREBASE"], 'meta')
+ copy_layer_1 = self.topdir + "/meta-copy1/meta"
+ copy_layer_2 = self.topdir + "/meta-copy2/meta"
+
+ oe.path.copytree(core_layer, copy_layer_1)
+ self.write_config("""
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+""")
+ bblayers_conf = 'BBLAYERS += "%s"\nBBLAYERS:remove = "%s"' % (copy_layer_1, core_layer)
+ self.write_bblayers_config(bblayers_conf)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("bash -S none")
+
+ oe.path.copytree(core_layer, copy_layer_2)
+ self.write_config("""
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+""")
+ bblayers_conf = 'BBLAYERS += "%s"\nBBLAYERS:remove = "%s"' % (copy_layer_2, core_layer)
+ self.write_bblayers_config(bblayers_conf)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("bash -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ f.append(os.path.join(root, name))
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
+ self.assertCountEqual(files1, files2)
+
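The final comparison is deliberately order-insensitive: os.walk() makes no ordering guarantee, and assertCountEqual only requires both sequences to contain the same elements with the same multiplicity. Roughly equivalent, with illustrative stamp names:

    from collections import Counter

    files1 = ['bash.do_compile.sigdata', 'bash.do_install.sigdata']
    files2 = ['bash.do_install.sigdata', 'bash.do_compile.sigdata']
    # assertCountEqual(files1, files2) passes despite the ordering:
    assert Counter(files1) == Counter(files2)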
diff --git a/meta/lib/oeqa/selftest/cases/sysroot.py b/meta/lib/oeqa/selftest/cases/sysroot.py
index 6e34927c90..79ab45235d 100644
--- a/meta/lib/oeqa/selftest/cases/sysroot.py
+++ b/meta/lib/oeqa/selftest/cases/sysroot.py
@@ -24,14 +24,14 @@ class SysrootTests(OESelftestTestCase):
self.write_config("""
PREFERRED_PROVIDER_virtual/sysroot-test = "sysroot-test-arch1"
MACHINE = "qemux86"
-TESTSTRING_pn-sysroot-test-arch1 = "%s"
-TESTSTRING_pn-sysroot-test-arch2 = "%s"
+TESTSTRING:pn-sysroot-test-arch1 = "%s"
+TESTSTRING:pn-sysroot-test-arch2 = "%s"
""" % (uuid1, uuid2))
bitbake("sysroot-test")
self.write_config("""
PREFERRED_PROVIDER_virtual/sysroot-test = "sysroot-test-arch2"
MACHINE = "qemux86copy"
-TESTSTRING_pn-sysroot-test-arch1 = "%s"
-TESTSTRING_pn-sysroot-test-arch2 = "%s"
+TESTSTRING:pn-sysroot-test-arch1 = "%s"
+TESTSTRING:pn-sysroot-test-arch2 = "%s"
""" % (uuid1, uuid2))
bitbake("sysroot-test")
diff --git a/meta/lib/oeqa/selftest/cases/tinfoil.py b/meta/lib/oeqa/selftest/cases/tinfoil.py
index d1aa7b9afd..8fd48bb054 100644
--- a/meta/lib/oeqa/selftest/cases/tinfoil.py
+++ b/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -94,21 +94,24 @@ class TinfoilTests(OESelftestTestCase):
pass
pattern = 'conf'
- res = tinfoil.run_command('findFilesMatchingInDir', pattern, 'conf/machine')
+ res = tinfoil.run_command('testCookerCommandEvent', pattern)
self.assertTrue(res)
eventreceived = False
commandcomplete = False
start = time.time()
- # Wait for 5s in total so we'd detect spurious heartbeat events for example
- while time.time() - start < 5:
+        # Wait for a maximum of 60s in total so we'd detect spurious heartbeat events for example
+        while not (eventreceived and commandcomplete) and time.time() - start < 60:
+            # once both events have been received (the expected good case), we are done
event = tinfoil.wait_event(1)
if event:
if isinstance(event, bb.command.CommandCompleted):
commandcomplete = True
elif isinstance(event, bb.event.FilesMatchingFound):
self.assertEqual(pattern, event._pattern)
- self.assertIn('qemuarm.conf', event._matches)
+ self.assertIn('A', event._matches)
+ self.assertIn('B', event._matches)
eventreceived = True
elif isinstance(event, logging.LogRecord):
continue
@@ -170,8 +173,8 @@ class TinfoilTests(OESelftestTestCase):
self.assertEqual(value, 'origvalue', 'Variable renamed using config_data.renameVar() does not appear with new name')
# Test overrides
tinfoil.config_data.setVar('TESTVAR', 'original')
- tinfoil.config_data.setVar('TESTVAR_overrideone', 'one')
- tinfoil.config_data.setVar('TESTVAR_overridetwo', 'two')
+ tinfoil.config_data.setVar('TESTVAR:overrideone', 'one')
+ tinfoil.config_data.setVar('TESTVAR:overridetwo', 'two')
tinfoil.config_data.appendVar('OVERRIDES', ':overrideone')
value = tinfoil.config_data.getVar('TESTVAR')
self.assertEqual(value, 'one', 'Variable overrides not functioning correctly')
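The override check relies on BitBake's conditional-assignment syntax: VAR:override only takes effect while that override name is active in OVERRIDES. A toy model of the lookup (simplified; real BitBake also handles override priority, appends and removes):

    def resolve(store, name, active_overrides):
        # Later active overrides win, mirroring OVERRIDES ordering.
        value = store.get(name)
        for o in active_overrides:
            value = store.get(name + ':' + o, value)
        return value

    store = {'TESTVAR': 'original',
             'TESTVAR:overrideone': 'one',
             'TESTVAR:overridetwo': 'two'}
    assert resolve(store, 'TESTVAR', ['overrideone']) == 'one'
    assert resolve(store, 'TESTVAR', []) == 'original'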
diff --git a/meta/lib/oeqa/selftest/cases/wic.py b/meta/lib/oeqa/selftest/cases/wic.py
index 8b58285c32..6f3dc27743 100644
--- a/meta/lib/oeqa/selftest/cases/wic.py
+++ b/meta/lib/oeqa/selftest/cases/wic.py
@@ -11,6 +11,7 @@
import os
import sys
import unittest
+import hashlib
from glob import glob
from shutil import rmtree, copy
@@ -189,8 +190,8 @@ class Wic(WicTestCase):
def test_iso_image(self):
"""Test creation of hybrid iso image with legacy and EFI boot"""
config = 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\
- 'MACHINE_FEATURES_append = " efi"\n'\
- 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ 'MACHINE_FEATURES:append = " efi"\n'\
+ 'DEPENDS:pn-core-image-minimal += "syslinux"\n'
self.append_config(config)
bitbake('core-image-minimal core-image-minimal-initramfs')
self.remove_config(config)
@@ -216,7 +217,7 @@ class Wic(WicTestCase):
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_bootloader_config(self):
"""Test creation of directdisk-bootloader-config image"""
- config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ config = 'DEPENDS:pn-core-image-minimal += "syslinux"\n'
self.append_config(config)
bitbake('core-image-minimal')
self.remove_config(config)
@@ -227,7 +228,7 @@ class Wic(WicTestCase):
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_systemd_bootdisk(self):
"""Test creation of systemd-bootdisk image"""
- config = 'MACHINE_FEATURES_append = " efi"\n'
+ config = 'MACHINE_FEATURES:append = " efi"\n'
self.append_config(config)
bitbake('core-image-minimal')
self.remove_config(config)
@@ -235,6 +236,17 @@ class Wic(WicTestCase):
runCmd(cmd)
self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct")))
+ def test_efi_bootpart(self):
+ """Test creation of efi-bootpart image"""
+ cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir
+ kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal')
+ self.append_config('IMAGE_EFI_BOOT_FILES = "%s;kernel"\n' % kimgtype)
+ runCmd(cmd)
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+ images = glob(self.resultdir + "mkefidisk-*.direct")
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+        self.assertIn("kernel", result.output)
+
def test_sdimage_bootpart(self):
"""Test creation of sdimage-bootpart image"""
cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir
@@ -248,7 +260,7 @@ class Wic(WicTestCase):
"""Test default output location"""
for fname in glob("directdisk-*.direct"):
os.remove(fname)
- config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ config = 'DEPENDS:pn-core-image-minimal += "syslinux"\n'
self.append_config(config)
bitbake('core-image-minimal')
self.remove_config(config)
@@ -307,6 +319,7 @@ class Wic(WicTestCase):
"--image-name=core-image-minimal "
"-D -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "tmp.wic*")))
def test_debug_long(self):
"""Test --debug option"""
@@ -314,6 +327,7 @@ class Wic(WicTestCase):
"--image-name=core-image-minimal "
"--debug -o %s" % self.resultdir)
self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "tmp.wic*")))
def test_skip_build_check_short(self):
"""Test -s option"""
@@ -577,6 +591,9 @@ part / --source rootfs --fstype=ext4 --include-path %s --include-path core-imag
def test_permissions(self):
"""Test permissions are respected"""
+ # prepare wicenv and rootfs
+ bitbake('core-image-minimal core-image-minimal-mtdutils -c do_rootfs_wicenv')
+
oldpath = os.environ['PATH']
os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
@@ -610,6 +627,19 @@ part /etc --source rootfs --fstype=ext4 --change-directory=etc
res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part))
self.assertEqual(True, files_own_by_root(res.output))
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "%s"\n' % wks_file
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ tmpdir = os.path.join(get_bb_var('WORKDIR', 'core-image-minimal'),'build-wic')
+
+ # check each partition for permission
+ for part in glob(os.path.join(tmpdir, 'temp-*.direct.p*')):
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % (part))
+                self.assertTrue(files_own_by_root(res.output),
+                    msg='File permissions incorrect using wks set "%s"' % test)
+
+ # clean config and result directory for next cases
+ self.remove_config(config)
rmtree(self.resultdir, ignore_errors=True)
finally:
@@ -657,6 +687,74 @@ part /etc --source rootfs --fstype=ext4 --change-directory=etc
% (wks_file, self.resultdir), ignore_status=True).status)
os.remove(wks_file)
+ def test_no_fstab_update(self):
+ """Test --no-fstab-update wks option."""
+
+ oldpath = os.environ['PATH']
+ os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
+
+ # Get stock fstab from base-files recipe
+ self.assertEqual(0, bitbake('base-files -c do_install').status)
+ bf_fstab = os.path.join(get_bb_var('D', 'base-files'), 'etc/fstab')
+ self.assertEqual(True, os.path.exists(bf_fstab))
+ bf_fstab_md5sum = runCmd('md5sum %s 2>/dev/null' % bf_fstab).output.split(" ")[0]
+
+ try:
+ no_fstab_update_path = os.path.join(self.resultdir, 'test-no-fstab-update')
+ os.makedirs(no_fstab_update_path)
+ wks_file = os.path.join(no_fstab_update_path, 'temp.wks')
+ with open(wks_file, 'w') as wks:
+ wks.writelines(['part / --source rootfs --fstype=ext4 --label rootfs\n',
+ 'part /mnt/p2 --source rootfs --rootfs-dir=core-image-minimal ',
+ '--fstype=ext4 --label p2 --no-fstab-update\n'])
+ runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir))
+
+ part_fstab_md5sum = []
+ for i in range(1, 3):
+ part = glob(os.path.join(self.resultdir, 'temp-*.direct.p') + str(i))[0]
+ part_fstab = runCmd("debugfs -R 'cat etc/fstab' %s 2>/dev/null" % (part))
+ part_fstab_md5sum.append(hashlib.md5((part_fstab.output + "\n\n").encode('utf-8')).hexdigest())
+
+ # '/etc/fstab' in partition 2 should contain the same stock fstab file
+ # as the one installed by the base-file recipe.
+ self.assertEqual(bf_fstab_md5sum, part_fstab_md5sum[1])
+
+ # '/etc/fstab' in partition 1 should contain an updated fstab file.
+ self.assertNotEqual(bf_fstab_md5sum, part_fstab_md5sum[0])
+
+ finally:
+ os.environ['PATH'] = oldpath
+
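The md5 comparison only works because both sides are normalized the same way; the "\n\n" appended to the debugfs output appears to compensate for trailing newlines lost in command capture (an assumption drawn from the test above). As a standalone sketch:

    import hashlib

    def md5_of_file(path):
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    def md5_of_captured(text):
        # Re-append the newlines stripped from captured command output
        # before hashing, so it can be compared with the on-disk file.
        return hashlib.md5((text + "\n\n").encode('utf-8')).hexdigest()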
+ def test_no_fstab_update_errors(self):
+ """Test --no-fstab-update wks option error handling."""
+ wks_file = 'temp.wks'
+
+ # Absolute argument.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --fstype=ext4 --no-fstab-update /etc")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ # Argument pointing to parent directory.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --fstype=ext4 --no-fstab-update ././..")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ def test_extra_space(self):
+ """Test --extra-space wks option."""
+ extraspace = 1024**3
+ runCmd("wic create wictestdisk "
+ "--image-name core-image-minimal "
+                "--extra-space %i -o %s" % (extraspace, self.resultdir))
+ wicout = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(wicout))
+ size = os.path.getsize(wicout[0])
+ self.assertTrue(size > extraspace)
+
class Wic2(WicTestCase):
def test_bmap_short(self):
@@ -689,7 +787,7 @@ class Wic2(WicTestCase):
wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES',
'INITRD', 'INITRD_LIVE', 'ISODIR','INITRAMFS_IMAGE',
'INITRAMFS_IMAGE_BUNDLE', 'INITRAMFS_LINK_NAME',
- 'APPEND'))
+ 'APPEND', 'IMAGE_EFI_BOOT_FILES'))
with open(path) as envfile:
content = dict(line.split("=", 1) for line in envfile)
# test if variables used by wic present in the .env file
@@ -728,7 +826,7 @@ class Wic2(WicTestCase):
def test_wic_image_type(self):
"""Test building wic images by bitbake"""
config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
- 'MACHINE_FEATURES_append = " efi"\n'
+ 'MACHINE_FEATURES:append = " efi"\n'
self.append_config(config)
self.assertEqual(0, bitbake('wic-image-minimal').status)
self.remove_config(config)
@@ -748,12 +846,12 @@ class Wic2(WicTestCase):
def test_qemu(self):
"""Test wic-image-minimal under qemu"""
config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
- 'MACHINE_FEATURES_append = " efi"\n'
+ 'MACHINE_FEATURES:append = " efi"\n'
self.append_config(config)
self.assertEqual(0, bitbake('wic-image-minimal').status)
self.remove_config(config)
- with runqemu('wic-image-minimal', ssh=False) as qemu:
+ with runqemu('wic-image-minimal', ssh=False, runqemuparams='nographic') as qemu:
cmd = "mount | grep '^/dev/' | cut -f1,3 -d ' ' | egrep -c -e '/dev/sda1 /boot' " \
"-e '/dev/root /|/dev/sda2 /' -e '/dev/sda3 /media' -e '/dev/sda4 /mnt'"
status, output = qemu.run_serial(cmd)
@@ -773,7 +871,7 @@ class Wic2(WicTestCase):
self.remove_config(config)
with runqemu('core-image-minimal', ssh=False,
- runqemuparams='ovmf', image_fstype='wic') as qemu:
+ runqemuparams='nographic ovmf', image_fstype='wic') as qemu:
cmd = "grep sda. /proc/partitions |wc -l"
status, output = qemu.run_serial(cmd)
self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
@@ -890,6 +988,30 @@ class Wic2(WicTestCase):
])
with NamedTemporaryFile("w", suffix=".wks") as tempf:
+ # Test that partitions can be placed on a 512 byte sector boundary
+ tempf.write("bootloader --ptable gpt\n" \
+ "part / --source rootfs --ondisk hda --offset 65s --fixed-size 99M --fstype=ext4\n" \
+ "part /bar --ondisk hda --offset 102432 --fixed-size 100M --fstype=ext4\n")
+ tempf.flush()
+
+ _, partlns = self._get_wic_partitions(tempf.name, native_sysroot)
+ self.assertEqual(partlns, [
+ "1:32.5kiB:101408kiB:101376kiB:ext4:primary:;",
+ "2:102432kiB:204832kiB:102400kiB:ext4:primary:;",
+ ])
+
+ with NamedTemporaryFile("w", suffix=".wks") as tempf:
+ # Test that a partition can be placed immediately after a MSDOS partition table
+ tempf.write("bootloader --ptable msdos\n" \
+ "part / --source rootfs --ondisk hda --offset 1s --fixed-size 100M --fstype=ext4\n")
+ tempf.flush()
+
+ _, partlns = self._get_wic_partitions(tempf.name, native_sysroot)
+ self.assertEqual(partlns, [
+ "1:0.50kiB:102400kiB:102400kiB:ext4::;",
+ ])
+
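The expected parted columns follow directly from the offsets: an 's' suffix means 512-byte sectors, while a bare --offset value is taken in kiB (as assumed here). A worked check:

    SECTOR_BYTES = 512
    assert 65 * SECTOR_BYTES / 1024 == 32.5      # "--offset 65s" start in kiB
    assert 99 * 1024 == 101376                   # "--fixed-size 99M" in kiB
    assert 102432 + 100 * 1024 == 204832         # part 2: start + size = end
    assert 1 * SECTOR_BYTES / 1024 == 0.5        # "--offset 1s" after the msdos table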
+ with NamedTemporaryFile("w", suffix=".wks") as tempf:
# Test that image creation fails if the partitions would overlap
tempf.write("bootloader --ptable gpt\n" \
"part / --source rootfs --ondisk hda --offset 32 --fixed-size 100M --fstype=ext4\n" \
@@ -926,28 +1048,32 @@ class Wic2(WicTestCase):
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_rawcopy_plugin_qemu(self):
"""Test rawcopy plugin in qemu"""
- # build ext4 and wic images
- for fstype in ("ext4", "wic"):
- config = 'IMAGE_FSTYPES = "%s"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' % fstype
- self.append_config(config)
- self.assertEqual(0, bitbake('core-image-minimal').status)
- self.remove_config(config)
-
- with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ # build ext4 and then use it for a wic image
+ config = 'IMAGE_FSTYPES = "ext4"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal-mtdutils').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal-mtdutils', ssh=False,
+ runqemuparams='nographic', image_fstype='wic') as qemu:
cmd = "grep sda. /proc/partitions |wc -l"
status, output = qemu.run_serial(cmd)
self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
self.assertEqual(output, '2')
- def test_rawcopy_plugin(self):
+ def _rawcopy_plugin(self, fstype):
"""Test rawcopy plugin"""
img = 'core-image-minimal'
machine = get_bb_var('MACHINE', img)
+ params = ',unpack' if fstype.endswith('.gz') else ''
with NamedTemporaryFile("w", suffix=".wks") as wks:
- wks.writelines(['part /boot --active --source bootimg-pcbios\n',
- 'part / --source rawcopy --sourceparams="file=%s-%s.ext4" --use-uuid\n'\
- % (img, machine),
- 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n'])
+ wks.write('part / --source rawcopy --sourceparams="file=%s-%s.%s%s"\n'\
+ % (img, machine, fstype, params))
wks.flush()
cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
runCmd(cmd)
@@ -955,15 +1081,47 @@ class Wic2(WicTestCase):
out = glob(self.resultdir + "%s-*direct" % wksname)
self.assertEqual(1, len(out))
+ def test_rawcopy_plugin(self):
+ self._rawcopy_plugin('ext4')
+
+ def test_rawcopy_plugin_unpack(self):
+ fstype = 'ext4.gz'
+ config = 'IMAGE_FSTYPES = "%s"\n' % fstype
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+ self._rawcopy_plugin(fstype)
+
+ def test_empty_plugin(self):
+ """Test empty plugin"""
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_empty_plugin.wks"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE'])
+ deploy_dir = bb_vars['DEPLOY_DIR_IMAGE']
+ machine = bb_vars['MACHINE']
+ image_path = os.path.join(deploy_dir, 'core-image-minimal-%s.wic' % machine)
+ self.assertEqual(True, os.path.exists(image_path))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # Fstype column from 'wic ls' should be empty for the second partition
+ # as listed in test_empty_plugin.wks
+ result = runCmd("wic ls %s -n %s | awk -F ' ' '{print $1 \" \" $5}' | grep '^2' | wc -w" % (image_path, sysroot))
+ self.assertEqual('1', result.output)
+
@only_for_arch(['i586', 'i686', 'x86_64'])
def test_biosplusefi_plugin_qemu(self):
"""Test biosplusefi plugin in qemu"""
- config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n'
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES:append = " efi"\n'
self.append_config(config)
self.assertEqual(0, bitbake('core-image-minimal').status)
self.remove_config(config)
- with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ with runqemu('core-image-minimal', ssh=False,
+ runqemuparams='nographic', image_fstype='wic') as qemu:
# Check that we have ONLY two /dev/sda* partitions (/boot and /)
cmd = "grep sda. /proc/partitions | wc -l"
status, output = qemu.run_serial(cmd)
@@ -995,7 +1153,7 @@ class Wic2(WicTestCase):
# If an image hasn't been built yet, directory ${STAGING_DATADIR}/syslinux won't exists and _get_bootimg_dir()
# will raise with "Couldn't find correct bootimg_dir"
# The easiest way to work-around this issue is to make sure we already built an image here, hence the bitbake call
- config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES_append = " efi"\n'
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "test_biosplusefi_plugin.wks"\nMACHINE_FEATURES:append = " efi"\n'
self.append_config(config)
self.assertEqual(0, bitbake('core-image-minimal').status)
self.remove_config(config)
@@ -1012,6 +1170,35 @@ class Wic2(WicTestCase):
out = glob(self.resultdir + "%s-*.direct" % wksname)
self.assertEqual(1, len(out))
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_efi_plugin_unified_kernel_image_qemu(self):
+ """Test efi plugin's Unified Kernel Image feature in qemu"""
+ config = 'IMAGE_FSTYPES = "wic"\n'\
+ 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\
+ 'WKS_FILE = "test_efi_plugin.wks"\n'\
+ 'MACHINE_FEATURES:append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal core-image-minimal-initramfs ovmf').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False,
+ runqemuparams='nographic ovmf', image_fstype='wic') as qemu:
+ # Check that /boot has EFI bootx64.efi (required for EFI)
+ cmd = "ls /boot/EFI/BOOT/bootx64.efi | wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '1')
+ # Check that /boot has EFI/Linux/linux.efi (required for Unified Kernel Images auto detection)
+ cmd = "ls /boot/EFI/Linux/linux.efi | wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '1')
+ # Check that /boot doesn't have loader/entries/boot.conf (Unified Kernel Images are auto detected by the bootloader)
+            cmd = "ls /boot/loader/entries/boot.conf 2>/dev/null | wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '0')
+
def test_fs_types(self):
"""Test filesystem types for empty and not empty partitions"""
img = 'core-image-minimal'
@@ -1229,11 +1416,11 @@ class Wic2(WicTestCase):
result = runCmd("%s/usr/sbin/sfdisk -F %s" % (sysroot, new_image_path))
self.assertTrue("0 B, 0 bytes, 0 sectors" in result.output)
- os.rename(image_path, image_path + '.bak')
- os.rename(new_image_path, image_path)
+ bb.utils.rename(image_path, image_path + '.bak')
+ bb.utils.rename(new_image_path, image_path)
# Check if it boots in qemu
- with runqemu('core-image-minimal', ssh=False) as qemu:
+ with runqemu('core-image-minimal', ssh=False, runqemuparams='nographic') as qemu:
cmd = "ls /etc/"
status, output = qemu.run_serial('true')
self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
@@ -1241,7 +1428,7 @@ class Wic2(WicTestCase):
if os.path.exists(new_image_path):
os.unlink(new_image_path)
if os.path.exists(image_path + '.bak'):
- os.rename(image_path + '.bak', image_path)
+ bb.utils.rename(image_path + '.bak', image_path)
def test_wic_ls_ext(self):
"""Test listing content of the ext partition using 'wic ls'"""
diff --git a/meta/lib/oeqa/selftest/context.py b/meta/lib/oeqa/selftest/context.py
index 23f7d71bdb..78c7a467e2 100644
--- a/meta/lib/oeqa/selftest/context.py
+++ b/meta/lib/oeqa/selftest/context.py
@@ -34,12 +34,12 @@ class NonConcurrentTestSuite(unittest.TestSuite):
(builddir, newbuilddir) = self.setupfunc("-st", None, self.suite)
ret = super().run(result)
os.chdir(builddir)
- if newbuilddir and ret.wasSuccessful():
+ if newbuilddir and ret.wasSuccessful() and self.removefunc:
self.removefunc(newbuilddir)
def removebuilddir(d):
delay = 5
- while delay and os.path.exists(d + "/bitbake.lock"):
+ while delay and (os.path.exists(d + "/bitbake.lock") or os.path.exists(d + "/cache/hashserv.db-wal")):
time.sleep(1)
delay = delay - 1
# Deleting these directories takes a lot of time, use autobuilder
@@ -54,7 +54,7 @@ def removebuilddir(d):
bb.utils.prunedir(d, ionice=True)
class OESelftestTestContext(OETestContext):
- def __init__(self, td=None, logger=None, machines=None, config_paths=None, newbuilddir=None):
+ def __init__(self, td=None, logger=None, machines=None, config_paths=None, newbuilddir=None, keep_builddir=None):
super(OESelftestTestContext, self).__init__(td, logger)
self.machines = machines
@@ -62,6 +62,11 @@ class OESelftestTestContext(OETestContext):
self.config_paths = config_paths
self.newbuilddir = newbuilddir
+ if keep_builddir:
+ self.removebuilddir = None
+ else:
+ self.removebuilddir = removebuilddir
+
def setup_builddir(self, suffix, selftestdir, suite):
builddir = os.environ['BUILDDIR']
if not selftestdir:
@@ -82,7 +87,9 @@ class OESelftestTestContext(OETestContext):
oe.path.copytree(selftestdir, newselftestdir)
for e in os.environ:
- if builddir + "/" in os.environ[e] or os.environ[e].endswith(builddir):
+ if builddir + "/" in os.environ[e]:
+ os.environ[e] = os.environ[e].replace(builddir + "/", newbuilddir + "/")
+ if os.environ[e].endswith(builddir):
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
@@ -117,9 +124,9 @@ class OESelftestTestContext(OETestContext):
if processes:
from oeqa.core.utils.concurrencytest import ConcurrentTestSuite
- return ConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir)
+ return ConcurrentTestSuite(suites, processes, self.setup_builddir, self.removebuilddir)
else:
- return NonConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir)
+ return NonConcurrentTestSuite(suites, processes, self.setup_builddir, self.removebuilddir)
def runTests(self, processes=None, machine=None, skips=[]):
if machine:
@@ -177,6 +184,9 @@ class OESelftestTestContextExecutor(OETestContextExecutor):
action='append', default=None,
help='Exclude all (unhidden) tests that match any of the specified tag(s). (exclude applies before select)')
+ parser.add_argument('-K', '--keep-builddir', action='store_true',
+ help='Keep the test build directory even if all tests pass')
+
parser.add_argument('-B', '--newbuilddir', help='New build directory to use for tests.')
parser.add_argument('-v', '--verbose', action='store_true')
parser.set_defaults(func=self.run)
@@ -234,6 +244,7 @@ class OESelftestTestContextExecutor(OETestContextExecutor):
self.tc_kwargs['init']['config_paths']['localconf'] = os.path.join(builddir, "conf/local.conf")
self.tc_kwargs['init']['config_paths']['bblayers'] = os.path.join(builddir, "conf/bblayers.conf")
self.tc_kwargs['init']['newbuilddir'] = args.newbuilddir
+ self.tc_kwargs['init']['keep_builddir'] = args.keep_builddir
def tag_filter(tags):
if args.exclude_tags:
diff --git a/meta/lib/oeqa/targetcontrol.py b/meta/lib/oeqa/targetcontrol.py
index 19f5a4ea7e..1fdff82889 100644
--- a/meta/lib/oeqa/targetcontrol.py
+++ b/meta/lib/oeqa/targetcontrol.py
@@ -17,6 +17,7 @@ from oeqa.utils.sshcontrol import SSHControl
from oeqa.utils.qemurunner import QemuRunner
from oeqa.utils.qemutinyrunner import QemuTinyRunner
from oeqa.utils.dump import TargetDumper
+from oeqa.utils.dump import MonitorDumper
from oeqa.controllers.testtargetloader import TestTargetLoader
from abc import ABCMeta, abstractmethod
@@ -108,6 +109,7 @@ class QemuTarget(BaseTarget):
self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
dump_target_cmds = d.getVar("testimage_dump_target")
dump_host_cmds = d.getVar("testimage_dump_host")
+ dump_monitor_cmds = d.getVar("testimage_dump_monitor")
dump_dir = d.getVar("TESTIMAGE_DUMP_DIR")
if not dump_dir:
dump_dir = os.path.join(d.getVar('LOG_DIR'), 'runtime-hostdump')
@@ -131,6 +133,7 @@ class QemuTarget(BaseTarget):
logfile = self.qemulog,
kernel = self.kernel,
boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")),
+ tmpfsdir = d.getVar("RUNQEMU_TMPFS_DIR"),
logger = logger)
else:
self.runner = QemuRunner(machine=d.getVar("MACHINE"),
@@ -144,9 +147,13 @@ class QemuTarget(BaseTarget):
dump_dir = dump_dir,
dump_host_cmds = d.getVar("testimage_dump_host"),
logger = logger,
+ tmpfsdir = d.getVar("RUNQEMU_TMPFS_DIR"),
serial_ports = len(d.getVar("SERIAL_CONSOLES").split()))
self.target_dumper = TargetDumper(dump_target_cmds, dump_dir, self.runner)
+ self.monitor_dumper = MonitorDumper(dump_monitor_cmds, dump_dir, self.runner)
+        if self.monitor_dumper:
+ self.monitor_dumper.create_dir("qmp")
def deploy(self):
bb.utils.mkdirhier(self.testdir)
diff --git a/meta/lib/oeqa/utils/__init__.py b/meta/lib/oeqa/utils/__init__.py
index 70fbe7b552..6d1ec4cb99 100644
--- a/meta/lib/oeqa/utils/__init__.py
+++ b/meta/lib/oeqa/utils/__init__.py
@@ -43,28 +43,12 @@ def make_logger_bitbake_compatible(logger):
import logging
"""
- Bitbake logger redifines debug() in order to
- set a level within debug, this breaks compatibility
- with vainilla logging, so we neeed to redifine debug()
- method again also add info() method with INFO + 1 level.
+ We need to raise the log level of the info output so unittest
+ messages are visible on the console.
"""
- def _bitbake_log_debug(*args, **kwargs):
- lvl = logging.DEBUG
-
- if isinstance(args[0], int):
- lvl = args[0]
- msg = args[1]
- args = args[2:]
- else:
- msg = args[0]
- args = args[1:]
-
- logger.log(lvl, msg, *args, **kwargs)
-
def _bitbake_log_info(msg, *args, **kwargs):
logger.log(logging.INFO + 1, msg, *args, **kwargs)
- logger.debug = _bitbake_log_debug
logger.info = _bitbake_log_info
return logger
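Logging at logging.INFO + 1 works because BitBake treats that value as its "plain" console level (bb.msg.BBLogFormatter.PLAIN), which is always shown, and plain logging accepts any integer level. A standalone illustration:

    import logging

    logging.addLevelName(logging.INFO + 1, "PLAIN")
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("oeqa-demo")
    logger.log(logging.INFO + 1, "unittest output shown above normal INFO chatter")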
diff --git a/meta/lib/oeqa/utils/buildproject.py b/meta/lib/oeqa/utils/buildproject.py
index e6d80cc8dc..dfb9661868 100644
--- a/meta/lib/oeqa/utils/buildproject.py
+++ b/meta/lib/oeqa/utils/buildproject.py
@@ -18,6 +18,7 @@ class BuildProject(metaclass=ABCMeta):
def __init__(self, uri, foldername=None, tmpdir=None, dl_dir=None):
self.uri = uri
self.archive = os.path.basename(uri)
+ self.tempdirobj = None
if not tmpdir:
self.tempdirobj = tempfile.TemporaryDirectory(prefix='buildproject-')
tmpdir = self.tempdirobj.name
@@ -57,6 +58,8 @@ class BuildProject(metaclass=ABCMeta):
return self._run('cd %s; make install %s' % (self.targetdir, install_args))
def clean(self):
+ if self.tempdirobj:
+ self.tempdirobj.cleanup()
if not self.needclean:
return
self._run('rm -rf %s' % self.targetdir)
diff --git a/meta/lib/oeqa/utils/commands.py b/meta/lib/oeqa/utils/commands.py
index f7f8c16bf0..024261410e 100644
--- a/meta/lib/oeqa/utils/commands.py
+++ b/meta/lib/oeqa/utils/commands.py
@@ -125,11 +125,11 @@ class Command(object):
def stop(self):
for thread in self.threads:
- if thread.isAlive():
+ if thread.is_alive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
thread.join(5)
- if thread.isAlive():
+ if thread.is_alive():
self.process.kill()
thread.join()
@@ -167,23 +167,29 @@ class Result(object):
pass
-def runCmd(command, ignore_status=False, timeout=None, assert_error=True,
+def runCmd(command, ignore_status=False, timeout=None, assert_error=True, sync=True,
native_sysroot=None, limit_exc_output=0, output_log=None, **options):
result = Result()
if native_sysroot:
extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
(native_sysroot, native_sysroot, native_sysroot)
- extra_libpaths = "%s/lib:%s/usr/lib" % \
- (native_sysroot, native_sysroot)
nenv = dict(options.get('env', os.environ))
nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
- nenv['LD_LIBRARY_PATH'] = extra_libpaths + ':' + nenv.get('LD_LIBRARY_PATH', '')
options['env'] = nenv
cmd = Command(command, timeout=timeout, output_log=output_log, **options)
cmd.run()
+ # tests can be heavy on IO and if bitbake can't write out its caches, we see timeouts.
+ # call sync around the tests to ensure the IO queue doesn't get too large, taking any IO
+ # hit here rather than in bitbake shutdown.
+ if sync:
+ p = os.environ['PATH']
+ os.environ['PATH'] = "/usr/bin:/bin:/usr/sbin:/sbin:" + p
+ os.system("sync")
+ os.environ['PATH'] = p
+
result.command = command
result.status = cmd.status
result.output = cmd.output
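The PATH juggling only guarantees the external sync binary resolves; on Python 3.3 and later, os.sync() would flush filesystem buffers without spawning a shell, an alternative worth noting:

    import os
    os.sync()  # same effect as shelling out to 'sync'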
diff --git a/meta/lib/oeqa/utils/dump.py b/meta/lib/oeqa/utils/dump.py
index 09a44329e0..95a79a571c 100644
--- a/meta/lib/oeqa/utils/dump.py
+++ b/meta/lib/oeqa/utils/dump.py
@@ -4,6 +4,7 @@
import os
import sys
+import json
import errno
import datetime
import itertools
@@ -17,6 +18,7 @@ class BaseDumper(object):
# Some testing doesn't inherit testimage, so it is needed
# to set some defaults.
self.parent_dir = parent_dir
+ self.dump_dir = parent_dir
dft_cmds = """ top -bn1
iostat -x -z -N -d -p ALL 20 2
ps -ef
@@ -46,11 +48,13 @@ class BaseDumper(object):
raise err
self.dump_dir = dump_dir
- def _write_dump(self, command, output):
+ def _construct_filename(self, command):
if isinstance(self, HostDumper):
prefix = "host"
elif isinstance(self, TargetDumper):
prefix = "target"
+ elif isinstance(self, MonitorDumper):
+ prefix = "qmp"
else:
prefix = "unknown"
for i in itertools.count():
@@ -58,9 +62,17 @@ class BaseDumper(object):
fullname = os.path.join(self.dump_dir, filename)
if not os.path.exists(fullname):
break
- with open(fullname, 'w') as dump_file:
- dump_file.write(output)
+ return fullname
+ def _write_dump(self, command, output):
+ fullname = self._construct_filename(command)
+ os.makedirs(os.path.dirname(fullname), exist_ok=True)
+ if isinstance(self, MonitorDumper):
+ with open(fullname, 'w') as json_file:
+ json.dump(output, json_file, indent=4)
+ else:
+ with open(fullname, 'w') as dump_file:
+ dump_file.write(output)
class HostDumper(BaseDumper):
""" Class to get dumps from the host running the tests """
@@ -96,3 +108,31 @@ class TargetDumper(BaseDumper):
except:
print("Tried to dump info from target but "
"serial console failed")
+ print("Failed CMD: %s" % (cmd))
+
+class MonitorDumper(BaseDumper):
+ """ Class to get dumps via the Qemu Monitor, it only works with QemuRunner """
+
+ def __init__(self, cmds, parent_dir, runner):
+ super(MonitorDumper, self).__init__(cmds, parent_dir)
+ self.runner = runner
+
+ def dump_monitor(self, dump_dir=""):
+ if self.runner is None:
+ return
+ if dump_dir:
+ self.dump_dir = dump_dir
+ for cmd in self.cmds:
+ cmd_name = cmd.split()[0]
+ try:
+ if len(cmd.split()) > 1:
+ cmd_args = cmd.split()[1]
+ if "%s" in cmd_args:
+ filename = self._construct_filename(cmd_name)
+ cmd_data = json.loads(cmd_args % (filename))
+ output = self.runner.run_monitor(cmd_name, cmd_data)
+ else:
+ output = self.runner.run_monitor(cmd_name)
+ self._write_dump(cmd_name, output)
+ except Exception as e:
+ print("Failed to dump QMP CMD: %s with\nException: %s" % (cmd_name, e))
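A hypothetical use of MonitorDumper, matching the parsing above: each cmds entry is a QMP command name optionally followed by a single JSON token (no embedded spaces), and any %s in that token is substituted with a generated dump filename. The runner is assumed to be an already-launched QemuRunner:

    cmds = ['query-status',
            'dump-guest-memory {"paging":false,"protocol":"file:%s"}']
    dumper = MonitorDumper(cmds, '/tmp/runtime-hostdump', runner)  # runner: QemuRunner
    dumper.dump_monitor()   # writes 'qmp'-prefixed JSON dumps under dump_dir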
diff --git a/meta/lib/oeqa/utils/logparser.py b/meta/lib/oeqa/utils/logparser.py
index 60e16d500e..879aefca33 100644
--- a/meta/lib/oeqa/utils/logparser.py
+++ b/meta/lib/oeqa/utils/logparser.py
@@ -135,30 +135,27 @@ class LtpComplianceParser(object):
def parse(self, logfile):
test_regex = {}
- test_regex['PASSED'] = re.compile(r"^PASS")
- test_regex['FAILED'] = re.compile(r"^FAIL")
- test_regex['SKIPPED'] = re.compile(r"(?:UNTESTED)|(?:UNSUPPORTED)")
+ test_regex['FAILED'] = re.compile(r"FAIL")
section_regex = {}
- section_regex['test'] = re.compile(r"^Testing")
+ section_regex['test'] = re.compile(r"^Executing")
with open(logfile, errors='replace') as f:
+ name = logfile
+ result = "PASSED"
for line in f:
- result = section_regex['test'].search(line)
- if result:
- self.name = ""
- self.name = line.split()[1].strip()
- self.results[self.name] = "PASSED"
- failed = 0
+ regex_result = section_regex['test'].search(line)
+ if regex_result:
+ name = line.split()[1].strip()
- failed_result = test_regex['FAILED'].search(line)
- if failed_result:
- failed = line.split()[1].strip()
- if int(failed) > 0:
- self.results[self.name] = "FAILED"
+ regex_result = test_regex['FAILED'].search(line)
+ if regex_result:
+ result = "FAILED"
+ self.results[name] = result
for test in self.results:
result = self.results[test]
self.section['log'] = self.section['log'] + ("%s: %s\n" % (result.strip()[:-2], test.strip()))
return self.results, self.section
diff --git a/meta/lib/oeqa/utils/package_manager.py b/meta/lib/oeqa/utils/package_manager.py
index 2d358f7172..6b67f22fdd 100644
--- a/meta/lib/oeqa/utils/package_manager.py
+++ b/meta/lib/oeqa/utils/package_manager.py
@@ -12,7 +12,9 @@ def get_package_manager(d, root_path):
"""
Returns an OE package manager that can install packages in root_path.
"""
- from oe.package_manager import RpmPM, OpkgPM, DpkgPM
+ from oe.package_manager.rpm import RpmPM
+ from oe.package_manager.ipk import OpkgPM
+ from oe.package_manager.deb import DpkgPM
pkg_class = d.getVar("IMAGE_PKGTYPE")
if pkg_class == "rpm":
@@ -115,7 +117,7 @@ def extract_packages(d, needed_packages):
extract = package.get('extract', True)
if extract:
- #logger.debug(1, 'Extracting %s' % pkg)
+ #logger.debug('Extracting %s' % pkg)
dst_dir = os.path.join(extracted_path, pkg)
# Same package used for more than one test,
# don't need to extract again.
@@ -128,7 +130,7 @@ def extract_packages(d, needed_packages):
shutil.rmtree(pkg_dir)
else:
- #logger.debug(1, 'Copying %s' % pkg)
+ #logger.debug('Copying %s' % pkg)
_copy_package(d, pkg)
def _extract_in_tmpdir(d, pkg):
diff --git a/meta/lib/oeqa/utils/qemurunner.py b/meta/lib/oeqa/utils/qemurunner.py
index 992fff9370..76296d50cd 100644
--- a/meta/lib/oeqa/utils/qemurunner.py
+++ b/meta/lib/oeqa/utils/qemurunner.py
@@ -20,8 +20,10 @@ import string
import threading
import codecs
import logging
+import tempfile
from oeqa.utils.dump import HostDumper
from collections import defaultdict
+import importlib
# Get Unicode non printable control chars
control_range = list(range(0,32))+list(range(127,160))
@@ -32,7 +34,7 @@ re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
class QemuRunner:
def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds,
- use_kvm, logger, use_slirp=False, serial_ports=2, boot_patterns = defaultdict(str), use_ovmf=False):
+ use_kvm, logger, use_slirp=False, serial_ports=2, boot_patterns = defaultdict(str), use_ovmf=False, workdir=None, tmpfsdir=None):
# Popen object for runqemu
self.runqemu = None
@@ -61,13 +63,18 @@ class QemuRunner:
self.serial_ports = serial_ports
self.msg = ''
self.boot_patterns = boot_patterns
+ self.tmpfsdir = tmpfsdir
- self.runqemutime = 120
- self.qemu_pidfile = 'pidfile_'+str(os.getpid())
+ self.runqemutime = 300
+ if not workdir:
+ workdir = os.getcwd()
+ self.qemu_pidfile = workdir + '/pidfile_' + str(os.getpid())
self.host_dumper = HostDumper(dump_host_cmds, dump_dir)
self.monitorpipe = None
self.logger = logger
+ # Whether we're expecting an exit and should show related errors
+ self.canexit = False
# Enable testing other OS's
# Set commands for target communication, and default to Linux ALWAYS
@@ -116,7 +123,10 @@ class QemuRunner:
import fcntl
fl = fcntl.fcntl(o, fcntl.F_GETFL)
fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
- return os.read(o.fileno(), 1000000).decode("utf-8")
+ try:
+ return os.read(o.fileno(), 1000000).decode("utf-8")
+ except BlockingIOError:
+ return ""
def handleSIGCHLD(self, signum, frame):
@@ -148,6 +158,9 @@ class QemuRunner:
else:
env["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+ if self.tmpfsdir:
+ env["RUNQEMU_TMPFS_DIR"] = self.tmpfsdir
+
if not launch_cmd:
launch_cmd = 'runqemu %s' % ('snapshot' if discard_writes else '')
if self.use_kvm:
@@ -166,6 +179,29 @@ class QemuRunner:
return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams, env=env)
def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None, env = None):
+ # use logfile to determine the recipe-sysroot-native path and
+ # then add in the site-packages path components and add that
+ # to the python sys.path so qmp.py can be found.
+ python_path = os.path.dirname(os.path.dirname(self.logfile))
+ python_path += "/recipe-sysroot-native/usr/lib/qemu-python"
+ sys.path.append(python_path)
+ importlib.invalidate_caches()
+ try:
+ qmp = importlib.import_module("qmp")
+ except:
+ self.logger.error("qemurunner: qmp.py missing, please ensure it's installed")
+ return False
+ # Path relative to tmpdir used as cwd for qemu below to avoid unix socket path length issues
+ qmp_file = "." + next(tempfile._get_candidate_names())
+ qmp_param = ' -S -qmp unix:./%s,server,wait' % (qmp_file)
+ qmp_port = self.tmpdir + "/" + qmp_file
+ # Create a second socket connection for debugging use;
+ # note this will NOT cause qemu to block waiting for the connection
+ qmp_file2 = "." + next(tempfile._get_candidate_names())
+ qmp_param += ' -qmp unix:./%s,server,nowait' % (qmp_file2)
+ qmp_port2 = self.tmpdir + "/" + qmp_file2
+ self.logger.info("QMP Available for connection at %s" % (qmp_port2))
+
try:
if self.serial_ports >= 2:
self.threadsock, threadport = self.create_socket()
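For background on the QMP arguments added above: -S freezes the guest CPUs until a monitor issues "cont", server,wait makes qemu block until the harness connects, and the second server,nowait socket allows opportunistic debugging without stalling boot. A hedged sketch of the argument construction, with plain per-pid socket names in place of the tempfile helper used above:

    import os

    def make_qmp_args(pid):
        # Primary socket: qemu blocks until we connect, and -S keeps the
        # guest CPUs frozen until a QMP "cont" command releases them.
        primary = ".qmp_%d" % pid
        # Debug socket: "nowait" lets qemu carry on regardless, so a
        # human can attach later without blocking anything.
        debug = ".qmp_debug_%d" % pid
        args = ' -S -qmp unix:./%s,server,wait' % primary
        args += ' -qmp unix:./%s,server,nowait' % debug
        return args, primary, debug

    print(make_qmp_args(os.getpid())[0])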
@@ -174,7 +210,7 @@ class QemuRunner:
self.logger.error("Failed to create listening socket: %s" % msg[1])
return False
- bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
+ bootparams = ' printk.time=1'
if extra_bootparams:
bootparams = bootparams + ' ' + extra_bootparams
@@ -182,7 +218,8 @@ class QemuRunner:
# and analyze descendents in order to determine it.
if os.path.exists(self.qemu_pidfile):
os.remove(self.qemu_pidfile)
- self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1}"'.format(bootparams, self.qemu_pidfile)
+ self.qemuparams = 'bootparams="{0}" qemuparams="-pidfile {1} {2}"'.format(bootparams, self.qemu_pidfile, qmp_param)
+
if qemuparams:
self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
@@ -200,8 +237,9 @@ class QemuRunner:
# blocking at the end of the runqemu script when using this within
# oe-selftest (this makes stty error out immediately). There ought
# to be a proper fix but this will suffice for now.
- self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env)
+ self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env, cwd=self.tmpdir)
output = self.runqemu.stdout
+ launch_time = time.time()
#
# We need the preexec_fn above so that all runqemu processes can easily be killed
@@ -227,7 +265,7 @@ class QemuRunner:
r = os.fdopen(r)
x = r.read()
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
- sys.exit(0)
+ os._exit(0)
self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
@@ -236,6 +274,7 @@ class QemuRunner:
while not self.is_alive() and time.time() < endtime:
if self.runqemu.poll():
if self.runqemu_exited:
+ self.logger.warning("runqemu during is_alive() test")
return False
if self.runqemu.returncode:
# No point waiting any longer
@@ -247,13 +286,25 @@ class QemuRunner:
time.sleep(0.5)
if self.runqemu_exited:
- return False
+ self.logger.warning("runqemu after timeout")
+
+ if self.runqemu.returncode:
+ self.logger.warning('runqemu exited with code %d' % self.runqemu.returncode)
if not self.is_alive():
self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
(self.runqemutime, time.strftime("%D %H:%M:%S")))
+
+ qemu_pid = None
+ if os.path.isfile(self.qemu_pidfile):
+ with open(self.qemu_pidfile, 'r') as f:
+ qemu_pid = f.read().strip()
+
+ self.logger.error("Status information, poll status: %s, pidfile exists: %s, pidfile contents %s, proc pid exists %s"
+ % (self.runqemu.poll(), os.path.isfile(self.qemu_pidfile), str(qemu_pid), os.path.exists("/proc/" + str(qemu_pid))))
+
# Dump all processes to help us to figure out what is going on...
- ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '], stdout=subprocess.PIPE).communicate()[0]
+ ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,pri,ni,command '], stdout=subprocess.PIPE).communicate()[0]
processes = ps.decode("utf-8")
self.logger.debug("Running processes:\n%s" % processes)
self._dump_host()
@@ -265,6 +316,72 @@ class QemuRunner:
self.logger.error("No output from runqemu.\n")
return False
+ # Create the client socket for the QEMU Monitor Control Socket
+ # This will allow us to read status from Qemu if the process
+ # is still alive
+ self.logger.debug("QMP Initializing to %s" % (qmp_port))
+ # chdir dance for path length issues with unix sockets
+ origpath = os.getcwd()
+ try:
+ os.chdir(os.path.dirname(qmp_port))
+ try:
+ self.qmp = qmp.QEMUMonitorProtocol(os.path.basename(qmp_port))
+ except OSError as msg:
+ self.logger.warning("Failed to initialize qemu monitor socket: %s File: %s" % (msg, msg.filename))
+ return False
+
+ self.logger.debug("QMP Connecting to %s" % (qmp_port))
+ if not os.path.exists(qmp_port) and self.is_alive():
+ self.logger.debug("QMP Port does not exist waiting for it to be created")
+ endtime = time.time() + self.runqemutime
+ while not os.path.exists(qmp_port) and self.is_alive() and time.time() < endtime:
+ self.logger.info("QMP port does not exist yet!")
+ time.sleep(0.5)
+ if not os.path.exists(qmp_port) and self.is_alive():
+ self.logger.warning("QMP Port still does not exist but QEMU is alive")
+ return False
+
+ try:
+ self.qmp.connect()
+ connect_time = time.time()
+ self.logger.info("QMP connected to QEMU at %s and took %s seconds" %
+ (time.strftime("%D %H:%M:%S"),
+ time.time() - launch_time))
+ except OSError as msg:
+ self.logger.warning("Failed to connect qemu monitor socket: %s File: %s" % (msg, msg.filename))
+ return False
+ except qmp.QMPConnectError as msg:
+ self.logger.warning("Failed to communicate with qemu monitor: %s" % (msg))
+ return False
+ finally:
+ os.chdir(origpath)
+
+ # We worry that mmap'd libraries may cause page faults which hang the qemu VM for long
+ # periods, causing failures. Before we "start" qemu, read through its mapped files to try to
+ # ensure we don't hit page faults later
+ mapdir = "/proc/" + str(self.qemupid) + "/map_files/"
+ try:
+ for f in os.listdir(mapdir):
+ try:
+ linktarget = os.readlink(os.path.join(mapdir, f))
+ if not linktarget.startswith("/") or linktarget.startswith("/dev") or "deleted" in linktarget:
+ continue
+ with open(linktarget, "rb") as readf:
+ data = True
+ while data:
+ data = readf.read(4096)
+ except FileNotFoundError:
+ continue
+ # CentOS 7 doesn't allow us to read /map_files/
+ except PermissionError:
+ pass
+
+ # Release the qemu process to continue running
+ self.run_monitor('cont')
+ self.logger.info("QMP released QEMU at %s and took %s seconds from connect" %
+ (time.strftime("%D %H:%M:%S"),
+ time.time() - connect_time))
+
# We are alive: qemu is running
out = self.getOutput(output)
netconf = False # network configuration is not required by default
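The map_files walk above warms the host page cache with every file the qemu process has mapped, so the guest is less likely to stall on disk-backed page faults once released; a standalone sketch of the same technique, pointed at an arbitrary pid:

    import os

    def prefault_mapped_files(pid):
        # Read every regular file mmap'd by the process so later page
        # faults hit the warm page cache instead of disk.
        mapdir = "/proc/%d/map_files/" % pid
        try:
            for entry in os.listdir(mapdir):
                try:
                    target = os.readlink(os.path.join(mapdir, entry))
                    # Skip anonymous, device and deleted mappings.
                    if not target.startswith("/") or target.startswith("/dev") or "deleted" in target:
                        continue
                    with open(target, "rb") as f:
                        while f.read(4096):
                            pass
                except FileNotFoundError:
                    continue
        except PermissionError:
            # Some kernels (e.g. CentOS 7) deny access to map_files/.
            pass

    prefault_mapped_files(os.getpid())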
@@ -290,7 +407,7 @@ class QemuRunner:
self.logger.debug("qemu cmdline used:\n{}".format(cmdline))
except (IndexError, ValueError):
# Try to get network configuration from runqemu output
- match = re.match(r'.*Network configuration: (?:ip=)*([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
+ match = re.match(r'.*Network configuration: (?:ip=)*([0-9.]+)::([0-9.]+):([0-9.]+).*',
out, re.MULTILINE|re.DOTALL)
if match:
self.ip, self.server_ip, self.netmask = match.groups()
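The regex change drops the $ anchor so the netmask group still matches when runqemu prints further fields (such as the interface name) after it; a quick check of the relaxed pattern against an illustrative output line:

    import re

    out = "... Network configuration: ip=192.168.7.2::192.168.7.1:255.255.255.0::eth0 ..."
    match = re.match(r'.*Network configuration: (?:ip=)*([0-9.]+)::([0-9.]+):([0-9.]+).*',
                     out, re.MULTILINE | re.DOTALL)
    if match:
        ip, server_ip, netmask = match.groups()
        print(ip, server_ip, netmask)   # 192.168.7.2 192.168.7.1 255.255.255.0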
@@ -365,7 +482,6 @@ class QemuRunner:
sock.close()
stopread = True
-
if not reachedlogin:
if time.time() >= endtime:
self.logger.warning("Target didn't reach login banner in %d seconds (%s)" %
@@ -382,7 +498,7 @@ class QemuRunner:
# If we are not able to login the tests can continue
try:
- (status, output) = self.run_serial(self.boot_patterns['send_login_user'], raw=True)
+ (status, output) = self.run_serial(self.boot_patterns['send_login_user'], raw=True, timeout=120)
if re.search(self.boot_patterns['search_login_succeeded'], output):
self.logged = True
self.logger.debug("Logged as root in serial console")
@@ -422,10 +538,15 @@ class QemuRunner:
if self.runqemu.poll() is None:
self.logger.debug("Sending SIGKILL to runqemu")
os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
+ if not self.runqemu.stdout.closed:
+ self.logger.info("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
self.runqemu.stdin.close()
self.runqemu.stdout.close()
self.runqemu_exited = True
+ if hasattr(self, 'qmp') and self.qmp:
+ self.qmp.close()
+ self.qmp = None
if hasattr(self, 'server_socket') and self.server_socket:
self.server_socket.close()
self.server_socket = None
@@ -456,6 +577,11 @@ class QemuRunner:
self.thread.stop()
self.thread.join()
+ def allowexit(self):
+ self.canexit = True
+ if self.thread:
+ self.thread.allowexit()
+
def restart(self, qemuparams = None):
self.logger.warning("Restarting qemu process")
if self.runqemu.poll() is None:
@@ -484,6 +610,13 @@ class QemuRunner:
return True
return False
+ def run_monitor(self, command, args=None, timeout=60):
+ if hasattr(self, 'qmp') and self.qmp:
+ if args is not None:
+ return self.qmp.cmd(command, args)
+ else:
+ return self.qmp.cmd(command)
+
def run_serial(self, command, raw=False, timeout=60):
# We assume the target system has echo to get the command status
if not raw:
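run_monitor() above is a thin wrapper over the QMP client's cmd(); a hedged usage sketch against an assumed, already-launched runner (the command names themselves are standard QMP):

    # 'runner' is assumed to be a QemuRunner whose launch() connected QMP.
    runner.run_monitor('cont')                   # release the -S paused guest
    reply = runner.run_monitor('query-status')   # {'return': {'status': 'running', ...}}
    runner.run_monitor('human-monitor-command',
                       {'command-line': 'info registers'})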
@@ -511,7 +644,9 @@ class QemuRunner:
if re.search(self.boot_patterns['search_cmd_finished'], data):
break
else:
- raise Exception("No data on serial console socket")
+ if self.canexit:
+ return (1, "")
+ raise Exception("No data on serial console socket, connection closed?")
if data:
if raw:
@@ -549,6 +684,7 @@ class LoggingThread(threading.Thread):
self.logger = logger
self.readsock = None
self.running = False
+ self.canexit = False
self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL
self.readevents = select.POLLIN | select.POLLPRI
@@ -582,6 +718,9 @@ class LoggingThread(threading.Thread):
self.close_ignore_error(self.writepipe)
self.running = False
+ def allowexit(self):
+ self.canexit = True
+
def eventloop(self):
poll = select.poll()
event_read_mask = self.errorevents | self.readevents
@@ -627,7 +766,7 @@ class LoggingThread(threading.Thread):
data = self.readsock.recv(count)
except socket.error as e:
if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
- return ''
+ return b''
else:
raise
@@ -638,7 +777,9 @@ class LoggingThread(threading.Thread):
# happened. But for this code it counts as an
# error since the connection shouldn't go away
# until qemu exits.
- raise Exception("Console connection closed unexpectedly")
+ if not self.canexit:
+ raise Exception("Console connection closed unexpectedly")
+ return b''
return data
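The b'' fix in the hunk above matters because recv() yields bytes and the caller appends the result to a bytes buffer, so returning the str '' would raise a TypeError downstream. A minimal sketch of the corrected non-blocking receive, with a socketpair standing in for the console socket:

    import errno
    import socket

    def recv_nonblocking(sock, count=1024):
        # A would-block condition must yield the same type as a real
        # read: bytes, never str.
        try:
            return sock.recv(count)
        except socket.error as e:
            if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                return b''
            raise

    a, b = socket.socketpair()
    a.setblocking(False)
    print(recv_nonblocking(a))   # b'' - empty, but still bytes
    b.send(b'data')
    print(recv_nonblocking(a))   # b'data'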
diff --git a/meta/lib/oeqa/utils/qemutinyrunner.py b/meta/lib/oeqa/utils/qemutinyrunner.py
index 364005bd2d..20009401ca 100644
--- a/meta/lib/oeqa/utils/qemutinyrunner.py
+++ b/meta/lib/oeqa/utils/qemutinyrunner.py
@@ -19,7 +19,7 @@ from .qemurunner import QemuRunner
class QemuTinyRunner(QemuRunner):
- def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, kernel, boottime, logger):
+ def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, kernel, boottime, logger, tmpfsdir=None):
# Popen object for runqemu
self.runqemu = None
@@ -37,6 +37,7 @@ class QemuTinyRunner(QemuRunner):
self.deploy_dir_image = deploy_dir_image
self.logfile = logfile
self.boottime = boottime
+ self.tmpfsdir = tmpfsdir
self.runqemutime = 60
self.socketfile = "console.sock"
@@ -83,6 +84,9 @@ class QemuTinyRunner(QemuRunner):
return False
else:
os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+ if self.tmpfsdir:
+ env["RUNQEMU_TMPFS_DIR"] = self.tmpfsdir
+
# Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
# badly with screensavers.
@@ -138,7 +142,7 @@ class QemuTinyRunner(QemuRunner):
#
# Walk the process tree from the process specified looking for a qemu-system. Return its [pid, cmd]
#
- ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
+ ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,pri,ni,command'], stdout=subprocess.PIPE).communicate()[0]
processes = ps.decode("utf-8").split('\n')
nfields = len(processes[0].split()) - 1
pids = {}
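The extra pri,ni columns shift the command field right, which the parser copes with by sizing the split from the header row; a hedged sketch of that parsing step on its own:

    import subprocess

    def list_processes():
        ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,pri,ni,command'],
                              stdout=subprocess.PIPE).communicate()[0]
        lines = ps.decode("utf-8").split('\n')
        # The header row tells us how many fixed columns precede the
        # command, so adding columns never breaks the split.
        nfields = len(lines[0].split()) - 1
        procs = {}
        for line in lines[1:]:
            fields = line.split(None, nfields)
            if len(fields) > nfields:
                procs[int(fields[0])] = (int(fields[1]), fields[nfields])
        return procs

    for pid, (ppid, cmd) in sorted(list_processes().items())[:5]:
        print(pid, ppid, cmd)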
diff --git a/meta/lib/oeqa/utils/targetbuild.py b/meta/lib/oeqa/utils/targetbuild.py
index 1055810ca3..09738add1d 100644
--- a/meta/lib/oeqa/utils/targetbuild.py
+++ b/meta/lib/oeqa/utils/targetbuild.py
@@ -19,6 +19,7 @@ class BuildProject(metaclass=ABCMeta):
self.d = d
self.uri = uri
self.archive = os.path.basename(uri)
+ self.tempdirobj = None
if not tmpdir:
tmpdir = self.d.getVar('WORKDIR')
if not tmpdir:
@@ -71,9 +72,10 @@ class BuildProject(metaclass=ABCMeta):
return self._run('cd %s; make install %s' % (self.targetdir, install_args))
def clean(self):
+ if self.tempdirobj:
+ self.tempdirobj.cleanup()
self._run('rm -rf %s' % self.targetdir)
subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
- pass
class TargetBuildProject(BuildProject):
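Holding a reference to the TemporaryDirectory object and calling cleanup() explicitly avoids the ResourceWarning emitted when the object is instead finalized by the garbage collector; a minimal sketch of the pattern, assuming the constructor falls back to tempfile.TemporaryDirectory when no usable tmpdir is supplied:

    import tempfile

    class Project:
        def __init__(self, tmpdir=None):
            self.tempdirobj = None
            if not tmpdir:
                # Keep the object alive; its name is the directory path.
                self.tempdirobj = tempfile.TemporaryDirectory(prefix='buildproject-')
                tmpdir = self.tempdirobj.name
            self.tmpdir = tmpdir

        def clean(self):
            # Explicit cleanup, mirroring BuildProject.clean() above.
            if self.tempdirobj:
                self.tempdirobj.cleanup()

    p = Project()
    print(p.tmpdir)
    p.clean()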