Diffstat (limited to 'meta/classes/sstate.bbclass')
-rw-r--r--  meta/classes/sstate.bbclass  439
1 file changed, 308 insertions(+), 131 deletions(-)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 64808f8e10..1c0cae4893 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1,21 +1,45 @@
-SSTATE_VERSION = "3"
+SSTATE_VERSION = "8"
+
+SSTATE_ZSTD_CLEVEL ??= "8"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
-def generate_sstatefn(spec, hash, d):
+def generate_sstatefn(spec, hash, taskname, siginfo, d):
+ if taskname is None:
+ return ""
+ extension = ".tar.zst"
+ # 8 chars reserved for siginfo
+ limit = 254 - 8
+ if siginfo:
+ limit = 254
+ extension = ".tar.zst.siginfo"
if not hash:
hash = "INVALID"
- return hash[:2] + "/" + spec + hash
+ fn = spec + hash + "_" + taskname + extension
+ # If the filename is too long, attempt to reduce it
+ if len(fn) > limit:
+ components = spec.split(":")
+ # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
+ # 7 is for the separators
+ avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ components[2] = components[2][:avail]
+ components[3] = components[3][:avail]
+ components[4] = components[4][:avail]
+ spec = ":".join(components)
+ fn = spec + hash + "_" + taskname + extension
+ if len(fn) > limit:
+ bb.fatal("Unable to reduce sstate name to less than 255 characters")
+ return hash[:2] + "/" + hash[2:4] + "/" + fn
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
# explicitly make PV depend on the evaluated value of the PV variable
PV[vardepvalue] = "${PV}"
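
For illustration, a minimal standalone sketch of the new naming scheme (the spec and hash values below are made up): objects now live under a two-level hash-sharded directory and embed the task name and a .tar.zst extension, where previously only hash[:2] was used as a prefix. The real generate_sstatefn above additionally shortens over-long specs.

    # Hypothetical values; mirrors the sharding logic of generate_sstatefn.
    def sketch_sstatefn(spec, hashval, taskname, siginfo=False):
        extension = ".tar.zst.siginfo" if siginfo else ".tar.zst"
        fn = spec + hashval + "_" + taskname + extension
        # e.g. "ab/cd/sstate:zlib:...:abcd..._populate_sysroot.tar.zst"
        return hashval[:2] + "/" + hashval[2:4] + "/" + fn

    print(sketch_sstatefn("sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:8:",
                          "abcd1234ef567890", "populate_sysroot"))
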
@@ -26,42 +50,50 @@ SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
+SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
-SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
+SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
-SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
+SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
-SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
+SSTATE_HASHEQUIV_FILEMAP ?= " \
+ populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
+ populate_sysroot:*/postinst-useradd-*:${COREBASE} \
+ populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
+ populate_sysroot:*/crossscripts/*:${TMPDIR} \
+ populate_sysroot:*/crossscripts/*:${COREBASE} \
+ "
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
SSTATE_ARCHS = " \
${BUILD_ARCH} \
+ ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
- ${BUILD_ARCH}_${TARGET_ARCH} \
${SDK_ARCH}_${SDK_OS} \
${SDK_ARCH}_${PACKAGE_ARCH} \
allarch \
${PACKAGE_ARCH} \
${PACKAGE_EXTRA_ARCHS} \
${MACHINE_ARCH}"
+SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
-SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
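
Each SSTATE_HASHEQUIV_FILEMAP entry above appears to be a colon-separated triple of task name, file glob, and a filter (a path prefix, or a regex- prefixed expression) to strip from matching files before the hash-equivalence outhash is computed; the actual consumption happens in oe.sstatesig. A hedged sketch of the format, with made-up paths:

    # Parse FILEMAP-style entries into (task, glob, filter) triples.
    filemap = """
    populate_sysroot:*/postinst-useradd-*:/hypothetical/tmpdir
    populate_sysroot:*/crossscripts/*:regex-\\s(PATH|HOME)=.*\\s
    """.split()

    for entry in filemap:
        task, fileglob, filt = entry.split(":", 2)
        kind = "regex" if filt.startswith("regex-") else "path-prefix"
        print(task, fileglob, kind, filt)
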
@@ -83,6 +115,9 @@ SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+# List of signatures to consider valid.
+SSTATE_VALID_SIGS ??= ""
+SSTATE_VALID_SIGS[vardepvalue] = ""
SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
@@ -102,7 +137,7 @@ python () {
elif bb.data.inherits_class('crosssdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross', d):
- d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
elif bb.data.inherits_class('nativesdk', d):
d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
elif bb.data.inherits_class('cross-canadian', d):
@@ -122,6 +157,8 @@ python () {
for task in unique_tasks:
d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+ d.setVarFlag(task, 'network', '1')
+ d.setVarFlag(task + "_setscene", 'network', '1')
}
def sstate_init(task, d):
@@ -222,13 +259,13 @@ def sstate_install(ss, d):
shareddirs.append(dstdir)
# Check the file list for conflicts against files which already exist
- whitelist = (d.getVar("SSTATE_DUPWHITELIST") or "").split()
+ overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
match = []
for f in sharedfiles:
if os.path.exists(f) and not os.path.islink(f):
f = os.path.normpath(f)
realmatch = True
- for w in whitelist:
+ for w in overlap_allowed:
w = os.path.normpath(w)
if f.startswith(w):
realmatch = False
@@ -258,7 +295,7 @@ def sstate_install(ss, d):
"DISTRO_FEATURES on an existing build directory is not supported - you " \
"should really clean out tmp and rebuild (reusing sstate should be safe). " \
"It could be the overlapping files detected are harmless in which case " \
- "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
+ "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
"also be your build is including two different conflicting versions of " \
"things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
"be to resolve the conflict. If in doubt, please ask on the mailing list, " \
@@ -293,6 +330,8 @@ def sstate_install(ss, d):
if os.path.exists(i):
with open(i, "r") as f:
manifests = f.readlines()
+ # We append new entries; we don't remove older entries, which may have the same
+ # manifest name but different versions from stamp/workdir. See below.
if filedata not in manifests:
with open(i, "a+") as f:
f.write(filedata)
@@ -310,15 +349,16 @@ def sstate_install(ss, d):
for lock in locks:
bb.utils.unlockfile(lock)
-sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST STATE_MANMACH SSTATE_MANFILEPREFIX"
+sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES STATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
def sstate_installpkg(ss, d):
from oe.gpg_sign import get_signer
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
+ sstatefetch = d.getVar('SSTATE_PKGNAME')
+ sstatepkg = d.getVar('SSTATE_PKG')
if not os.path.exists(sstatepkg):
pstaging_fetch(sstatefetch, d)
@@ -330,11 +370,13 @@ def sstate_installpkg(ss, d):
sstate_clean(ss, d)
d.setVar('SSTATE_INSTDIR', sstateinst)
- d.setVar('SSTATE_PKG', sstatepkg)
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
+ if not os.path.isfile(sstatepkg + '.sig'):
+ bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
signer = get_signer(d, 'local')
- if not signer.verify(sstatepkg + '.sig'):
+ if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
return False
@@ -372,7 +414,7 @@ def sstate_installpkgdir(ss, d):
for state in ss['dirs']:
prepdir(state[1])
- os.rename(sstateinst + state[0], state[1])
+ bb.utils.rename(sstateinst + state[0], state[1])
sstate_install(ss, d)
for plain in ss['plaindirs']:
@@ -384,7 +426,7 @@ def sstate_installpkgdir(ss, d):
dest = plain
bb.utils.mkdirhier(src)
prepdir(dest)
- os.rename(src, dest)
+ bb.utils.rename(src, dest)
return True
@@ -440,8 +482,9 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
if d.getVarFlag('do_%s' % ss['task'], 'task'):
+ d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
bb.note("Removing %s" % sstatepkgfile)
oe.path.remove(sstatepkgfile)
@@ -451,7 +494,7 @@ def sstate_clean_cachefiles(d):
ss = sstate_state_fromvars(ld, task)
sstate_clean_cachefile(ss, ld)
-def sstate_clean_manifest(manifest, d, prefix=None):
+def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
import oe.path
mfile = open(manifest)
@@ -469,7 +512,9 @@ def sstate_clean_manifest(manifest, d, prefix=None):
if entry.endswith("/"):
if os.path.islink(entry[:-1]):
os.remove(entry[:-1])
- elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
+ # Removing directories whilst builds are in progress exposes a race. Only
+ # do it in contexts where it is safe to do so.
os.rmdir(entry[:-1])
else:
os.remove(entry)
@@ -507,7 +552,7 @@ def sstate_clean(ss, d):
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
- sstate_clean_manifest(manifest, d)
+ sstate_clean_manifest(manifest, d, canrace=True)
for lock in locks:
bb.utils.unlockfile(lock)
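
The canrace flag exists because deleting an apparently empty directory can race against a concurrent task repopulating it, so sstate_clean passes canrace=True and leaves empty directories alone. If removal were attempted anyway, a tolerant pattern (a sketch of an alternative, not what the class does) would be:

    import errno, os

    def remove_if_still_empty(path):
        try:
            os.rmdir(path)   # succeeds only if the directory is still empty
        except OSError as e:
            if e.errno not in (errno.ENOTEMPTY, errno.ENOENT):
                raise
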
@@ -608,14 +653,24 @@ python sstate_hardcode_path () {
def sstate_package(ss, d):
import oe.path
+ import time
tmpdir = d.getVar('TMPDIR')
+ fixtime = False
+ if ss['task'] == "package":
+ fixtime = True
+
+ def fixtimestamp(root, path):
+ f = os.path.join(root, path)
+ if os.lstat(f).st_mtime > sde:
+ os.utime(f, (sde, sde), follow_symlinks=False)
+
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG') + '_'+ ss['task'] + ".tgz"
+ sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
+ d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
- bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
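
The fixtimestamp helper clamps any mtime newer than SOURCE_DATE_EPOCH so that do_package output, and hence the sstate archive, is reproducible. A self-contained sketch of the same idea (the environment lookup is assumed for illustration; the class reads the datastore):

    import os, time

    sde = int(os.environ.get("SOURCE_DATE_EPOCH") or time.time())

    def clamp_mtime(path, sde):
        # follow_symlinks=False touches the link itself, not its target
        if os.lstat(path).st_mtime > sde:
            os.utime(path, (sde, sde), follow_symlinks=False)
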
@@ -625,6 +680,8 @@ def sstate_package(ss, d):
# to sstate tasks but there aren't many of these so better just avoid them entirely.
for walkroot, dirs, files in os.walk(state[1]):
for file in files + dirs:
+ if fixtime:
+ fixtimestamp(walkroot, file)
srcpath = os.path.join(walkroot, file)
if not os.path.islink(srcpath):
continue
@@ -635,7 +692,7 @@ def sstate_package(ss, d):
continue
bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
- os.rename(state[1], sstatebuild + state[0])
+ bb.utils.rename(state[1], sstatebuild + state[0])
workdir = d.getVar('WORKDIR')
sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
@@ -645,10 +702,14 @@ def sstate_package(ss, d):
pdir = plain.replace(sharedworkdir, sstatebuild)
bb.utils.mkdirhier(plain)
bb.utils.mkdirhier(pdir)
- os.rename(plain, pdir)
+ bb.utils.rename(plain, pdir)
+ if fixtime:
+ fixtimestamp(pdir, "")
+ for walkroot, dirs, files in os.walk(pdir):
+ for file in files + dirs:
+ fixtimestamp(walkroot, file)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_PKG', sstatepkg)
d.setVar('SSTATE_INSTDIR', sstatebuild)
if d.getVar('SSTATE_SKIP_CREATION') == '1':
@@ -664,10 +725,25 @@ def sstate_package(ss, d):
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
- bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ # SSTATE_PKG may have been changed by sstate_report_unihash
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
return
+sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
+
def pstaging_fetch(sstatefetch, d):
import bb.fetch2
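
The siginfo handling above is a "refresh or create" idiom: write the file if it is missing, otherwise just bump its mtime, tolerating read-only mirrors. A compact sketch of the pattern:

    import errno, os

    def touch_or_create(path, create):
        if not os.path.exists(path):
            create(path)
            return
        try:
            os.utime(path, None)
        except PermissionError:
            pass
        except OSError as e:
            if e.errno != errno.EROFS:   # a read-only filesystem is acceptable
                raise
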
@@ -686,6 +762,7 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('FILESPATH', dldir)
localdata.setVar('DL_DIR', dldir)
localdata.setVar('PREMIRRORS', mirrors)
+ localdata.setVar('SRCPV', d.getVar('SRCPV'))
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
@@ -704,16 +781,22 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('SRC_URI', srcuri)
try:
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.checkstatus()
fetcher.download()
except bb.fetch2.BBFetchException:
- break
+ pass
+
+pstaging_fetch[vardepsexclude] += "SRCPV"
+
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
accelerate = sstate_installpkg(shared_state, d)
if not accelerate:
- bb.fatal("No suitable staging package found")
+ msg = "No sstate archive obtainable, will run full task instead."
+ bb.warn(msg)
+ raise bb.BBHandledException(msg)
python sstate_task_prefunc () {
shared_state = sstate_state_fromvars(d)
@@ -748,38 +831,48 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
- TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
-
- # Exit earlu if it already exists
+ # Exit early if it already exists
if [ -e ${SSTATE_PKG} ]; then
+ touch ${SSTATE_PKG} 2>/dev/null || true
return
fi
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
- fi
+ mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
+ TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
+
+ OPT="-cS"
+ ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
+ fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
set +e
- tar $OPT -f $TFILE *
+ tar -I "$ZSTD" $OPT -f $TFILE *
ret=$?
if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
exit 1
fi
set -e
else
- tar $OPT --file=$TFILE --files-from=/dev/null
+ tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
# Skip if it was already created by some other process
- if [ ! -e ${SSTATE_PKG} ]; then
- mv -f $TFILE ${SSTATE_PKG}
+ if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
+ # There is a symbolic link, but it links to nothing.
+ # Forcefully replace it with the new file.
+ ln -f $TFILE ${SSTATE_PKG} || true
+ elif [ ! -e ${SSTATE_PKG} ]; then
+ # Move into place using ln to attempt an atomic op.
+ # Abort if it already exists
+ ln $TFILE ${SSTATE_PKG} || true
else
- rm $TFILE
+ touch ${SSTATE_PKG} 2>/dev/null || true
fi
+ rm $TFILE
}
python sstate_sign_package () {
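
sstate_create_package now publishes the archive with ln rather than mv: a hard link fails if the destination already exists, so the first writer wins and an existing file is never clobbered mid-read (a dangling symlink into a mirror gets ln -f instead). The same idiom in Python, as a sketch:

    import os

    def publish(tmpfile, dest):
        try:
            os.link(tmpfile, dest)   # atomic; raises if dest already exists
        except FileExistsError:
            pass                     # another process won the race; keep its file
        finally:
            os.remove(tmpfile)       # drop the temporary name either way
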
@@ -807,13 +900,19 @@ python sstate_report_unihash() {
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
- tar -xvzf ${SSTATE_PKG}
- # update .siginfo atime on local/NFS mirror
- [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo
- # Use "! -w ||" to return true for read only files
- [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
- [ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
- [ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
+ ZSTD="zstd -T${ZSTD_THREADS}"
+ # Use pzstd if available
+ if [ -x "$(command -v pzstd)" ]; then
+ ZSTD="pzstd -p ${ZSTD_THREADS}"
+ fi
+
+ tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
+ # update .siginfo atime on local/NFS mirror if it is a symbolic link
+ [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
+ # update each symbolic link instead of any referenced file
+ touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
+ [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
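
Both the pack and unpack paths select pzstd when it is installed and fall back to plain zstd with -T threading. A sketch of that selection, assuming only that the two binaries behave as invoked above:

    import shutil

    def zstd_cmd(threads, level=None):
        lvl = ["-%d" % level] if level is not None else []
        if shutil.which("pzstd"):
            return ["pzstd"] + lvl + ["-p", str(threads)]
        return ["zstd"] + lvl + ["-T%d" % threads]

    print(zstd_cmd(8, level=8))
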
@@ -821,9 +920,6 @@ BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
found = set()
missed = set()
- extension = ".tgz"
- if siginfo:
- extension = extension + ".siginfo"
def gethash(task):
return sq_data['unihash'][task]
@@ -845,21 +941,22 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
return spec, extrapath, tname
+ def getsstatefile(tid, siginfo, d):
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(tid, d)
-
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + extension)
+ sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
if os.path.exists(sstatefile):
- bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
found.add(tid)
- continue
+ bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
else:
missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+ foundLocal = len(found)
mirrors = d.getVar("SSTATE_MIRRORS")
if mirrors:
# Copy the data object and override DL_DIR and SRC_URI
@@ -891,63 +988,63 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
- localdata.setVar('SRC_URI', srcuri)
+ localdata2.setVar('SRC_URI', srcuri)
bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+ import traceback
+
try:
fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
found.add(tid)
- if tid in missed:
- missed.remove(tid)
- except:
- missed.add(tid)
- bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
- pass
- bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
+ missed.remove(tid)
+ except bb.fetch2.FetchError as e:
+ bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
+ except Exception as e:
+ bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
+
+ if progress:
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- for tid in sq_data['hash']:
- if tid in found:
- continue
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + extension)
+ for tid in missed:
+ sstatefile = d.expand(getsstatefile(tid, siginfo, d))
tasklist.append((tid, sstatefile))
if tasklist:
- msg = "Checking sstate mirror object availability"
- bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
-
- import multiprocessing
- nproc = min(multiprocessing.cpu_count(), len(tasklist))
-
- bb.event.enable_threadlock()
- pool = oe.utils.ThreadedPool(nproc, len(tasklist),
- worker_init=checkstatus_init, worker_end=checkstatus_end)
- for t in tasklist:
- pool.add_task(checkstatus, t)
- pool.start()
- pool.wait_completion()
- bb.event.disable_threadlock()
-
- bb.event.fire(bb.event.ProcessFinished(msg), d)
-
- # Likely checking an individual task hash again for multiconfig sharing of sstate tasks so skip reporting
- if len(sq_data['hash']) == 1:
- return found
+ nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
+
+ progress = len(tasklist) >= 100
+ if progress:
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
+
+ # Have to setup the fetcher environment here rather than in each thread as it would race
+ fetcherenv = bb.fetch2.get_fetcher_environment(d)
+ with bb.utils.environment(**fetcherenv):
+ bb.event.enable_threadlock()
+ pool = oe.utils.ThreadedPool(nproc, len(tasklist),
+ worker_init=checkstatus_init, worker_end=checkstatus_end,
+ name="sstate_checkhashes-")
+ for t in tasklist:
+ pool.add_task(checkstatus, t)
+ pool.start()
+ pool.wait_completion()
+ bb.event.disable_threadlock()
+
+ if progress:
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
for tid in missed:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + ".tgz")
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
for tid in found:
- spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), d) + "_" + tname + ".tgz")
+ sstatefile = d.expand(getsstatefile(tid, False, d))
evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
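
The mirror check fans the per-object URI probes out over a thread pool sized from BB_NUMBER_THREADS, reporting progress only for 100 or more objects. A rough standard-library approximation of the same flow (the class itself uses bb.fetch2 and oe.utils.ThreadedPool):

    import concurrent.futures
    import urllib.request

    def check_mirror(tasks, nproc):
        # tasks: iterable of (tid, url); returns the set of tids that exist
        found = set()
        def probe(item):
            tid, url = item
            req = urllib.request.Request(url, method="HEAD")
            with urllib.request.urlopen(req, timeout=30):
                return tid
        with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as pool:
            futures = [pool.submit(probe, t) for t in tasks]
            for fut in concurrent.futures.as_completed(futures):
                try:
                    found.add(fut.result())
                except Exception:
                    pass   # any fetch failure counts as a miss
        return found
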
@@ -961,12 +1058,14 @@ def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True,
match = 0
if total:
match = len(found) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
+ bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
+ (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
bb.parse.siggen.checkhashes(sq_data, missed, found, d)
return found
+setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -985,15 +1084,13 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
+ directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]
+
def isNativeCross(x):
return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
- # We only need to trigger populate_lic through direct dependencies
- if taskdependees[task][1] == "do_populate_lic":
- return True
-
- # stash_locale and gcc_stash_builddir are never needed as a dependency for built objects
- if taskdependees[task][1] == "do_stash_locale" or taskdependees[task][1] == "do_gcc_stash_builddir":
+ # We only need to trigger deploy_source_date_epoch through direct dependencies
+ if taskdependees[task][1] in directtasks:
return True
# We only need to trigger packagedata through direct dependencies
@@ -1016,8 +1113,8 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
return False
- # do_package/packagedata/package_qa don't need do_populate_sysroot
- if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa']:
+ # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
+ if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
continue
# Native/Cross packages don't exist and are noexec anyway
if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
@@ -1065,13 +1162,9 @@ def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
# Target populate_sysroot need their dependencies
return False
- if taskdependees[task][1] == 'do_shared_workdir':
+ if taskdependees[dep][1] in directtasks:
continue
- if taskdependees[dep][1] == "do_populate_lic":
- continue
-
-
# Safe fallthrough default
logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
return False
@@ -1081,29 +1174,45 @@ addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
- # When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG')
- if not spkg.endswith(".tgz"):
+ writtensstate = d.getVar('SSTATE_CURRTASK')
+ if not writtensstate:
taskname = d.getVar("BB_RUNTASK")[3:]
spec = d.getVar('SSTATE_PKGSPEC')
swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG')
- bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
+ d.setVar("SSTATE_CURRTASK", taskname)
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+ except OSError as e:
+ # Handle read-only file systems gracefully
+ import errno
+ if e.errno != errno.EROFS:
+ raise e
+
}
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
-# Event handler which removes manifests and stamps file for
-# recipes which are no longer reachable in a build where they
-# once were.
+#
+# Event handler which removes manifests and stamps file for recipes which are no
+# longer 'reachable' in a build where they once were. 'Reachable' refers to
+# whether a recipe is parsed so recipes in a layer which was removed would no
+# longer be reachable. Switching between systemd and sysvinit where recipes
+# became skipped would be another example.
+#
# Also optionally removes the workdir of those tasks/recipes
#
-addhandler sstate_eventhandler2
-sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
-python sstate_eventhandler2() {
+addhandler sstate_eventhandler_reachablestamps
+sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
+python sstate_eventhandler_reachablestamps() {
import glob
d = e.data
stamps = e.stamps.values()
@@ -1129,11 +1238,21 @@ python sstate_eventhandler2() {
i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
if not os.path.exists(i):
continue
+ manseen = set()
+ ignore = []
with open(i, "r") as f:
lines = f.readlines()
- for l in lines:
+ for l in reversed(lines):
try:
(stamp, manifest, workdir) = l.split()
+ # The index may have multiple entries for the same manifest as the code above only appends
+ # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
+ # The last entry in the list is the valid one, any earlier entries with matching manifests
+ # should be ignored.
+ if manifest in manseen:
+ ignore.append(l)
+ continue
+ manseen.add(manifest)
if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
toremove.append(l)
if stamp not in seen:
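
Because sstate_install only ever appends, the index can hold several entries for one manifest; iterating in reverse makes the newest entry win and flags earlier duplicates for removal when the file is rewritten below. The core of that, as a sketch:

    def dedupe_index(lines):
        seen, ignore = set(), []
        for l in reversed(lines):
            manifest = l.split()[1]      # fields: stamp manifest workdir
            if manifest in seen:
                ignore.append(l)
            else:
                seen.add(manifest)
        return [l for l in lines if l not in ignore]
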
@@ -1164,6 +1283,8 @@ python sstate_eventhandler2() {
with open(i, "w") as f:
for l in lines:
+ if l in ignore:
+ continue
f.write(l)
machineindex |= set(stamps)
with open(mi, "w") as f:
@@ -1173,3 +1294,59 @@ python sstate_eventhandler2() {
if preservestamps:
os.remove(preservestampfile)
}
+
+
+#
+# Bitbake can generate an event showing which setscene tasks are 'stale',
+# i.e. which ones will be rerun. These are ones where a stamp file is present but
+# it is stale (e.g. taskhash doesn't match). With that list we can go through
+# the manifests for matching tasks and "uninstall" those manifests now. We do
+# this now rather than mid build since the distribution of files between sstate
+# objects may have changed, new tasks may run first and if those new tasks overlap
+# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
+# removing these files is fast.
+#
+addhandler sstate_eventhandler_stalesstate
+sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
+python sstate_eventhandler_stalesstate() {
+ d = e.data
+ tasks = e.tasks
+
+ bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+
+ for a in list(set(d.getVar("SSTATE_ARCHS").split())):
+ toremove = []
+ i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
+ if not os.path.exists(i):
+ continue
+ with open(i, "r") as f:
+ lines = f.readlines()
+ for l in lines:
+ try:
+ (stamp, manifest, workdir) = l.split()
+ for tid in tasks:
+ for s in tasks[tid]:
+ if s.startswith(stamp):
+ taskname = bb.runqueue.taskname_from_tid(tid)[3:]
+ manname = manifest + "." + taskname
+ if os.path.exists(manname):
+ bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
+ toremove.append((manname, tid, tasks[tid]))
+ break
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
+
+ if toremove:
+ msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
+ bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
+
+ removed = 0
+ for (manname, tid, stamps) in toremove:
+ sstate_clean_manifest(manname, d)
+ for stamp in stamps:
+ bb.utils.remove(stamp)
+ removed = removed + 1
+ bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
+
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
+}