Diffstat (limited to 'external/poky/meta/classes/sstate.bbclass')
-rw-r--r-- external/poky/meta/classes/sstate.bbclass | 274
1 file changed, 182 insertions(+), 92 deletions(-)
diff --git a/external/poky/meta/classes/sstate.bbclass b/external/poky/meta/classes/sstate.bbclass
index edbfba5d..375196ef 100644
--- a/external/poky/meta/classes/sstate.bbclass
+++ b/external/poky/meta/classes/sstate.bbclass
@@ -3,19 +3,41 @@ SSTATE_VERSION = "3"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
-def generate_sstatefn(spec, hash, d):
+def generate_sstatefn(spec, hash, taskname, siginfo, d):
+ if taskname is None:
+ return ""
+ extension = ".tgz"
+ # 8 chars reserved for siginfo
+ limit = 254 - 8
+ if siginfo:
+ limit = 254
+ extension = ".tgz.siginfo"
if not hash:
hash = "INVALID"
- return hash[:2] + "/" + spec + hash
+ fn = spec + hash + "_" + taskname + extension
+ # If the filename is too long, attempt to reduce it
+ if len(fn) > limit:
+ components = spec.split(":")
+ # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
+ # 7 is for the separators
+ avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ components[2] = components[2][:avail]
+ components[3] = components[3][:avail]
+ components[4] = components[4][:avail]
+ spec = ":".join(components)
+ fn = spec + hash + "_" + taskname + extension
+ if len(fn) > limit:
+ bb.fatal("Unable to reduce sstate name to less than 255 characters")
+ return hash[:2] + "/" + hash[2:4] + "/" + fn
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_TASKHASH'), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"
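
The shortening logic above keeps fields 0, 5 and 6 of the spec intact, trims the informational fields 2-4 when the name would exceed the limit, and spreads the resulting objects over two levels of hash-prefix directories so no single directory holds the whole cache. A rough standalone sketch of the resulting layout (the spec string and hash below are invented for the example, and the length-limiting step is omitted):

    # Illustrative only: mirrors the new two-level hash-prefix layout.
    def sstate_path(spec, taskhash, taskname, extension=".tgz"):
        fn = spec + taskhash + "_" + taskname + extension
        return taskhash[:2] + "/" + taskhash[2:4] + "/" + fn

    spec = "sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:"
    print(sstate_path(spec, "0123456789abcdef", "populate_sysroot"))
    # -> 01/23/sstate:zlib:...:0123456789abcdef_populate_sysroot.tgz
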
@@ -23,6 +45,7 @@ PV[vardepvalue] = "${PV}"
# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
+SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
@@ -61,6 +84,7 @@ SSTATE_ARCHS = " \
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
@@ -82,6 +106,18 @@ SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
+SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
+ the output hash for a task, which in turn is used to determine equivalency. \
+ "
+
+SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
+SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
+ hash equivalency server, such as PN, PV, taskname, etc. This information \
+ is very useful for developers looking at task data, but may leak sensitive \
+ data if the equivalence server is public. \
+ "
+
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
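
SSTATE_HASHEQUIV_METHOD names a function by its dotted path; the hash it computes over a task's output is what equivalence is judged on, so two tasks with different input hashes but identical output can be treated as interchangeable. A minimal sketch of such an output hash, assuming the method is simply handed the directory containing the task output (the name and signature here are assumptions, not the documented interface of oe.sstatesig.OEOuthashBasic):

    import hashlib, os

    # Hypothetical output-hash method: hash file names and contents in a
    # deterministic order, so identical output yields an identical digest.
    def example_outhash(path):
        h = hashlib.sha256()
        for root, dirs, files in os.walk(path):
            dirs.sort()
            for name in sorted(files):
                h.update(name.encode())
                with open(os.path.join(root, name), "rb") as f:
                    h.update(f.read())
        return h.hexdigest()
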
@@ -101,7 +137,7 @@ python () {
if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
- d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
+ d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
d.setVar('SSTATETASKS', " ".join(unique_tasks))
@@ -303,25 +339,29 @@ def sstate_installpkg(ss, d):
from oe.gpg_sign import get_signer
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
+ sstatefetch = d.getVar('SSTATE_PKGNAME')
+ sstatepkg = d.getVar('SSTATE_PKG')
if not os.path.exists(sstatepkg):
pstaging_fetch(sstatefetch, d)
if not os.path.isfile(sstatepkg):
- bb.note("Staging package %s does not exist" % sstatepkg)
+ bb.note("Sstate package %s does not exist" % sstatepkg)
return False
sstate_clean(ss, d)
d.setVar('SSTATE_INSTDIR', sstateinst)
- d.setVar('SSTATE_PKG', sstatepkg)
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
+ if not os.path.isfile(sstatepkg + '.sig'):
+ bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
signer = get_signer(d, 'local')
if not signer.verify(sstatepkg + '.sig'):
- bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
+ bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
# Empty sstateinst directory, ensure it's clean
if os.path.exists(sstateinst):
@@ -425,8 +465,9 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
if d.getVarFlag('do_%s' % ss['task'], 'task'):
+ d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
bb.note("Removing %s" % sstatepkgfile)
oe.path.remove(sstatepkgfile)
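
Because SSTATE_PATHSPEC now ends in "*_${SSTATE_PATH_CURRTASK}.tgz*", setting SSTATE_PATH_CURRTASK before expanding it gives a single glob that covers the archive, its .siginfo and any .sig files for that task across both hash-prefix levels. Roughly, the expansion has this shape (the directory and recipe values are invented for the illustration):

    import glob

    # Hypothetical expansion for do_populate_sysroot of zlib; the */*/
    # segment matches the two hash-prefix directories introduced above.
    pattern = ("/build/sstate-cache/*/*/"
               "sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:"
               "*_populate_sysroot.tgz*")
    for path in glob.glob(pattern):
        print("would remove", path)
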
@@ -597,10 +638,9 @@ def sstate_package(ss, d):
tmpdir = d.getVar('TMPDIR')
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG') + '_'+ ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
- bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
@@ -633,19 +673,30 @@ def sstate_package(ss, d):
os.rename(plain, pdir)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_PKG', sstatepkg)
d.setVar('SSTATE_INSTDIR', sstatebuild)
if d.getVar('SSTATE_SKIP_CREATION') == '1':
return
+ sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
+ if d.getVar('SSTATE_SIG_KEY'):
+ sstate_create_package.append('sstate_sign_package')
+
for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
- ['sstate_create_package', 'sstate_sign_package'] + \
+ sstate_create_package + \
(d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
- bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ # SSTATE_PKG may have been changed by sstate_report_unihash
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
return
@@ -670,7 +721,8 @@ def pstaging_fetch(sstatefetch, d):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
# Try a fetch from the sstate mirror, if it fails just return and
@@ -684,10 +736,11 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('SRC_URI', srcuri)
try:
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.checkstatus()
fetcher.download()
except bb.fetch2.BBFetchException:
- break
+ pass
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
@@ -728,13 +781,20 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
+ # Exit early if it already exists
+ if [ -e ${SSTATE_PKG} ]; then
+ [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ return
+ fi
+
+ mkdir -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
- fi
+ # Use pigz if available
+ OPT="-czS"
+ if [ -x "$(command -v pigz)" ]; then
+ OPT="-I pigz -cS"
+ fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
@@ -749,19 +809,35 @@ sstate_create_package () {
tar $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
- mv -f $TFILE ${SSTATE_PKG}
+ # Skip if it was already created by some other process
+ if [ ! -e ${SSTATE_PKG} ]; then
+ # Move into place using ln to attempt an atomic op.
+ # Abort if it already exists
+ ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ else
+ rm $TFILE
+ fi
+ [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
}
python sstate_sign_package () {
from oe.gpg_sign import get_signer
- if d.getVar('SSTATE_SIG_KEY'):
- signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG')
- if os.path.exists(sstate_pkg + '.sig'):
- os.unlink(sstate_pkg + '.sig')
- signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+
+ signer = get_signer(d, 'local')
+ sstate_pkg = d.getVar('SSTATE_PKG')
+ if os.path.exists(sstate_pkg + '.sig'):
+ os.unlink(sstate_pkg + '.sig')
+ signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+ d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+}
+
+python sstate_report_unihash() {
+ report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
+
+ if report_unihash:
+ ss = sstate_state_fromvars(d)
+ report_unihash(os.getcwd(), ss['task'], d)
}
#
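
The ln-then-rm sequence above relies on hard link creation failing if the destination already exists, so on a shared sstate directory only one of several racing writers ever turns its temporary file into SSTATE_PKG. The same idea expressed in Python, purely as an illustration of the pattern:

    import os

    # Sketch of the publish step: os.link() raises FileExistsError if
    # another writer got there first; the temp file is removed either way.
    def publish(tmpfile, target):
        try:
            os.link(tmpfile, target)   # atomic within one filesystem
        except FileExistsError:
            pass                       # someone else already published it
        finally:
            os.unlink(tmpfile)
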
@@ -780,24 +856,23 @@ sstate_unpack_package () {
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
+ found = set()
+ missed = set()
- ret = []
- missed = []
- extension = ".tgz"
- if siginfo:
- extension = extension + ".siginfo"
+ def gethash(task):
+ return sq_data['unihash'][task]
def getpathcomponents(task, d):
# Magic data from BB_HASHFILENAME
- splithashfn = sq_hashfn[task].split(" ")
+ splithashfn = sq_data['hashfn'][task].split(" ")
spec = splithashfn[1]
if splithashfn[0] == "True":
extrapath = d.getVar("NATIVELSBSTRING") + "/"
else:
extrapath = ""
-
- tname = sq_task[task][3:]
+
+ tname = bb.runqueue.taskname_from_tid(task)[3:]
if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
spec = splithashfn[2]
@@ -806,18 +881,18 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
return spec, extrapath, tname
- for task in range(len(sq_fn)):
+ for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(task, d)
+ spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+ sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
if os.path.exists(sstatefile):
bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
- ret.append(task)
+ found.add(tid)
continue
else:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
mirrors = d.getVar("SSTATE_MIRRORS")
@@ -835,7 +910,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
from bb.fetch2 import FetchConnectionCache
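
Switching from a literal "1" string comparison to bb.utils.to_boolean means settings written as "True" or "yes" are honoured as well. A small stand-in for that helper, with the accepted spellings stated as an assumption rather than the library's exact behaviour:

    # Hypothetical equivalent of bb.utils.to_boolean, for illustration only.
    def to_boolean(value, default=False):
        if not value:
            return default
        return value.strip().lower() in ("1", "y", "yes", "t", "true")

    assert to_boolean("1") and to_boolean("True") and to_boolean("yes")
    assert not to_boolean("0") and not to_boolean(None)
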
@@ -846,7 +922,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
thread_worker.connection_cache.close_connections()
def checkstatus(thread_worker, arg):
- (task, sstatefile) = arg
+ (tid, sstatefile) = arg
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
@@ -858,26 +934,29 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
- ret.append(task)
- if task in missed:
- missed.remove(task)
+ found.add(tid)
+ if tid in missed:
+ missed.remove(tid)
except:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
pass
- bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
+ if len(tasklist) >= min_tasks:
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- for task in range(len(sq_fn)):
- if task in ret:
+ min_tasks = 100
+ for tid in sq_data['hash']:
+ if tid in found:
continue
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
- tasklist.append((task, sstatefile))
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ tasklist.append((tid, sstatefile))
if tasklist:
- msg = "Checking sstate mirror object availability"
- bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
+ if len(tasklist) >= min_tasks:
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
import multiprocessing
nproc = min(multiprocessing.cpu_count(), len(tasklist))
@@ -891,37 +970,38 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
pool.wait_completion()
bb.event.disable_threadlock()
- bb.event.fire(bb.event.ProcessFinished(msg), d)
+ if len(tasklist) >= min_tasks:
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
- for task in missed:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
- for task in ret:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
+ for tid in missed:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
+ for tid in found:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
- # Print some summary statistics about the current task completion and how much sstate
- # reuse there was. Avoid divide by zero errors.
- total = len(sq_fn)
- currentcount = d.getVar("BB_SETSCENE_STAMPCURRENT_COUNT") or 0
- complete = 0
- if currentcount:
- complete = (len(ret) + currentcount) / (total + currentcount) * 100
- match = 0
- if total:
- match = len(ret) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(ret), len(missed), currentcount, match, complete))
+ if summary:
+ # Print some summary statistics about the current task completion and how much sstate
+ # reuse there was. Avoid divide by zero errors.
+ total = len(sq_data['hash'])
+ complete = 0
+ if currentcount:
+ complete = (len(found) + currentcount) / (total + currentcount) * 100
+ match = 0
+ if total:
+ match = len(found) / total * 100
+ bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
- bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+ bb.parse.siggen.checkhashes(sq_data, missed, found, d)
- return ret
+ return found
BB_SETSCENE_DEPVALID = "setscene_depvalid"
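
After this change sstate_checkhashes works on task identifiers rather than list indexes: sq_data carries per-tid dictionaries ('hash', 'unihash', 'hashfn'), and the function returns the set of tids whose objects were found. A toy illustration of how a task name is recovered from a tid, assuming tids end in the usual ":do_<task>" suffix (the tid below is invented):

    # Hedged sketch: take the text after the last colon, then drop the
    # leading "do_" the same way the code above does with [3:].
    def taskname_from_tid(tid):
        return tid.rsplit(":", 1)[1]

    tid = "/work/meta/recipes-core/zlib/zlib_1.2.11.bb:do_populate_sysroot"
    print(taskname_from_tid(tid)[3:])   # -> populate_sysroot
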
@@ -1036,17 +1116,24 @@ addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
- # When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG')
- if not spkg.endswith(".tgz"):
+ writtensstate = d.getVar('SSTATE_CURRTASK')
+ if not writtensstate:
taskname = d.getVar("BB_RUNTASK")[3:]
spec = d.getVar('SSTATE_PKGSPEC')
swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG')
- bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
+ d.setVar("SSTATE_CURRTASK", taskname)
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+
}
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
@@ -1087,12 +1174,15 @@ python sstate_eventhandler2() {
with open(i, "r") as f:
lines = f.readlines()
for l in lines:
- (stamp, manifest, workdir) = l.split()
- if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
- toremove.append(l)
- if stamp not in seen:
- bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
- seen.append(stamp)
+ try:
+ (stamp, manifest, workdir) = l.split()
+ if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
+ toremove.append(l)
+ if stamp not in seen:
+ bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
+ seen.append(stamp)
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
if toremove:
msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
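
The try/except added around the manifest parsing turns a malformed line into an immediate, attributable failure instead of an unhandled exception: unpacking into exactly three names raises ValueError whenever a line does not contain three whitespace-separated fields. For illustration, with an invented line:

    # A one-field line cannot be unpacked into (stamp, manifest, workdir).
    line = "only-a-stamp-and-nothing-else\n"
    try:
        stamp, manifest, workdir = line.split()
    except ValueError as exc:
        print("Invalid line %r in sstate manifest: %s" % (line, exc))
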