Diffstat (limited to 'external/poky/meta/classes')
-rw-r--r--  external/poky/meta/classes/archiver.bbclass | 180
-rw-r--r--  external/poky/meta/classes/autotools.bbclass | 9
-rw-r--r--  external/poky/meta/classes/base.bbclass | 153
-rw-r--r--  external/poky/meta/classes/binconfig.bbclass | 9
-rw-r--r--  external/poky/meta/classes/bluetooth.bbclass | 14
-rw-r--r--  external/poky/meta/classes/buildhistory.bbclass | 94
-rw-r--r--  external/poky/meta/classes/buildstats.bbclass | 2
-rw-r--r--  external/poky/meta/classes/ccache.bbclass | 63
-rw-r--r--  external/poky/meta/classes/ccmake.bbclass | 97
-rw-r--r--  external/poky/meta/classes/chrpath.bbclass | 55
-rw-r--r--  external/poky/meta/classes/clutter.bbclass | 3
-rw-r--r--  external/poky/meta/classes/cmake.bbclass | 105
-rw-r--r--  external/poky/meta/classes/cml1.bbclass | 4
-rw-r--r--  external/poky/meta/classes/compress_doc.bbclass | 2
-rw-r--r--  external/poky/meta/classes/core-image.bbclass | 3
-rw-r--r--  external/poky/meta/classes/cpan-base.bbclass | 6
-rw-r--r--  external/poky/meta/classes/cpan.bbclass | 13
-rw-r--r--  external/poky/meta/classes/cpan_build.bbclass | 7
-rw-r--r--  external/poky/meta/classes/cross-canadian.bbclass | 2
-rw-r--r--  external/poky/meta/classes/cross.bbclass | 5
-rw-r--r--  external/poky/meta/classes/crosssdk.bbclass | 9
-rw-r--r--  external/poky/meta/classes/cve-check.bbclass | 33
-rw-r--r--  external/poky/meta/classes/debian.bbclass | 6
-rw-r--r--  external/poky/meta/classes/devicetree.bbclass | 13
-rw-r--r--  external/poky/meta/classes/devtool-source.bbclass | 12
-rw-r--r--  external/poky/meta/classes/distro_features_check.bbclass | 40
-rw-r--r--  external/poky/meta/classes/distrodata.bbclass | 427
-rw-r--r--  external/poky/meta/classes/distutils-base.bbclass | 4
-rw-r--r--  external/poky/meta/classes/distutils-tools.bbclass | 73
-rw-r--r--  external/poky/meta/classes/distutils.bbclass | 92
-rw-r--r--  external/poky/meta/classes/distutils3.bbclass | 63
-rw-r--r--  external/poky/meta/classes/externalsrc.bbclass | 7
-rw-r--r--  external/poky/meta/classes/extrausers.bbclass | 17
-rw-r--r--  external/poky/meta/classes/features_check.bbclass | 88
-rw-r--r--  external/poky/meta/classes/fontcache.bbclass | 1
-rw-r--r--  external/poky/meta/classes/gconf.bbclass | 2
-rw-r--r--  external/poky/meta/classes/gnome.bbclass | 1
-rw-r--r--  external/poky/meta/classes/go-mod.bbclass | 20
-rw-r--r--  external/poky/meta/classes/go-ptest.bbclass | 54
-rw-r--r--  external/poky/meta/classes/go.bbclass | 96
-rw-r--r--  external/poky/meta/classes/goarch.bbclass | 39
-rw-r--r--  external/poky/meta/classes/gobject-introspection.bbclass | 16
-rw-r--r--  external/poky/meta/classes/grub-efi-cfg.bbclass | 10
-rw-r--r--  external/poky/meta/classes/grub-efi.bbclass | 35
-rw-r--r--  external/poky/meta/classes/gsettings.bbclass | 48
-rw-r--r--  external/poky/meta/classes/gtk-doc.bbclass | 16
-rw-r--r--  external/poky/meta/classes/gtk-icon-cache.bbclass | 8
-rw-r--r--  external/poky/meta/classes/gtk-immodules-cache.bbclass | 1
-rw-r--r--  external/poky/meta/classes/icecc.bbclass | 70
-rw-r--r--  external/poky/meta/classes/image-buildinfo.bbclass | 6
-rw-r--r--  external/poky/meta/classes/image-live.bbclass | 2
-rw-r--r--  external/poky/meta/classes/image-prelink.bbclass | 17
-rw-r--r--  external/poky/meta/classes/image.bbclass | 34
-rw-r--r--  external/poky/meta/classes/image_types.bbclass | 29
-rw-r--r--  external/poky/meta/classes/image_types_wic.bbclass | 27
-rw-r--r--  external/poky/meta/classes/insane.bbclass | 383
-rw-r--r--  external/poky/meta/classes/kernel-devicetree.bbclass | 22
-rw-r--r--  external/poky/meta/classes/kernel-fitimage.bbclass | 106
-rw-r--r--  external/poky/meta/classes/kernel-module-split.bbclass | 23
-rw-r--r--  external/poky/meta/classes/kernel-uboot.bbclass | 4
-rw-r--r--  external/poky/meta/classes/kernel-uimage.bbclass | 2
-rw-r--r--  external/poky/meta/classes/kernel-yocto.bbclass | 143
-rw-r--r--  external/poky/meta/classes/kernel.bbclass | 61
-rw-r--r--  external/poky/meta/classes/kernelsrc.bbclass | 2
-rw-r--r--  external/poky/meta/classes/libc-common.bbclass | 37
-rw-r--r--  external/poky/meta/classes/libc-package.bbclass | 74
-rw-r--r--  external/poky/meta/classes/license.bbclass | 81
-rw-r--r--  external/poky/meta/classes/license_image.bbclass | 44
-rw-r--r--  external/poky/meta/classes/linuxloader.bbclass | 33
-rw-r--r--  external/poky/meta/classes/live-vm-common.bbclass | 33
-rw-r--r--  external/poky/meta/classes/manpages.bbclass | 9
-rw-r--r--  external/poky/meta/classes/mcextend.bbclass | 16
-rw-r--r--  external/poky/meta/classes/meson.bbclass | 66
-rw-r--r--  external/poky/meta/classes/metadata_scm.bbclass | 44
-rw-r--r--  external/poky/meta/classes/mime-xdg.bbclass | 74
-rw-r--r--  external/poky/meta/classes/mime.bbclass | 71
-rw-r--r--  external/poky/meta/classes/module.bbclass | 1
-rw-r--r--  external/poky/meta/classes/multilib.bbclass | 60
-rw-r--r--  external/poky/meta/classes/multilib_global.bbclass | 20
-rw-r--r--  external/poky/meta/classes/multilib_script.bbclass | 20
-rw-r--r--  external/poky/meta/classes/native.bbclass | 9
-rw-r--r--  external/poky/meta/classes/nativesdk.bbclass | 6
-rw-r--r--  external/poky/meta/classes/npm.bbclass | 362
-rw-r--r--  external/poky/meta/classes/package.bbclass | 420
-rw-r--r--  external/poky/meta/classes/package_deb.bbclass | 5
-rw-r--r--  external/poky/meta/classes/package_ipk.bbclass | 13
-rw-r--r--  external/poky/meta/classes/package_pkgdata.bbclass | 167
-rw-r--r--  external/poky/meta/classes/package_rpm.bbclass | 13
-rw-r--r--  external/poky/meta/classes/packagegroup.bbclass | 4
-rw-r--r--  external/poky/meta/classes/patch.bbclass | 7
-rw-r--r--  external/poky/meta/classes/perl-version.bbclass | 46
-rw-r--r--  external/poky/meta/classes/pixbufcache.bbclass | 3
-rw-r--r--  external/poky/meta/classes/populate_sdk_base.bbclass | 40
-rw-r--r--  external/poky/meta/classes/populate_sdk_ext.bbclass | 110
-rw-r--r--  external/poky/meta/classes/ptest.bbclass | 53
-rw-r--r--  external/poky/meta/classes/pypi.bbclass | 4
-rw-r--r--  external/poky/meta/classes/python-dir.bbclass | 5
-rw-r--r--  external/poky/meta/classes/python3-dir.bbclass | 4
-rw-r--r--  external/poky/meta/classes/python3native.bbclass | 10
-rw-r--r--  external/poky/meta/classes/pythonnative.bbclass | 19
-rw-r--r--  external/poky/meta/classes/qemu.bbclass | 3
-rw-r--r--  external/poky/meta/classes/qemuboot.bbclass | 19
-rw-r--r--  external/poky/meta/classes/relocatable.bbclass | 20
-rw-r--r--  external/poky/meta/classes/report-error.bbclass | 23
-rw-r--r--  external/poky/meta/classes/reproducible_build.bbclass | 60
-rw-r--r--  external/poky/meta/classes/reproducible_build_simple.bbclass | 1
-rw-r--r--  external/poky/meta/classes/rm_work.bbclass | 50
-rw-r--r--  external/poky/meta/classes/rootfs-postcommands.bbclass | 35
-rw-r--r--  external/poky/meta/classes/rootfs_ipk.bbclass | 2
-rw-r--r--  external/poky/meta/classes/sanity.bbclass | 71
-rw-r--r--  external/poky/meta/classes/scons.bbclass | 26
-rw-r--r--  external/poky/meta/classes/setuptools.bbclass | 3
-rw-r--r--  external/poky/meta/classes/siteinfo.bbclass | 10
-rw-r--r--  external/poky/meta/classes/sstate.bbclass | 274
-rw-r--r--  external/poky/meta/classes/staging.bbclass | 76
-rw-r--r--  external/poky/meta/classes/syslinux.bbclass | 5
-rw-r--r--  external/poky/meta/classes/systemd-boot-cfg.bbclass | 3
-rw-r--r--  external/poky/meta/classes/systemd-boot.bbclass | 31
-rw-r--r--  external/poky/meta/classes/systemd.bbclass | 56
-rw-r--r--  external/poky/meta/classes/terminal.bbclass | 4
-rw-r--r--  external/poky/meta/classes/testimage.bbclass | 97
-rw-r--r--  external/poky/meta/classes/texinfo.bbclass | 8
-rw-r--r--  external/poky/meta/classes/tinderclient.bbclass | 368
-rw-r--r--  external/poky/meta/classes/toaster.bbclass | 26
-rw-r--r--  external/poky/meta/classes/toolchain-scripts.bbclass | 1
-rw-r--r--  external/poky/meta/classes/uboot-extlinux-config.bbclass | 15
-rw-r--r--  external/poky/meta/classes/uboot-sign.bbclass | 122
-rw-r--r--  external/poky/meta/classes/uninative.bbclass | 17
-rw-r--r--  external/poky/meta/classes/update-alternatives.bbclass | 130
-rw-r--r--  external/poky/meta/classes/update-rc.d.bbclass | 28
-rw-r--r--  external/poky/meta/classes/useradd-staticids.bbclass | 8
-rw-r--r--  external/poky/meta/classes/useradd.bbclass | 7
-rw-r--r--  external/poky/meta/classes/utils.bbclass | 2
-rw-r--r--  external/poky/meta/classes/vala.bbclass | 2
-rw-r--r--  external/poky/meta/classes/waf.bbclass | 34
-rw-r--r--  external/poky/meta/classes/xmlcatalog.bbclass | 26
136 files changed, 3911 insertions, 2723 deletions
diff --git a/external/poky/meta/classes/archiver.bbclass b/external/poky/meta/classes/archiver.bbclass
index e321a0e3..1a3c1906 100644
--- a/external/poky/meta/classes/archiver.bbclass
+++ b/external/poky/meta/classes/archiver.bbclass
@@ -2,25 +2,42 @@
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# This bbclass is used for creating archive for:
-# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
-# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
-# 3) configured source: ARCHIVER_MODE[src] = "configured"
-# 4) The patches between do_unpack and do_patch:
-# ARCHIVER_MODE[diff] = "1"
-# And you can set the one that you'd like to exclude from the diff:
-# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
-# 5) The environment data, similar to 'bitbake -e recipe':
-# ARCHIVER_MODE[dumpdata] = "1"
-# 6) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
-# 7) Whether output the .src.rpm package:
-# ARCHIVER_MODE[srpm] = "1"
-# 8) Filter the license, the recipe whose license in
-# COPYLEFT_LICENSE_INCLUDE will be included, and in
-# COPYLEFT_LICENSE_EXCLUDE will be excluded.
-# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
-# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
-# 9) The recipe type that will be archived:
-# COPYLEFT_RECIPE_TYPES = 'target'
+# 1) original (or unpacked) source: ARCHIVER_MODE[src] = "original"
+# 2) patched source: ARCHIVER_MODE[src] = "patched" (default)
+# 3) configured source: ARCHIVER_MODE[src] = "configured"
+# 4) source mirror: ARCHIVER_MODE[src] = "mirror"
+# 5) The patches between do_unpack and do_patch:
+# ARCHIVER_MODE[diff] = "1"
+# And you can set the one that you'd like to exclude from the diff:
+# ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
+# 6) The environment data, similar to 'bitbake -e recipe':
+# ARCHIVER_MODE[dumpdata] = "1"
+# 7) The recipe (.bb and .inc): ARCHIVER_MODE[recipe] = "1"
+# 8) Whether to output the .src.rpm package:
+# ARCHIVER_MODE[srpm] = "1"
+# 9) Filter by license: recipes whose license is in
+# COPYLEFT_LICENSE_INCLUDE will be included, and those in
+# COPYLEFT_LICENSE_EXCLUDE will be excluded.
+# COPYLEFT_LICENSE_INCLUDE = 'GPL* LGPL*'
+# COPYLEFT_LICENSE_EXCLUDE = 'CLOSED Proprietary'
+# 10) The recipe type that will be archived:
+# COPYLEFT_RECIPE_TYPES = 'target'
+# 11) The source mirror mode:
+# ARCHIVER_MODE[mirror] = "split" (default): Sources are split into
+# per-recipe directories in a similar way to other archiver modes.
+# Post-processing may be required to produce a single mirror directory.
+# This does however allow inspection of duplicate sources and more
+# intelligent handling.
+# ARCHIVER_MODE[mirror] = "combined": All sources are placed into a single
+# directory suitable for direct use as a mirror. Duplicate sources are
+# ignored.
+# 12) Source mirror exclusions:
+# ARCHIVER_MIRROR_EXCLUDE is a list of prefixes to exclude from the mirror.
+# This may be used for sources which you are already publishing yourself
+# (e.g. if the URI starts with 'https://mysite.com/' and your mirror is
+# going to be published to the same site). It may also be used to exclude
+# local files (with the prefix 'file://') if these will be provided as part
+# of an archive of the layers themselves.
#
# Create archive for all the recipe types
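
For reference, the new mirror-related options documented in the header above could be enabled from a conffile such as conf/local.conf roughly as follows. This is a minimal illustrative sketch (the exclusion prefixes are placeholders), not part of the patch:

    INHERIT += "archiver"
    ARCHIVER_MODE[src] = "mirror"
    ARCHIVER_MODE[mirror] = "combined"
    # required by do_ar_mirror, as checked below
    BB_GENERATE_MIRROR_TARBALLS = "1"
    # skip sources already published elsewhere as well as local files
    ARCHIVER_MIRROR_EXCLUDE = "https://mysite.com/ file://"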
@@ -33,12 +50,20 @@ ARCHIVER_MODE[diff] ?= "0"
ARCHIVER_MODE[diff-exclude] ?= ".pc autom4te.cache patches"
ARCHIVER_MODE[dumpdata] ?= "0"
ARCHIVER_MODE[recipe] ?= "0"
+ARCHIVER_MODE[mirror] ?= "split"
DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/sources"
ARCHIVER_TOPDIR ?= "${WORKDIR}/deploy-sources"
ARCHIVER_OUTDIR = "${ARCHIVER_TOPDIR}/${TARGET_SYS}/${PF}/"
+ARCHIVER_RPMTOPDIR ?= "${WORKDIR}/deploy-sources-rpm"
+ARCHIVER_RPMOUTDIR = "${ARCHIVER_RPMTOPDIR}/${TARGET_SYS}/${PF}/"
ARCHIVER_WORKDIR = "${WORKDIR}/archiver-work/"
+# When producing a combined mirror directory, allow duplicates for the case
+# where multiple recipes use the same SRC_URI.
+ARCHIVER_COMBINED_MIRRORDIR = "${ARCHIVER_TOPDIR}/mirror"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}/mirror"
+
do_dumpdata[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_recipe[dirs] = "${ARCHIVER_OUTDIR}"
do_ar_original[dirs] = "${ARCHIVER_OUTDIR} ${ARCHIVER_WORKDIR}"
@@ -75,6 +100,9 @@ python () {
bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
return
+ def hasTask(task):
+ return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
+
ar_src = d.getVarFlag('ARCHIVER_MODE', 'src')
ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata')
ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe')
@@ -95,14 +123,13 @@ python () {
# There is a corner case with "gcc-source-${PV}" recipes, they don't have
# the "do_configure" task, so we need to use "do_preconfigure"
- def hasTask(task):
- return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
-
if hasTask("do_preconfigure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
elif hasTask("do_configure"):
d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
+ elif ar_src == "mirror":
+ d.appendVarFlag('do_deploy_archives', 'depends', '%s:do_ar_mirror' % pn)
elif ar_src:
bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
@@ -115,8 +142,15 @@ python () {
# Output the SRPM package
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == "1" and d.getVar('PACKAGES'):
- if "package_rpm" in d.getVar('PACKAGE_CLASSES'):
+ if "package_rpm" not in d.getVar('PACKAGE_CLASSES'):
+ bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
+
+ # Some recipes do not have any packaging tasks
+ if hasTask("do_package_write_rpm"):
d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_package_write_rpm' % pn)
+ d.appendVarFlag('do_package_write_rpm', 'dirs', ' ${ARCHIVER_RPMTOPDIR}')
+ d.appendVarFlag('do_package_write_rpm', 'sstate-inputdirs', ' ${ARCHIVER_RPMTOPDIR}')
+ d.appendVarFlag('do_package_write_rpm', 'sstate-outputdirs', ' ${DEPLOY_DIR_SRC}')
if ar_dumpdata == "1":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_dumpdata' % pn)
if ar_recipe == "1":
@@ -127,8 +161,6 @@ python () {
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_patched' % pn)
elif ar_src == "configured":
d.appendVarFlag('do_package_write_rpm', 'depends', ' %s:do_ar_configured' % pn)
- else:
- bb.fatal("ARCHIVER_MODE[srpm] needs package_rpm in PACKAGE_CLASSES")
}
# Take all the sources for a recipe and puts them in WORKDIR/archiver-work/.
@@ -161,7 +193,13 @@ python do_ar_original() {
del decoded[5][param]
encoded = bb.fetch2.encodeurl(decoded)
urls[i] = encoded
- fetch = bb.fetch2.Fetch(urls, d)
+
+ # Clean up SRC_URI before calling bb.fetch2.Fetch() since SRC_URI is now in
+ # the variable "urls", otherwise there might be errors like:
+ # The SRCREV_FORMAT variable must be set when multiple SCMs are used
+ ld = bb.data.createCopy(d)
+ ld.setVar('SRC_URI', '')
+ fetch = bb.fetch2.Fetch(urls, ld)
tarball_suffix = {}
for url in fetch.urls:
local = fetch.localpath(url).rstrip("/");
@@ -213,9 +251,10 @@ python do_ar_patched() {
# Get the ARCHIVER_OUTDIR before we reset the WORKDIR
ar_outdir = d.getVar('ARCHIVER_OUTDIR')
- ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ if not is_work_shared(d):
+ ar_workdir = d.getVar('ARCHIVER_WORKDIR')
+ d.setVar('WORKDIR', ar_workdir)
bb.note('Archiving the patched source...')
- d.setVar('WORKDIR', ar_workdir)
create_tarball(d, d.getVar('S'), 'patched', ar_outdir)
}
@@ -272,6 +311,79 @@ python do_ar_configured() {
create_tarball(d, srcdir, 'configured', ar_outdir)
}
+python do_ar_mirror() {
+ import subprocess
+
+ src_uri = (d.getVar('SRC_URI') or '').split()
+ if len(src_uri) == 0:
+ return
+
+ dl_dir = d.getVar('DL_DIR')
+ mirror_exclusions = (d.getVar('ARCHIVER_MIRROR_EXCLUDE') or '').split()
+ mirror_mode = d.getVarFlag('ARCHIVER_MODE', 'mirror')
+ have_mirror_tarballs = d.getVar('BB_GENERATE_MIRROR_TARBALLS')
+
+ if mirror_mode == 'combined':
+ destdir = d.getVar('ARCHIVER_COMBINED_MIRRORDIR')
+ elif mirror_mode == 'split':
+ destdir = d.getVar('ARCHIVER_OUTDIR')
+ else:
+ bb.fatal('Invalid ARCHIVER_MODE[mirror]: %s' % (mirror_mode))
+
+ if not have_mirror_tarballs:
+ bb.fatal('Using `ARCHIVER_MODE[src] = "mirror"` depends on setting `BB_GENERATE_MIRROR_TARBALLS = "1"`')
+
+ def is_excluded(url):
+ for prefix in mirror_exclusions:
+ if url.startswith(prefix):
+ return True
+ return False
+
+ bb.note('Archiving the source as a mirror...')
+
+ bb.utils.mkdirhier(destdir)
+
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+
+ for url in fetcher.urls:
+ if is_excluded(url):
+ bb.note('Skipping excluded url: %s' % (url))
+ continue
+
+ bb.note('Archiving url: %s' % (url))
+ ud = fetcher.ud[url]
+ ud.setup_localpath(d)
+ localpath = None
+
+ # Check for mirror tarballs first. We will archive the first mirror
+ # tarball that we find as it's assumed that we just need one.
+ for mirror_fname in ud.mirrortarballs:
+ mirror_path = os.path.join(dl_dir, mirror_fname)
+ if os.path.exists(mirror_path):
+ bb.note('Found mirror tarball: %s' % (mirror_path))
+ localpath = mirror_path
+ break
+
+ if len(ud.mirrortarballs) and not localpath:
+ bb.warn('Mirror tarballs are listed for a source but none are present. ' \
+ 'Falling back to original download.\n' \
+ 'SRC_URI = %s' % (url))
+
+ # Check original download
+ if not localpath:
+ bb.note('Using original download: %s' % (ud.localpath))
+ localpath = ud.localpath
+
+ if not localpath or not os.path.exists(localpath):
+ bb.fatal('Original download is missing for a source.\n' \
+ 'SRC_URI = %s' % (url))
+
+ # We now have an appropriate localpath
+ bb.note('Copying source mirror')
+ cmd = 'cp -fpPRH %s %s' % (localpath, destdir)
+ subprocess.check_call(cmd, shell=True)
+}
+
def exclude_useless_paths(tarinfo):
if tarinfo.isdir():
if tarinfo.name.endswith('/temp') or tarinfo.name.endswith('/patches') or tarinfo.name.endswith('/.pc'):
@@ -432,9 +544,10 @@ python do_ar_recipe () {
incfile = include_re.match(line).group(1)
if incfile:
incfile = d.expand(incfile)
+ if incfile:
incfile = bb.utils.which(bbpath, incfile)
- if incfile:
- shutil.copy(incfile, outdir)
+ if incfile:
+ shutil.copy(incfile, outdir)
create_tarball(d, outdir, 'recipe', d.getVar('ARCHIVER_OUTDIR'))
bb.utils.remove(outdir, recurse=True)
@@ -470,12 +583,15 @@ do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
addtask do_deploy_archives_setscene
addtask do_ar_original after do_unpack
-addtask do_unpack_and_patch after do_patch
+addtask do_unpack_and_patch after do_patch do_preconfigure
addtask do_ar_patched after do_unpack_and_patch
addtask do_ar_configured after do_unpack_and_patch
+addtask do_ar_mirror after do_fetch
addtask do_dumpdata
addtask do_ar_recipe
-addtask do_deploy_archives before do_build
+addtask do_deploy_archives
+do_build[recrdeptask] += "do_deploy_archives"
+do_populate_sdk[recrdeptask] += "do_deploy_archives"
python () {
# Add tasks in the correct order, specifically for linux-yocto to avoid race condition.
diff --git a/external/poky/meta/classes/autotools.bbclass b/external/poky/meta/classes/autotools.bbclass
index 8768a6ad..6c2a33ac 100644
--- a/external/poky/meta/classes/autotools.bbclass
+++ b/external/poky/meta/classes/autotools.bbclass
@@ -25,7 +25,9 @@ inherit siteinfo
# Space separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+# The value of this variable is filled in in a prefunc because it depends on
+# the contents of the sysroot.
+export CONFIG_SITE
acpaths ?= "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
@@ -88,7 +90,7 @@ oe_runconf () {
cfgscript=`python3 -c "import os; print(os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.'))"`/$cfgscript_name
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
- if ! ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+ if ! ${CACHED_CONFIGUREVARS} CONFIG_SHELL=/bin/bash $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
bbnote "The following config.log files may provide further information."
bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
bbfatal_log "configure failed"
@@ -132,6 +134,8 @@ EXTRACONFFUNCS ??= ""
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
do_configure[prefuncs] += "autotools_preconfigure autotools_aclocals ${EXTRACONFFUNCS}"
+do_compile[prefuncs] += "autotools_aclocals"
+do_install[prefuncs] += "autotools_aclocals"
do_configure[postfuncs] += "autotools_postconfigure"
ACLOCALDIR = "${STAGING_DATADIR}/aclocal"
@@ -140,7 +144,6 @@ ACLOCALEXTRAPATH_class-target = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
ACLOCALEXTRAPATH_class-nativesdk = " -I ${STAGING_DATADIR_NATIVE}/aclocal/"
python autotools_aclocals () {
- # Refresh variable with cache files
d.setVar("CONFIG_SITE", siteinfo_get_files(d, sysrootcache=True))
}
diff --git a/external/poky/meta/classes/base.bbclass b/external/poky/meta/classes/base.bbclass
index bc9b236b..7aa2e144 100644
--- a/external/poky/meta/classes/base.bbclass
+++ b/external/poky/meta/classes/base.bbclass
@@ -10,9 +10,13 @@ inherit utility-tasks
inherit metadata_scm
inherit logging
-OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license"
+OE_EXTRA_IMPORTS ?= ""
+
+OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
+PACKAGECONFIG_CONFARGS ??= ""
+
def oe_import(d):
import sys
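
The new OE_EXTRA_IMPORTS variable above gives layers a hook to add their own Python modules to the set imported for every recipe. A hypothetical layer or distro configuration could set, for example:

    # module name is illustrative, not from this patch
    OE_EXTRA_IMPORTS += "oe.myhelpers"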
@@ -28,9 +32,11 @@ def oe_import(d):
import oe.data
for toimport in oe.data.typed_value("OE_IMPORTS", d):
- imported = __import__(toimport)
- inject(toimport.split(".", 1)[0], imported)
-
+ try:
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
+ except AttributeError as e:
+ bb.error("Error importing OE modules: %s" % str(e))
return ""
# We need the oe module name space early (before INHERITs get added)
@@ -121,18 +127,20 @@ def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
for tool in tools:
desttool = os.path.join(dest, tool)
if not os.path.exists(desttool):
+ # clean up dead symlink
+ if os.path.islink(desttool):
+ os.unlink(desttool)
srctool = bb.utils.which(path, tool, executable=True)
+ # gcc/g++ may link to ccache on some hosts, e.g.,
+ # /usr/local/bin/ccache/gcc -> /usr/bin/ccache, then which(gcc)
+ # would return /usr/local/bin/ccache/gcc, but what we need is
+ # /usr/bin/gcc, this code can check and fix that.
if "ccache" in srctool:
srctool = bb.utils.which(path, tool, executable=True, direction=1)
if srctool:
os.symlink(srctool, desttool)
else:
notfound.append(tool)
- # Force "python" -> "python2"
- desttool = os.path.join(dest, "python")
- if not os.path.exists(desttool):
- srctool = "python2"
- os.symlink(srctool, desttool)
if notfound and fatal:
bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))
@@ -216,7 +224,7 @@ def buildcfg_neededvars(d):
bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
import bb.runqueue
@@ -224,6 +232,12 @@ python base_eventhandler() {
if not d.getVar("NATIVELSBSTRING", False):
d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
d.setVar('BB_VERSION', bb.__version__)
+
+ # There might be no bb.event.ConfigParsed event if bitbake server is
+ # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
+ # exists.
+ if isinstance(e, bb.event.ConfigParsed) or \
+ (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
# Works with the line in layer.conf which changes PATH to point here
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)
@@ -260,23 +274,10 @@ python base_eventhandler() {
if isinstance(e, bb.event.RecipePreFinalise):
if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
- d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc-initial")
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")
- if isinstance(e, bb.runqueue.sceneQueueComplete):
- completions = d.expand("${STAGING_DIR}/sstatecompletions")
- if os.path.exists(completions):
- cmds = set()
- with open(completions, "r") as f:
- cmds = set(f)
- d.setVar("completion_function", "\n".join(cmds))
- d.setVarFlag("completion_function", "func", "1")
- bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
- bb.build.exec_func("completion_function", d)
- os.remove(completions)
-
if isinstance(e, bb.event.RecipeParsed):
#
# If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
@@ -301,7 +302,6 @@ CLEANBROKEN = "0"
addtask configure after do_patch
do_configure[dirs] = "${B}"
-do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
base_do_configure() {
if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
@@ -393,7 +393,7 @@ python () {
# These take the form:
#
# PACKAGECONFIG ??= "<default options>"
- # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
if pkgconfigflags:
pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
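
To illustrate the new sixth field, a recipe could mark two options as mutually exclusive as sketched below (hypothetical option names, not from this patch); enabling both at once then hits the bb.fatal() added further down:

    PACKAGECONFIG ??= "gnutls"
    PACKAGECONFIG[gnutls]  = "--with-gnutls,--without-gnutls,gnutls,,,openssl"
    PACKAGECONFIG[openssl] = "--with-openssl,--without-openssl,openssl,,,gnutls"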
@@ -440,8 +440,8 @@ python () {
for flag, flagval in sorted(pkgconfigflags.items()):
items = flagval.split(",")
num = len(items)
- if num > 5:
- bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend can be specified!"
+ if num > 6:
+ bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
% (d.getVar('PN'), flag))
if flag in pkgconfig:
@@ -455,6 +455,20 @@ python () {
extraconf.append(items[0])
elif num >= 2 and items[1]:
extraconf.append(items[1])
+
+ if num >= 6 and items[5]:
+ conflicts = set(items[5].split())
+ invalid = conflicts.difference(set(pkgconfigflags.keys()))
+ if invalid:
+ bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
+ % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))
+
+ if flag in pkgconfig:
+ intersec = conflicts.intersection(set(pkgconfig))
+ if intersec:
+ bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
+ % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))
+
appendVar('DEPENDS', extradeps)
appendVar('RDEPENDS_${PN}', extrardeps)
appendVar('RRECOMMENDS_${PN}', extrarrecs)
@@ -467,16 +481,20 @@ python () {
if bb.data.inherits_class('license', d):
check_license_format(d)
- unmatched_license_flag = check_license_flags(d)
- if unmatched_license_flag:
- bb.debug(1, "Skipping %s because it has a restricted license not"
- " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
- raise bb.parse.SkipRecipe("because it has a restricted license not"
- " whitelisted in LICENSE_FLAGS_WHITELIST")
+ unmatched_license_flags = check_license_flags(d)
+ if unmatched_license_flags:
+ if len(unmatched_license_flags) == 1:
+ message = "because it has a restricted license '{0}'. Which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
+ else:
+ message = "because it has restricted licenses {0}. Which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
+ ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
+ bb.debug(1, "Skipping %s %s" % (pn, message))
+ raise bb.parse.SkipRecipe(message)
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_unpack', 'umask', '022')
d.setVarFlag('do_configure', 'umask', '022')
d.setVarFlag('do_compile', 'umask', '022')
@@ -492,7 +510,7 @@ python () {
d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
need_machine = d.getVar('COMPATIBLE_MACHINE')
- if need_machine:
+ if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
import re
compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
for m in compat_machines:
@@ -501,7 +519,7 @@ python () {
else:
raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
- source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
if not source_mirror_fetch:
need_host = d.getVar('COMPATIBLE_HOST')
if need_host:
@@ -525,58 +543,46 @@ python () {
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
whitelist = []
- incompatwl = []
for lic in bad_licenses:
spdx_license = return_spdx(d, lic)
whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
if spdx_license:
whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
+
+ if pn in whitelist:
'''
We need to track what we are whitelisting and why. If pn is
incompatible we need to be able to note that the image that
is created may infact contain incompatible licenses despite
INCOMPATIBLE_LICENSE being set.
'''
- incompatwl.extend((d.getVar("WHITELIST_" + lic) or "").split())
- if spdx_license:
- incompatwl.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
-
- if not pn in whitelist:
+ bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
+ else:
pkgs = d.getVar('PACKAGES').split()
- skipped_pkgs = []
+ skipped_pkgs = {}
unskipped_pkgs = []
for pkg in pkgs:
- if incompatible_license(d, bad_licenses, pkg):
- skipped_pkgs.append(pkg)
+ incompatible_lic = incompatible_license(d, bad_licenses, pkg)
+ if incompatible_lic:
+ skipped_pkgs[pkg] = incompatible_lic
else:
unskipped_pkgs.append(pkg)
- all_skipped = skipped_pkgs and not unskipped_pkgs
if unskipped_pkgs:
for pkg in skipped_pkgs:
- bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
+ bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
mlprefix = d.getVar('MLPREFIX')
- d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
+ d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, ' '.join(skipped_pkgs[pkg]))
for pkg in unskipped_pkgs:
- bb.debug(1, "INCLUDING the package " + pkg)
- elif all_skipped or incompatible_license(d, bad_licenses):
- bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
- raise bb.parse.SkipRecipe("it has an incompatible license: %s" % license)
- elif pn in whitelist:
- if pn in incompatwl:
- bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
-
- # Try to verify per-package (LICENSE_<pkg>) values. LICENSE should be a
- # superset of all per-package licenses. We do not do advanced (pattern)
- # matching of license expressions - just check that all license strings
- # in LICENSE_<pkg> are found in LICENSE.
- license_set = oe.license.list_licenses(license)
- for pkg in d.getVar('PACKAGES').split():
- pkg_license = d.getVar('LICENSE_' + pkg)
- if pkg_license:
- unlisted = oe.license.list_licenses(pkg_license) - license_set
- if unlisted:
- bb.warn("LICENSE_%s includes licenses (%s) that are not "
- "listed in LICENSE" % (pkg, ' '.join(unlisted)))
+ bb.debug(1, "Including the package %s" % pkg)
+ else:
+ incompatible_lic = incompatible_license(d, bad_licenses)
+ for pkg in skipped_pkgs:
+ incompatible_lic += skipped_pkgs[pkg]
+ incompatible_lic = sorted(list(set(incompatible_lic)))
+
+ if incompatible_lic:
+ bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
+ raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))
needsrcrev = False
srcuri = d.getVar('SRC_URI')
@@ -600,6 +606,7 @@ python () {
# Mercurial packages should DEPEND on mercurial-native
elif scheme == "hg":
needsrcrev = True
+ d.appendVar("EXTRANATIVEPATH", ' python3-native ')
d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# Perforce packages support SRCREV = "${AUTOREV}"
@@ -640,6 +647,18 @@ python () {
if needsrcrev:
d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
+ # Gather all named SRCREVs to add to the sstate hash calculation
+ # This anonymous python snippet is called multiple times so we
+ # need to be careful to not double up the appends here and cause
+ # the base hash to mismatch the task hash
+ for uri in srcuri.split():
+ parm = bb.fetch.decodeurl(uri)[5]
+ uri_names = parm.get("name", "").split(",")
+ for uri_name in filter(None, uri_names):
+ srcrev_name = "SRCREV_{}".format(uri_name)
+ if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
+ d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
+
set_packagetriplet(d)
# 'multimachine' handling
diff --git a/external/poky/meta/classes/binconfig.bbclass b/external/poky/meta/classes/binconfig.bbclass
index 133b9537..9112ed46 100644
--- a/external/poky/meta/classes/binconfig.bbclass
+++ b/external/poky/meta/classes/binconfig.bbclass
@@ -40,15 +40,6 @@ binconfig_package_preprocess () {
-e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$config
done
- for lafile in `find ${PKGD} -type f -name "*.la"` ; do
- sed -i \
- -e 's:${STAGING_BASELIBDIR}:${base_libdir}:g;' \
- -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${STAGING_INCDIR}:${includedir}:g;' \
- -e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
- $lafile
- done
}
SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
diff --git a/external/poky/meta/classes/bluetooth.bbclass b/external/poky/meta/classes/bluetooth.bbclass
deleted file mode 100644
index f88b4ae5..00000000
--- a/external/poky/meta/classes/bluetooth.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
-# Avoid code duplication in bluetooth-dependent recipes.
-
-# Define a variable that expands to the recipe (package) providing core
-# bluetooth support on the platform:
-# "" if bluetooth is not in DISTRO_FEATURES
-# else "bluez5" if bluez5 is in DISTRO_FEATURES
-# else "bluez4"
-
-# Use this with:
-# inherit bluetooth
-# PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}
-# PACKAGECONFIG[bluez4] = "--enable-bluez4,--disable-bluez4,bluez4"
-
-BLUEZ ?= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', bb.utils.contains('DISTRO_FEATURES', 'bluez5', 'bluez5', 'bluez4', d), '', d)}"
diff --git a/external/poky/meta/classes/buildhistory.bbclass b/external/poky/meta/classes/buildhistory.bbclass
index 40b292b1..156324d3 100644
--- a/external/poky/meta/classes/buildhistory.bbclass
+++ b/external/poky/meta/classes/buildhistory.bbclass
@@ -40,6 +40,7 @@ BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf con
BUILDHISTORY_COMMIT ?= "1"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
+BUILDHISTORY_TAG ?= "build"
SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
# We want to avoid influencing the signatures of sstate tasks - first the function itself:
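
The new BUILDHISTORY_TAG default of "build" feeds the <tag>-minus-1/2/3 rolling git tags rewritten later in this file. A typical conf/local.conf sketch enabling buildhistory (values are illustrative) would be:

    INHERIT += "buildhistory"
    BUILDHISTORY_COMMIT = "1"
    # optional: prefix used for the rolling <tag>-minus-N tags
    BUILDHISTORY_TAG = "release"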
@@ -60,15 +61,34 @@ SSTATEPOSTUNPACKFUNCS[vardepvalueexclude] .= "| buildhistory_emit_outputsigs"
# When extending build history, derive your class from buildhistory.bbclass
# and extend this list here with the additional files created by the derived
# class.
-BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+BUILDHISTORY_PRESERVE = "latest latest_srcrev sysroot"
PATCH_GIT_USER_EMAIL ?= "buildhistory@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
#
+# Write out the contents of the sysroot
+#
+buildhistory_emit_sysroot() {
+ mkdir --parents ${BUILDHISTORY_DIR_PACKAGE}
+ case ${CLASSOVERRIDE} in
+ class-native|class-cross|class-crosssdk)
+ BASE=${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}
+ ;;
+ *)
+ BASE=${SYSROOT_DESTDIR}
+ ;;
+ esac
+ buildhistory_list_files_no_owners $BASE ${BUILDHISTORY_DIR_PACKAGE}/sysroot
+}
+
+#
# Write out metadata about this package for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
+ if d.getVar('BB_CURRENTTASK') in ['populate_sysroot', 'populate_sysroot_setscene']:
+ bb.build.exec_func("buildhistory_emit_sysroot", d)
+
if not d.getVar('BB_CURRENTTASK') in ['packagedata', 'packagedata_setscene']:
return 0
@@ -261,7 +281,7 @@ python buildhistory_emit_pkghistory() {
last_pkgr = lastversion.pkgr
r = bb.utils.vercmp((pkge, pkgv, pkgr), (last_pkge, last_pkgv, last_pkgr))
if r < 0:
- msg = "Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
+ msg = "Package version for package %s went backwards which would break package feeds (from %s:%s-%s to %s:%s-%s)" % (pkg, last_pkge, last_pkgv, last_pkgr, pkge, pkgv, pkgr)
package_qa_handle_error("version-going-backwards", msg, d)
pkginfo = PackageInfo(pkg)
@@ -406,8 +426,8 @@ def buildhistory_list_installed(d, rootfs_type="image"):
from oe.sdk import sdk_list_installed_packages
from oe.utils import format_pkg_list
- process_list = [('file', 'bh_installed_pkgs.txt'),\
- ('deps', 'bh_installed_pkgs_deps.txt')]
+ process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\
+ ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())]
if rootfs_type == "image":
pkgs = image_list_installed_packages(d)
@@ -437,9 +457,10 @@ buildhistory_get_installed() {
# Get list of installed packages
pkgcache="$1/installed-packages.tmp"
- cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt
+ cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt
cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt
+
if [ -s $pkgcache ] ; then
cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt
else
@@ -448,8 +469,8 @@ buildhistory_get_installed() {
# Produce dependency graph
# First, quote each name to handle characters that cause issues for dot
- sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp &&
- rm ${WORKDIR}/bh_installed_pkgs_deps.txt
+ sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp &&
+ rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt
# Remove lines with rpmlib(...) and config(...) dependencies, change the
# delimiter from pipe to "->", set the style for recommend lines and
# turn versioned dependencies into edge labels.
@@ -519,12 +540,28 @@ buildhistory_get_sdk_installed_target() {
buildhistory_list_files() {
# List the files in the specified directory, but exclude date/time etc.
- # This awk script is somewhat messy, but handles where the size is not printed for device files under pseudo
+ # This is somewhat messy, but handles where the size is not printed for device files under pseudo
+ ( cd $1
+ find_cmd='find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n"'
if [ "$3" = "fakeroot" ] ; then
- ( cd $1 && ${FAKEROOTENV} ${FAKEROOTCMD} find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
+ eval ${FAKEROOTENV} ${FAKEROOTCMD} $find_cmd
else
- ( cd $1 && find . ! -path . -printf "%M %-10u %-10g %10s %p -> %l\n" | sort -k5 | sed 's/ * -> $//' > $2 )
- fi
+ eval $find_cmd
+ fi | sort -k5 | sed 's/ * -> $//' > $2 )
+}
+
+buildhistory_list_files_no_owners() {
+ # List the files in the specified directory, but exclude date/time etc.
+ # Also don't output the ownership data, but instead output just - - so
+ # that the same parsing code as for _list_files works.
+ # This is somewhat messy, but handles where the size is not printed for device files under pseudo
+ ( cd $1
+ find_cmd='find . ! -path . -printf "%M - - %10s %p -> %l\n"'
+ if [ "$3" = "fakeroot" ] ; then
+ eval ${FAKEROOTENV} ${FAKEROOTCMD} "$find_cmd"
+ else
+ eval "$find_cmd"
+ fi | sort -k5 | sed 's/ * -> $//' > $2 )
}
buildhistory_list_pkg_files() {
@@ -670,12 +707,29 @@ def buildhistory_get_build_id(d):
statusheader = d.getVar('BUILDCFG_HEADER')
return('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+def buildhistory_get_modified(path):
+ # copied from get_layer_git_status() in image-buildinfo.bbclass
+ import subprocess
+ try:
+ subprocess.check_output("""cd %s; export PSEUDO_UNLOAD=1; set -e;
+ git diff --quiet --no-ext-diff
+ git diff --quiet --no-ext-diff --cached""" % path,
+ shell=True,
+ stderr=subprocess.STDOUT)
+ return ""
+ except subprocess.CalledProcessError as ex:
+ # Silently treat errors as "modified", without checking for the
+ # (expected) return code 1 in a modified git repo. For example, we get
+ # output and a 129 return code when a layer isn't a git repo at all.
+ return " -- modified"
+
def buildhistory_get_metadata_revs(d):
# We want an easily machine-readable format here, so get_layers_branch_rev isn't quite what we want
layers = (d.getVar("BBLAYERS") or "").split()
- medadata_revs = ["%-17s = %s:%s" % (os.path.basename(i), \
+ medadata_revs = ["%-17s = %s:%s%s" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
- base_get_metadata_git_revision(i, None)) \
+ base_get_metadata_git_revision(i, None), \
+ buildhistory_get_modified(i)) \
for i in layers]
return '\n'.join(medadata_revs)
@@ -772,9 +826,9 @@ END
if [ ! -e .git ] ; then
git init -q
else
- git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
- git tag -f build-minus-2 build-minus-1 > /dev/null 2>&1 || true
- git tag -f build-minus-1 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-3 ${BUILDHISTORY_TAG}-minus-2 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-2 ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
+ git tag -f ${BUILDHISTORY_TAG}-minus-1 > /dev/null 2>&1 || true
fi
check_git_config
@@ -820,11 +874,15 @@ python buildhistory_eventhandler() {
if e.data.getVar("BUILDHISTORY_COMMIT") == "1":
bb.note("Writing buildhistory")
bb.build.exec_func("buildhistory_write_sigs", d)
+ import time
+ start=time.time()
localdata = bb.data.createCopy(e.data)
localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
interrupted = getattr(e, '_interrupted', 0)
localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
bb.build.exec_func("buildhistory_commit", localdata)
+ stop=time.time()
+ bb.note("Writing buildhistory took: %s seconds" % round(stop-start))
else:
bb.note("No commit since BUILDHISTORY_COMMIT != '1'")
}
@@ -896,7 +954,7 @@ def write_latest_srcrev(d, pkghistdir):
if orig_srcrev != 'INVALID':
f.write('# SRCREV = "%s"\n' % orig_srcrev)
if len(srcrevs) > 1:
- for name, srcrev in srcrevs.items():
+ for name, srcrev in sorted(srcrevs.items()):
orig_srcrev = d.getVar('SRCREV_%s' % name, False)
if orig_srcrev:
f.write('# SRCREV_%s = "%s"\n' % (name, orig_srcrev))
@@ -904,7 +962,7 @@ def write_latest_srcrev(d, pkghistdir):
else:
f.write('SRCREV = "%s"\n' % next(iter(srcrevs.values())))
if len(tag_srcrevs) > 0:
- for name, srcrev in tag_srcrevs.items():
+ for name, srcrev in sorted(tag_srcrevs.items()):
f.write('# tag_%s = "%s"\n' % (name, srcrev))
if name in old_tag_srcrevs and old_tag_srcrevs[name] != srcrev:
pkg = d.getVar('PN')
diff --git a/external/poky/meta/classes/buildstats.bbclass b/external/poky/meta/classes/buildstats.bbclass
index 960653c7..2590c60c 100644
--- a/external/poky/meta/classes/buildstats.bbclass
+++ b/external/poky/meta/classes/buildstats.bbclass
@@ -100,7 +100,7 @@ def write_task_data(status, logfile, e, d):
f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
for i in rusages:
f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
- if status is "passed":
+ if status == "passed":
f.write("Status: PASSED \n")
else:
f.write("Status: FAILED \n")
diff --git a/external/poky/meta/classes/ccache.bbclass b/external/poky/meta/classes/ccache.bbclass
index 96090206..b5457359 100644
--- a/external/poky/meta/classes/ccache.bbclass
+++ b/external/poky/meta/classes/ccache.bbclass
@@ -1,5 +1,37 @@
-CCACHE = "${@bb.utils.which(d.getVar('PATH'), 'ccache') and 'ccache '}"
-export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_TARGET_SYS}/${PN}"
+#
+# Usage:
+# - Enable ccache
+# Add the following line to a conffile such as conf/local.conf:
+# INHERIT += "ccache"
+#
+# - Disable ccache for a recipe
+# Add the following line to the recipe if it can't be built with ccache:
+# CCACHE_DISABLE = '1'
+#
+# - Share ccache files between different builds
+# Set CCACHE_TOP_DIR to a shared dir
+# CCACHE_TOP_DIR = /path/to/shared_ccache/
+#
+# - To debug ccache
+# export CCACHE_DEBUG = "1"
+# export CCACHE_LOGFILE = "${CCACHE_DIR}/logfile.log"
+# Also set PARALLEL_MAKE = "-j 1" to keep the make log in order
+#
+
+# Set it to a shared location for different builds, so that cache files can
+# be shared between different builds.
+CCACHE_TOP_DIR ?= "${TMPDIR}/ccache"
+
+# ccache removes CCACHE_BASEDIR from file paths, so that hashes will be the same
+# in different builds.
+export CCACHE_BASEDIR ?= "${TMPDIR}"
+
+# Used for sharing cache files after compiler is rebuilt
+export CCACHE_COMPILERCHECK ?= "%compiler% -dumpspecs"
+
+export CCACHE_CONFIGPATH ?= "${COREBASE}/meta/conf/ccache.conf"
+
+export CCACHE_DIR ?= "${CCACHE_TOP_DIR}/${MULTIMACH_TARGET_SYS}/${PN}"
# We need to stop ccache considering the current directory or the
# debug-prefix-map target directory to be significant when calculating
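
Putting the usage notes above together, a shared-cache setup might look like the following conf/local.conf sketch (the shared path is a placeholder):

    INHERIT += "ccache"
    # share cache files between build directories
    CCACHE_TOP_DIR = "/path/to/shared_ccache/"
    # and in an individual recipe that cannot be built with ccache:
    CCACHE_DISABLE = "1"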
@@ -7,5 +39,28 @@ export CCACHE_DIR ?= "${TMPDIR}/ccache/${MULTIMACH_TARGET_SYS}/${PN}"
# ${PV} or ${PR} change.
export CCACHE_NOHASHDIR ?= "1"
-DEPENDS_append_class-target = " ccache-native"
-DEPENDS[vardepvalueexclude] = " ccache-native"
+python() {
+ """
+ Enable ccache for the recipe
+ """
+ pn = d.getVar('PN')
+ # quilt-native doesn't need ccache since no c files
+ if not (pn in ('ccache-native', 'quilt-native') or
+ bb.utils.to_boolean(d.getVar('CCACHE_DISABLE'))):
+ d.appendVar('DEPENDS', ' ccache-native')
+ d.setVar('CCACHE', 'ccache ')
+}
+
+addtask cleanccache after do_clean
+python do_cleanccache() {
+ import shutil
+
+ ccache_dir = d.getVar('CCACHE_DIR')
+ if os.path.exists(ccache_dir):
+ bb.note("Removing %s" % ccache_dir)
+ shutil.rmtree(ccache_dir)
+ else:
+ bb.note("%s doesn't exist" % ccache_dir)
+}
+addtask cleanall after do_cleanccache
+do_cleanccache[nostamp] = "1"
diff --git a/external/poky/meta/classes/ccmake.bbclass b/external/poky/meta/classes/ccmake.bbclass
new file mode 100644
index 00000000..df5134a1
--- /dev/null
+++ b/external/poky/meta/classes/ccmake.bbclass
@@ -0,0 +1,97 @@
+inherit terminal
+
+python do_ccmake() {
+ import shutil
+
+ # copy current config for diffing
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config):
+ shutil.copy(config, config + ".orig")
+
+ oe_terminal(d.expand("ccmake ${OECMAKE_GENERATOR_ARGS} ${OECMAKE_SOURCEPATH} -Wno-dev"),
+ d.getVar("PN") + " - ccmake", d)
+
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+ # the cmake class uses cmake --build, which will by default
+ # regenerate configuration, simply mark the compile step as tainted
+ # to ensure it is re-run
+ bb.note("Configuration changed, recompile will be forced")
+ bb.build.write_taint('do_compile', d)
+
+}
+do_ccmake[depends] += "cmake-native:do_populate_sysroot"
+do_ccmake[nostamp] = "1"
+do_ccmake[dirs] = "${B}"
+addtask ccmake after do_configure
+
+def cmake_parse_config_cache(path):
+ with open(path, "r") as f:
+ for i in f:
+ i = i.rstrip("\n")
+ if len(i) == 0 or i.startswith("//") or i.startswith("#"):
+ continue # empty or comment
+ key, value = i.split("=", 1)
+ key, keytype = key.split(":")
+ if keytype in ["INTERNAL", "STATIC"]:
+ continue # skip internal and static config options
+ yield key, keytype, value
+
+def cmake_diff_config_vars(a, b):
+ removed, added = [], []
+
+ for ak, akt, av in a:
+ found = False
+ for bk, bkt, bv in b:
+ if bk == ak:
+ found = True
+ if bkt != akt or bv != av: # changed
+ removed.append((ak, akt, av))
+ added.append((bk, bkt, bv))
+ break
+ # remove any missing from b
+ if not found:
+ removed.append((ak, akt, av))
+
+ # add any missing from a
+ for bk, bkt, bv in b:
+ if not any(bk == ak for ak, akt, av in a):
+ added.append((bk, bkt, bv))
+
+ return removed, added
+
+python do_ccmake_diffconfig() {
+ import shutil
+ config = os.path.join(d.getVar("B"), "CMakeCache.txt")
+ if os.path.exists(config) and os.path.exists(config + ".orig"):
+ if bb.utils.md5_file(config) != bb.utils.md5_file(config + ".orig"):
+ # scan the changed options
+ old = list(cmake_parse_config_cache(config + ".orig"))
+ new = list(cmake_parse_config_cache(config))
+ _, added = cmake_diff_config_vars(old, new)
+
+ if len(added) != 0:
+ with open(d.expand("${WORKDIR}/configuration.inc"), "w") as f:
+ f.write("EXTRA_OECMAKE += \" \\\n")
+ for k, kt, v in added:
+ escaped = v if " " not in v else "\"{0}\"".format(v)
+ f.write(" -D{0}:{1}={2} \\\n".format(k, kt, escaped))
+ f.write(" \"\n")
+ bb.plain("Configuration recipe fragment written to: {0}".format(d.expand("${WORKDIR}/configuration.inc")))
+
+ with open(d.expand("${WORKDIR}/site-file.cmake"), "w") as f:
+ for k, kt, v in added:
+ f.write("SET({0} \"{1}\" CACHE {2} \"\")\n".format(k, v, kt))
+ bb.plain("Configuration cmake fragment written to: {0}".format(d.expand("${WORKDIR}/site-file.cmake")))
+
+ # restore the original config
+ shutil.copy(config + ".orig", config)
+ else:
+ bb.plain("No configuration differences, skipping configuration fragment generation.")
+ else:
+ bb.fatal("No config files found. Did you run ccmake?")
+}
+do_ccmake_diffconfig[nostamp] = "1"
+do_ccmake_diffconfig[dirs] = "${B}"
+addtask ccmake_diffconfig
+
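
As a usage note for the new class: running "bitbake -c ccmake <recipe>" opens the interactive configuration, and "bitbake -c ccmake_diffconfig <recipe>" then writes the changed options to ${WORKDIR}/configuration.inc in the format emitted above, for example (hypothetical CMake cache variable):

    EXTRA_OECMAKE += " \
        -DENABLE_FOO:BOOL=ON \
        "

The generated fragment is intended to be copied into the recipe or a .bbappend.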
diff --git a/external/poky/meta/classes/chrpath.bbclass b/external/poky/meta/classes/chrpath.bbclass
index ad3c3975..26b984c4 100644
--- a/external/poky/meta/classes/chrpath.bbclass
+++ b/external/poky/meta/classes/chrpath.bbclass
@@ -1,17 +1,20 @@
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
-def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
- import subprocess as sub
-
- p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- out, err = p.communicate()
- # If returned successfully, process stdout for results
- if p.returncode != 0:
+def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
+ import subprocess, oe.qa
+
+ with oe.qa.ELFFile(fpath) as elf:
+ try:
+ elf.open()
+ except oe.qa.NotELFFileError:
+ return
+
+ try:
+ out = subprocess.check_output([cmd, "-l", fpath], universal_newlines=True)
+ except subprocess.CalledProcessError:
return
- out = out.decode('utf-8')
-
# Handle RUNPATH as well as RPATH
out = out.replace("RUNPATH=","RPATH=")
# Throw away everything other than the rpath list
@@ -39,14 +42,18 @@ def process_file_linux(cmd, fpath, rootdir, baseprefix, tmpdir, d):
# if we have modified some rpaths call chrpath to update the binary
if modified:
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath, args))
- p = sub.Popen([cmd, '-r', args, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
- out, err = p.communicate()
- if p.returncode != 0:
- bb.fatal("%s: chrpath command failed with exit code %d:\n%s%s" % (d.getVar('PN'), p.returncode, out, err))
+ try:
+ subprocess.check_output([cmd, "-r", args, fpath],
+ stderr=subprocess.PIPE, universal_newlines=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("chrpath command failed with exit code %d:\n%s\n%s" % (e.returncode, e.stdout, e.stderr))
-def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
+def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = False):
import subprocess as sub
p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
@@ -61,11 +68,18 @@ def process_file_darwin(cmd, fpath, rootdir, baseprefix, tmpdir, d):
if baseprefix not in rpath:
continue
+ if break_hardlinks:
+ bb.utils.break_hardlinks(fpath)
+
newpath = "@loader_path/" + os.path.relpath(rpath, os.path.dirname(fpath.replace(rootdir, "/")))
p = sub.Popen([d.expand("${HOST_PREFIX}install_name_tool"), '-change', rpath, newpath, fpath],stdout=sub.PIPE,stderr=sub.PIPE)
out, err = p.communicate()
-def process_dir (rootdir, directory, d):
+def process_dir(rootdir, directory, d, break_hardlinks = False):
+ bb.debug(2, "Checking %s for binaries to process" % directory)
+ if not os.path.exists(directory):
+ return
+
import stat
rootdir = os.path.normpath(rootdir)
@@ -74,10 +88,6 @@ def process_dir (rootdir, directory, d):
baseprefix = os.path.normpath(d.expand('${base_prefix}'))
hostos = d.getVar("HOST_OS")
- #bb.debug("Checking %s for binaries to process" % directory)
- if not os.path.exists(directory):
- return
-
if "linux" in hostos:
process_file = process_file_linux
elif "darwin" in hostos:
@@ -95,7 +105,7 @@ def process_dir (rootdir, directory, d):
continue
if os.path.isdir(fpath):
- process_dir(rootdir, fpath, d)
+ process_dir(rootdir, fpath, d, break_hardlinks = break_hardlinks)
else:
#bb.note("Testing %s for relocatability" % fpath)
@@ -108,8 +118,9 @@ def process_dir (rootdir, directory, d):
else:
# Temporarily make the file writeable so we can chrpath it
os.chmod(fpath, perms|stat.S_IRWXU)
- process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d)
-
+
+ process_file(cmd, fpath, rootdir, baseprefix, tmpdir, d, break_hardlinks = break_hardlinks)
+
if perms:
os.chmod(fpath, perms)
diff --git a/external/poky/meta/classes/clutter.bbclass b/external/poky/meta/classes/clutter.bbclass
index 5edab0e5..24b53a13 100644
--- a/external/poky/meta/classes/clutter.bbclass
+++ b/external/poky/meta/classes/clutter.bbclass
@@ -14,4 +14,5 @@ REALNAME = "${@get_real_name("${BPN}")}"
SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
S = "${WORKDIR}/${REALNAME}-${PV}"
-inherit autotools pkgconfig gtk-doc gettext
+CLUTTERBASEBUILDCLASS ??= "autotools"
+inherit ${CLUTTERBASEBUILDCLASS} pkgconfig gtk-doc gettext
diff --git a/external/poky/meta/classes/cmake.bbclass b/external/poky/meta/classes/cmake.bbclass
index b364d2bc..8243f7ce 100644
--- a/external/poky/meta/classes/cmake.bbclass
+++ b/external/poky/meta/classes/cmake.bbclass
@@ -4,29 +4,41 @@ OECMAKE_SOURCEPATH ??= "${S}"
DEPENDS_prepend = "cmake-native "
B = "${WORKDIR}/build"
-# We need to unset CCACHE otherwise cmake gets too confused
-CCACHE = ""
-
# What CMake generator to use.
# The supported options are "Unix Makefiles" or "Ninja".
OECMAKE_GENERATOR ?= "Ninja"
python() {
generator = d.getVar("OECMAKE_GENERATOR")
- if generator == "Unix Makefiles":
- args = "-G 'Unix Makefiles' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
+ if "Unix Makefiles" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=" + d.getVar("MAKE")
d.setVar("OECMAKE_GENERATOR_ARGS", args)
d.setVarFlag("do_compile", "progress", "percent")
- elif generator == "Ninja":
+ elif "Ninja" in generator:
+ args = "-G '" + generator + "' -DCMAKE_MAKE_PROGRAM=ninja"
d.appendVar("DEPENDS", " ninja-native")
- d.setVar("OECMAKE_GENERATOR_ARGS", "-G Ninja -DCMAKE_MAKE_PROGRAM=ninja")
+ d.setVar("OECMAKE_GENERATOR_ARGS", args)
d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+")
else:
bb.fatal("Unknown CMake Generator %s" % generator)
+
+ # C/C++ Compiler (without cpu arch/tune arguments)
+ if not d.getVar('OECMAKE_C_COMPILER'):
+ cc_list = d.getVar('CC').split()
+ if cc_list[0] == 'ccache':
+ d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0])
+ d.setVar('OECMAKE_C_COMPILER', cc_list[1])
+ else:
+ d.setVar('OECMAKE_C_COMPILER', cc_list[0])
+
+ if not d.getVar('OECMAKE_CXX_COMPILER'):
+ cxx_list = d.getVar('CXX').split()
+ if cxx_list[0] == 'ccache':
+ d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0])
+ d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1])
+ else:
+ d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0])
}
-# C/C++ Compiler (without cpu arch/tune arguments)
-OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
-OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
OECMAKE_AR ?= "${AR}"
# Compiler flags
@@ -39,6 +51,9 @@ OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LD
CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}"
+OECMAKE_C_COMPILER_LAUNCHER ?= ""
+OECMAKE_CXX_COMPILER_LAUNCHER ?= ""
+
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
@@ -48,21 +63,29 @@ OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
-EXTRA_OECMAKE_BUILD_prepend_task-compile = "${PARALLEL_MAKE} "
-EXTRA_OECMAKE_BUILD_prepend_task-install = "${PARALLEL_MAKEINST} "
+export CMAKE_BUILD_PARALLEL_LEVEL
+CMAKE_BUILD_PARALLEL_LEVEL_task-compile = "${@oe.utils.parallel_make(d, False)}"
+CMAKE_BUILD_PARALLEL_LEVEL_task-install = "${@oe.utils.parallel_make(d, True)}"
OECMAKE_TARGET_COMPILE ?= "all"
OECMAKE_TARGET_INSTALL ?= "install"
+def map_host_os_to_system_name(host_os):
+ if host_os.startswith('mingw'):
+ return 'Windows'
+ if host_os.startswith('linux'):
+ return 'Linux'
+ return host_os
+
# CMake expects target architectures in the format of uname(2),
# which do not always match TARGET_ARCH, so all the necessary
# conversions should happen here.
-def map_target_arch_to_uname_arch(target_arch):
- if target_arch == "powerpc":
+def map_host_arch_to_uname_arch(host_arch):
+ if host_arch == "powerpc":
return "ppc"
- if target_arch == "powerpc64":
+ if host_arch == "powerpc64":
return "ppc64"
- return target_arch
+ return host_arch
cmake_do_generate_toolchain_file() {
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
@@ -72,10 +95,12 @@ cmake_do_generate_toolchain_file() {
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
$cmake_crosscompiling
-set( CMAKE_SYSTEM_NAME `echo ${TARGET_OS} | sed -e 's/^./\u&/' -e 's/^\(Linux\).*/\1/'` )
-set( CMAKE_SYSTEM_PROCESSOR ${@map_target_arch_to_uname_arch(d.getVar('TARGET_ARCH'))} )
+set( CMAKE_SYSTEM_NAME ${@map_host_os_to_system_name(d.getVar('HOST_OS'))} )
+set( CMAKE_SYSTEM_PROCESSOR ${@map_host_arch_to_uname_arch(d.getVar('HOST_ARCH'))} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_C_COMPILER_LAUNCHER ${OECMAKE_C_COMPILER_LAUNCHER} )
+set( CMAKE_CXX_COMPILER_LAUNCHER ${OECMAKE_CXX_COMPILER_LAUNCHER} )
set( CMAKE_ASM_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_AR ${OECMAKE_AR} CACHE FILEPATH "Archiver" )
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
@@ -89,11 +114,12 @@ set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
# only search in the paths provided so cmake doesn't pick
# up libraries and tools from the native build machine
-set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN} ${HOSTTOOLS_DIR})
set( CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ${OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM} )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+set( CMAKE_PROGRAM_PATH "/" )
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
@@ -102,12 +128,19 @@ set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
# directory as rpath by default
set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
-# Use native cmake modules
+# Use RPATHs relative to build directory for reproducibility
+set( CMAKE_BUILD_RPATH_USE_ORIGIN ON )
+
+# Use our cmake modules
list(APPEND CMAKE_MODULE_PATH "${STAGING_DATADIR}/cmake/Modules/")
# add for non /usr/lib libdir, e.g. /usr/lib64
set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
+# add include dir to implicit includes in case it differs from /usr/include
+list(APPEND CMAKE_C_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+list(APPEND CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES ${includedir})
+
EOF
}
@@ -140,26 +173,38 @@ cmake_do_configure() {
$oecmake_sitefile \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
- -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir'), d.getVar('prefix') + '/')} \
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
- -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix'))} \
+ -DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir'), d. getVar('prefix') + '/')} \
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
- -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix'))} \
- -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix'))} \
+ -DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir'), d.getVar('prefix') + '/')} \
+ -DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir'), d.getVar('prefix') + '/')} \
+ -DPYTHON_EXECUTABLE:PATH=${PYTHON} \
+ -DPython_EXECUTABLE:PATH=${PYTHON} \
+ -DPython3_EXECUTABLE:PATH=${PYTHON} \
+ -DLIB_SUFFIX=${@d.getVar('baselib').replace('lib', '')} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
- -DCMAKE_VERBOSE_MAKEFILE=1 \
-DCMAKE_NO_SYSTEM_FROM_IMPORTED=1 \
${EXTRA_OECMAKE} \
-Wno-dev
}
+# To disable verbose cmake logs for a given recipe, or globally via configuration metadata such as local.conf,
+# add the following:
+#
+# CMAKE_VERBOSE = ""
+#
+
+CMAKE_VERBOSE ??= "VERBOSE=1"
+
+# Then run do_compile again
cmake_runcmake_build() {
- bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
- eval ${DESTDIR:+DESTDIR=${DESTDIR} }VERBOSE=1 cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ bbnote ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
+ eval ${DESTDIR:+DESTDIR=${DESTDIR} }${CMAKE_VERBOSE} cmake --build '${B}' "$@" -- ${EXTRA_OECMAKE_BUILD}
}
cmake_do_compile() {
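With the hard-coded VERBOSE=1 now routed through CMAKE_VERBOSE, build-log verbosity can be tuned per recipe or globally. A minimal sketch using only variables defined in the class above:

    # local.conf or a recipe: silence verbose cmake build output
    CMAKE_VERBOSE = ""

    # a recipe that prefers Makefiles over the default Ninja generator
    OECMAKE_GENERATOR = "Unix Makefiles"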
diff --git a/external/poky/meta/classes/cml1.bbclass b/external/poky/meta/classes/cml1.bbclass
index 926747f2..c7f6723c 100644
--- a/external/poky/meta/classes/cml1.bbclass
+++ b/external/poky/meta/classes/cml1.bbclass
@@ -1,7 +1,7 @@
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake oldconfig
+ yes '' | oe_runmake oldconfig
}
EXPORT_FUNCTIONS do_configure
@@ -26,7 +26,7 @@ python do_menuconfig() {
except OSError:
mtime = 0
- oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
+ oe_terminal("sh -c \"make %s; if [ \\$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND'),
d.getVar('PN') + ' Configuration', d)
# FIXME this check can be removed when the minimum bitbake version has been bumped
diff --git a/external/poky/meta/classes/compress_doc.bbclass b/external/poky/meta/classes/compress_doc.bbclass
index 45bb8fff..d6d11fad 100644
--- a/external/poky/meta/classes/compress_doc.bbclass
+++ b/external/poky/meta/classes/compress_doc.bbclass
@@ -160,6 +160,7 @@ def _is_compress_doc(file, compress_format_list):
return False, ''
def compress_doc(topdir, compress_mode, compress_cmds):
+ import subprocess
hardlink_dict = {}
for root, dirs, files in os.walk(topdir):
for f in files:
@@ -187,6 +188,7 @@ def compress_doc(topdir, compress_mode, compress_cmds):
# Decompress doc files which format is not compress_mode
def decompress_doc(topdir, compress_mode, decompress_cmds):
+ import subprocess
hardlink_dict = {}
decompress = True
for root, dirs, files in os.walk(topdir):
diff --git a/external/poky/meta/classes/core-image.bbclass b/external/poky/meta/classes/core-image.bbclass
index a9a2cec6..88ca2721 100644
--- a/external/poky/meta/classes/core-image.bbclass
+++ b/external/poky/meta/classes/core-image.bbclass
@@ -26,12 +26,15 @@
# - debug-tweaks - makes an image suitable for development, e.g. allowing passwordless root logins
# - empty-root-password
# - allow-empty-password
+# - allow-root-login
# - post-install-logging
# - dev-pkgs - development packages (headers, etc.) for all installed packages in the rootfs
# - dbg-pkgs - debug symbol packages for all installed packages in the rootfs
# - doc-pkgs - documentation packages for all installed packages in the rootfs
+# - bash-completion-pkgs - bash-completion packages for recipes using bash-completion bbclass
# - ptest-pkgs - ptest packages for all ptest-enabled recipes
# - read-only-rootfs - tweaks an image to support read-only rootfs
+# - stateless-rootfs - systemctl-native not run, image populated by systemd at runtime
# - splash - bootup splash screen
#
FEATURE_PACKAGES_x11 = "packagegroup-core-x11"
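The feature list above gains allow-root-login, bash-completion-pkgs and stateless-rootfs. A minimal sketch of enabling them from local.conf, assuming these names are accepted by the image feature validation in this release:

    EXTRA_IMAGE_FEATURES += "allow-root-login bash-completion-pkgs"
    IMAGE_FEATURES_append = " stateless-rootfs"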
diff --git a/external/poky/meta/classes/cpan-base.bbclass b/external/poky/meta/classes/cpan-base.bbclass
index 577fcd63..867edf87 100644
--- a/external/poky/meta/classes/cpan-base.bbclass
+++ b/external/poky/meta/classes/cpan-base.bbclass
@@ -2,7 +2,7 @@
# cpan-base provides various perl related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl ${datadir}/perl"
+FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS_${PN} += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
@@ -14,5 +14,5 @@ def is_target(d):
return "yes"
return "no"
-PERLLIBDIRS = "${libdir}/perl"
-PERLLIBDIRS_class-native = "${libdir}/perl-native"
+PERLLIBDIRS = "${libdir}/perl5"
+PERLLIBDIRS_class-native = "${libdir}/perl5"
diff --git a/external/poky/meta/classes/cpan.bbclass b/external/poky/meta/classes/cpan.bbclass
index a5bc3018..e9908ae4 100644
--- a/external/poky/meta/classes/cpan.bbclass
+++ b/external/poky/meta/classes/cpan.bbclass
@@ -10,13 +10,14 @@ EXTRA_PERLFLAGS ?= ""
export PERLCONFIGTARGET = "${@is_target(d)}"
# Env var which tells perl where the perl include files are
-export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
-export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}/CORE"
+export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
+export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
cpan_do_configure () {
- yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 ${EXTRA_CPANFLAGS}
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 PERL=$(which perl) ${EXTRA_CPANFLAGS}
# Makefile.PLs can exit with success without generating a
# Makefile, e.g. in cases of missing configure time
@@ -27,7 +28,7 @@ cpan_do_configure () {
[ -e Makefile ] || bbfatal "No Makefile was generated by Makefile.PL"
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
- . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
+ . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh
# Use find since there can be a Makefile generated for each Makefile.PL
for f in `find -name Makefile.PL`; do
f2=`echo $f | sed -e 's/.PL//'`
diff --git a/external/poky/meta/classes/cpan_build.bbclass b/external/poky/meta/classes/cpan_build.bbclass
index 9a2ad895..f3fb4666 100644
--- a/external/poky/meta/classes/cpan_build.bbclass
+++ b/external/poky/meta/classes/cpan_build.bbclass
@@ -7,14 +7,15 @@ EXTRA_CPAN_BUILD_FLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
-export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
-export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/${@get_perl_version(d)}/${@get_perl_arch(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/"
+export PERLHOSTARCHLIB = "${STAGING_LIBDIR_NATIVE}/perl5/${@get_perl_version(d)}/${@get_perl_hostarch(d)}/"
export LD = "${CCLD}"
cpan_build_do_configure () {
if [ "${@is_target(d)}" = "yes" ]; then
# build for target
- . ${STAGING_LIBDIR}/perl/config.sh
+ . ${STAGING_LIBDIR}/perl5/config.sh
fi
perl Build.PL --installdirs vendor --destdir ${D} \
diff --git a/external/poky/meta/classes/cross-canadian.bbclass b/external/poky/meta/classes/cross-canadian.bbclass
index acde331e..f5c9f615 100644
--- a/external/poky/meta/classes/cross-canadian.bbclass
+++ b/external/poky/meta/classes/cross-canadian.bbclass
@@ -8,6 +8,8 @@
# SDK packages are built either explicitly by the user,
# or indirectly via dependency. No need to be in 'world'.
EXCLUDE_FROM_WORLD = "1"
+NATIVESDKLIBC ?= "libc-glibc"
+LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-cross-canadian"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
diff --git a/external/poky/meta/classes/cross.bbclass b/external/poky/meta/classes/cross.bbclass
index 34d7951f..bfec91d0 100644
--- a/external/poky/meta/classes/cross.bbclass
+++ b/external/poky/meta/classes/cross.bbclass
@@ -17,6 +17,9 @@ HOST_CC_ARCH = "${BUILD_CC_ARCH}"
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
+# Don't strip the sysroot when DEBUG_BUILD is enabled
+INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
+
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
STAGING_DIR_HOST = "${RECIPE_SYSROOT_NATIVE}"
@@ -67,7 +70,6 @@ libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}/"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
do_install () {
@@ -94,3 +96,4 @@ python do_addto_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
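With the INHIBIT_SYSROOT_STRIP default above, enabling a debug build also leaves cross sysroots unstripped; the variable can still be forced independently. Sketch:

    # local.conf: debug build, which now also keeps cross sysroots unstripped
    DEBUG_BUILD = "1"
    # or, in an individual cross recipe:
    INHIBIT_SYSROOT_STRIP = "1"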
diff --git a/external/poky/meta/classes/crosssdk.bbclass b/external/poky/meta/classes/crosssdk.bbclass
index fdaaac84..04aecb69 100644
--- a/external/poky/meta/classes/crosssdk.bbclass
+++ b/external/poky/meta/classes/crosssdk.bbclass
@@ -1,11 +1,19 @@
inherit cross
CLASSOVERRIDE = "class-crosssdk"
+NATIVESDKLIBC ?= "libc-glibc"
+LIBCOVERRIDE = ":${NATIVESDKLIBC}"
MACHINEOVERRIDES = ""
PACKAGE_ARCH = "${SDK_ARCH}"
+
python () {
# set TUNE_PKGARCH to SDK_ARCH
d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH'))
+ # Set features here to prevent appends and distro features backfill
+ # from modifying nativesdk distro features
+ features = set(d.getVar("DISTRO_FEATURES_NATIVESDK").split())
+ filtered = set(bb.utils.filter("DISTRO_FEATURES", d.getVar("DISTRO_FEATURES_FILTER_NATIVESDK"), d).split())
+ d.setVar("DISTRO_FEATURES", " ".join(sorted(features | filtered)))
}
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
@@ -35,7 +43,6 @@ target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
target_exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
baselib = "lib"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
# Need to force this to ensure consistency across architectures
diff --git a/external/poky/meta/classes/cve-check.bbclass b/external/poky/meta/classes/cve-check.bbclass
index 19ed5548..514897e8 100644
--- a/external/poky/meta/classes/cve-check.bbclass
+++ b/external/poky/meta/classes/cve-check.bbclass
@@ -26,7 +26,7 @@ CVE_PRODUCT ??= "${BPN}"
CVE_VERSION ??= "${PV}"
CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK"
-CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.0.db"
+CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db"
CVE_CHECK_LOG ?= "${T}/cve.log"
CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check"
@@ -52,17 +52,20 @@ python do_cve_check () {
"""
if os.path.exists(d.getVar("CVE_CHECK_DB_FILE")):
- patched_cves = get_patches_cves(d)
- patched, unpatched = check_cves(d, patched_cves)
+ try:
+ patched_cves = get_patches_cves(d)
+ except FileNotFoundError:
+ bb.fatal("Failure in searching patches")
+ whitelisted, patched, unpatched = check_cves(d, patched_cves)
if patched or unpatched:
cve_data = get_cve_info(d, patched + unpatched)
- cve_write_data(d, patched, unpatched, cve_data)
+ cve_write_data(d, patched, unpatched, whitelisted, cve_data)
else:
bb.note("No CVE database found, skipping CVE check")
}
-addtask cve_check before do_build
+addtask cve_check before do_build after do_fetch
do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db"
do_cve_check[nostamp] = "1"
@@ -129,6 +132,10 @@ def get_patches_cves(d):
for url in src_patches(d):
patch_file = bb.fetch.decodeurl(url)[2]
+ if not os.path.isfile(patch_file):
+ bb.error("File Not found: %s" % patch_file)
+ raise FileNotFoundError
+
# Check patch file name for CVE ID
fname_match = cve_file_name_match.search(patch_file)
if fname_match:
@@ -172,13 +179,13 @@ def check_cves(d, patched_cves):
products = d.getVar("CVE_PRODUCT").split()
# If this has been unset then we're not scanning for CVEs here (for example, image recipes)
if not products:
- return ([], [])
+ return ([], [], [])
pv = d.getVar("CVE_VERSION").split("+git")[0]
# If the recipe has been whitelisted we return empty lists
if d.getVar("PN") in d.getVar("CVE_CHECK_PN_WHITELIST").split():
bb.note("Recipe has been whitelisted, skipping check")
- return ([], [])
+ return ([], [], [])
old_cve_whitelist = d.getVar("CVE_CHECK_CVE_WHITELIST")
if old_cve_whitelist:
@@ -214,7 +221,7 @@ def check_cves(d, patched_cves):
(_, _, _, version_start, operator_start, version_end, operator_end) = row
#bb.debug(2, "Evaluating row " + str(row))
- if (operator_start == '=' and pv == version_start):
+ if (operator_start == '=' and pv == version_start) or version_start == '-':
vulnerable = True
else:
if operator_start:
@@ -256,7 +263,7 @@ def check_cves(d, patched_cves):
conn.close()
- return (list(patched_cves), cves_unpatched)
+ return (list(cve_whitelist), list(patched_cves), cves_unpatched)
def get_cve_info(d, cves):
"""
@@ -280,7 +287,7 @@ def get_cve_info(d, cves):
conn.close()
return cve_data
-def cve_write_data(d, patched, unpatched, cve_data):
+def cve_write_data(d, patched, unpatched, whitelisted, cve_data):
"""
Write CVE information in WORKDIR; and to CVE_CHECK_DIR, and
CVE manifest if enabled.
@@ -294,9 +301,11 @@ def cve_write_data(d, patched, unpatched, cve_data):
for cve in sorted(cve_data):
write_string += "PACKAGE NAME: %s\n" % d.getVar("PN")
- write_string += "PACKAGE VERSION: %s\n" % d.getVar("PV")
+ write_string += "PACKAGE VERSION: %s%s\n" % (d.getVar("EXTENDPE"), d.getVar("PV"))
write_string += "CVE: %s\n" % cve
- if cve in patched:
+ if cve in whitelisted:
+ write_string += "CVE STATUS: Whitelisted\n"
+ elif cve in patched:
write_string += "CVE STATUS: Patched\n"
else:
unpatched_cves.append(cve)
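The report now records a separate "Whitelisted" status alongside "Patched". A minimal sketch of the related knob visible in this hunk (the recipe name is illustrative):

    # skip CVE scanning for a recipe entirely
    CVE_CHECK_PN_WHITELIST += "my-vendor-tool"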
diff --git a/external/poky/meta/classes/debian.bbclass b/external/poky/meta/classes/debian.bbclass
index 989ea8f8..6f8a599c 100644
--- a/external/poky/meta/classes/debian.bbclass
+++ b/external/poky/meta/classes/debian.bbclass
@@ -29,11 +29,11 @@ python debian_package_name_hook () {
pkgdest = d.getVar("PKGDEST")
packages = d.getVar('PACKAGES')
- so_re = re.compile("lib.*\.so")
+ so_re = re.compile(r"lib.*\.so")
def socrunch(s):
s = s.lower().replace('_', '-')
- m = re.match("^(.*)(.)\.so\.(.*)$", s)
+ m = re.match(r"^(.*)(.)\.so\.(.*)$", s)
if m is None:
return None
if m.group(2) in '0123456789':
@@ -79,7 +79,7 @@ python debian_package_name_hook () {
try:
cmd = [d.expand("${TARGET_PREFIX}objdump"), "-p", f]
output = subprocess.check_output(cmd).decode("utf-8")
- for m in re.finditer("\s+SONAME\s+([^\s]+)", output):
+ for m in re.finditer(r"\s+SONAME\s+([^\s]+)", output):
if m.group(1) not in sonames:
sonames.append(m.group(1))
except subprocess.CalledProcessError:
diff --git a/external/poky/meta/classes/devicetree.bbclass b/external/poky/meta/classes/devicetree.bbclass
index 8fe5a5ed..c772ab2a 100644
--- a/external/poky/meta/classes/devicetree.bbclass
+++ b/external/poky/meta/classes/devicetree.bbclass
@@ -27,6 +27,8 @@ inherit deploy kernel-arch
COMPATIBLE_MACHINE ?= "^$"
+PROVIDES = "virtual/dtb"
+
PACKAGE_ARCH = "${MACHINE_ARCH}"
SYSROOT_DIRS += "/boot/devicetree"
@@ -57,7 +59,7 @@ DT_BOOT_CPU ??= "0"
DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
-DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE}"
+DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE} -@"
DTC_OFLAGS ?= "-p 0 -@ -H epapr"
python () {
@@ -114,15 +116,18 @@ def devicetree_compile(dtspath, includes, d):
dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
bb.note("Running {0}".format(" ".join(dtcargs)))
- subprocess.run(dtcargs, check = True)
+ subprocess.run(dtcargs, check = True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
python devicetree_do_compile() {
includes = expand_includes("DT_INCLUDE", d)
listpath = d.getVar("DT_FILES_PATH")
for dts in os.listdir(listpath):
- if not dts.endswith(".dts"):
- continue # skip non-.dts files
dtspath = os.path.join(listpath, dts)
+ try:
+ if not(os.path.isfile(dtspath)) or not(dts.endswith(".dts") or devicetree_source_is_overlay(dtspath)):
+ continue # skip non-.dts files and non-overlay files
+ except:
+ continue # skip if can't determine if overlay
devicetree_compile(dtspath, includes, d)
}
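Because the class now publishes PROVIDES = "virtual/dtb", a machine configuration can choose between competing device tree recipes. Sketch, with a hypothetical recipe name:

    PREFERRED_PROVIDER_virtual/dtb = "my-machine-devicetree"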
diff --git a/external/poky/meta/classes/devtool-source.bbclass b/external/poky/meta/classes/devtool-source.bbclass
index 1372e32c..280d6009 100644
--- a/external/poky/meta/classes/devtool-source.bbclass
+++ b/external/poky/meta/classes/devtool-source.bbclass
@@ -97,15 +97,15 @@ python devtool_post_unpack() {
local_files = oe.recipeutils.get_recipe_local_files(d)
if is_kernel_yocto:
- for key in local_files.copy():
- if key.endswith('scc'):
- sccfile = open(local_files[key], 'r')
+ for key in [f for f in local_files if f.endswith('scc')]:
+ with open(local_files[key], 'r') as sccfile:
for l in sccfile:
line = l.split()
if line and line[0] in ('kconf', 'patch'):
- local_files[line[-1]] = os.path.join(os.path.dirname(local_files[key]), line[-1])
- shutil.copy2(os.path.join(os.path.dirname(local_files[key]), line[-1]), workdir)
- sccfile.close()
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if cfg not in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
# Ignore local files with subdir={BP}
srcabspath = os.path.abspath(srcsubdir)
diff --git a/external/poky/meta/classes/distro_features_check.bbclass b/external/poky/meta/classes/distro_features_check.bbclass
index 9b78b03e..8124a8ca 100644
--- a/external/poky/meta/classes/distro_features_check.bbclass
+++ b/external/poky/meta/classes/distro_features_check.bbclass
@@ -1,37 +1,7 @@
-# Allow checking of required and conflicting DISTRO_FEATURES
-#
-# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
-# in DISTRO_FEATURES.
-# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
-# in DISTRO_FEATURES.
-# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
-# DISTRO_FEATURES.
-#
-# Copyright 2013 (C) O.S. Systems Software LTDA.
+# Temporarily provide fallback to the old name of the class
-python () {
- # Assume at least one var is set.
- distro_features = (d.getVar('DISTRO_FEATURES') or "").split()
-
- any_of_distro_features = d.getVar('ANY_OF_DISTRO_FEATURES')
- if any_of_distro_features:
- any_of_distro_features = any_of_distro_features.split()
- if set.isdisjoint(set(any_of_distro_features),set(distro_features)):
- raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % any_of_distro_features)
-
- required_distro_features = d.getVar('REQUIRED_DISTRO_FEATURES')
- if required_distro_features:
- required_distro_features = required_distro_features.split()
- for f in required_distro_features:
- if f in distro_features:
- continue
- else:
- raise bb.parse.SkipRecipe("missing required distro feature '%s' (not in DISTRO_FEATURES)" % f)
-
- conflict_distro_features = d.getVar('CONFLICT_DISTRO_FEATURES')
- if conflict_distro_features:
- conflict_distro_features = conflict_distro_features.split()
- for f in conflict_distro_features:
- if f in distro_features:
- raise bb.parse.SkipRecipe("conflicting distro feature '%s' (in DISTRO_FEATURES)" % f)
+python __anonymous() {
+ bb.warn("distro_features_check.bbclass is deprecated, please use features_check.bbclass instead")
}
+
+inherit features_check
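Since the old class now only warns and inherits the new one, migrating a recipe is a one-line rename:

    # before
    inherit distro_features_check
    # after
    inherit features_check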
diff --git a/external/poky/meta/classes/distrodata.bbclass b/external/poky/meta/classes/distrodata.bbclass
deleted file mode 100644
index 59ee8cea..00000000
--- a/external/poky/meta/classes/distrodata.bbclass
+++ /dev/null
@@ -1,427 +0,0 @@
-include conf/distro/include/upstream_tracking.inc
-include conf/distro/include/distro_alias.inc
-include conf/distro/include/maintainers.inc
-
-addhandler distro_eventhandler
-distro_eventhandler[eventmask] = "bb.event.BuildStarted"
-python distro_eventhandler() {
- import oe.distro_check as dc
- import csv
- logfile = dc.create_log_file(e.data, "distrodata.csv")
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f)
- writer.writerow(['Package', 'Description', 'Owner', 'License',
- 'VerMatch', 'Version', 'Upstream', 'Reason', 'Recipe Status',
- 'Distro 1', 'Distro 2', 'Distro 3'])
- f.close()
- bb.utils.unlockfile(lf)
-
- return
-}
-
-addtask distrodata_np
-do_distrodata_np[nostamp] = "1"
-python do_distrodata_np() {
- localdata = bb.data.createCopy(d)
- pn = d.getVar("PN")
- bb.note("Package Name: %s" % pn)
-
- import oe.distro_check as dist_check
- tmpdir = d.getVar('TMPDIR')
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME')
- dist_check.update_distro_data(distro_check_dir, datetime, localdata)
-
- if pn.find("-native") != -1:
- pnstripped = pn.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.find("-cross") != -1:
- pnstripped = pn.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.find("-crosssdk") != -1:
- pnstripped = pn.split("-crosssdk")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.startswith("nativesdk-"):
- pnstripped = pn.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
-
-
- if pn.find("-initial") != -1:
- pnstripped = pn.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- """generate package information from .bb file"""
- pname = localdata.getVar('PN')
- pcurver = localdata.getVar('PV')
- pdesc = localdata.getVar('DESCRIPTION')
- if pdesc is not None:
- pdesc = pdesc.replace(',','')
- pdesc = pdesc.replace('\n','')
-
- pgrp = localdata.getVar('SECTION')
- plicense = localdata.getVar('LICENSE').replace(',','_')
-
- rstatus = localdata.getVar('RECIPE_COLOR')
- if rstatus is not None:
- rstatus = rstatus.replace(',','')
-
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION')
- if pcurver == pupver:
- vermatch="1"
- else:
- vermatch="0"
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON')
- if noupdate_reason is None:
- noupdate="0"
- else:
- noupdate="1"
- noupdate_reason = noupdate_reason.replace(',','')
-
- maintainer = localdata.getVar('RECIPE_MAINTAINER')
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE')
- result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
-
- bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s\n" % \
- (pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus))
- line = pn
- for i in result:
- line = line + "," + i
- bb.note("%s\n" % line)
-}
-do_distrodata_np[vardepsexclude] = "DATETIME"
-
-addtask distrodata
-do_distrodata[nostamp] = "1"
-python do_distrodata() {
- import csv
- logpath = d.getVar('LOG_DIR')
- bb.utils.mkdirhier(logpath)
- logfile = os.path.join(logpath, "distrodata.csv")
-
- import oe.distro_check as dist_check
- localdata = bb.data.createCopy(d)
- tmpdir = d.getVar('TMPDIR')
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- datetime = localdata.getVar('DATETIME')
- dist_check.update_distro_data(distro_check_dir, datetime, localdata)
-
- pn = d.getVar("PN")
- bb.note("Package Name: %s" % pn)
-
- if pn.find("-native") != -1:
- pnstripped = pn.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.startswith("nativesdk-"):
- pnstripped = pn.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
-
- if pn.find("-cross") != -1:
- pnstripped = pn.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.find("-crosssdk") != -1:
- pnstripped = pn.split("-crosssdk")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pn.find("-initial") != -1:
- pnstripped = pn.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- """generate package information from .bb file"""
- pname = localdata.getVar('PN')
- pcurver = localdata.getVar('PV')
- pdesc = localdata.getVar('DESCRIPTION')
- if pdesc is not None:
- pdesc = pdesc.replace(',','')
- pdesc = pdesc.replace('\n','')
-
- pgrp = localdata.getVar('SECTION')
- plicense = localdata.getVar('LICENSE').replace(',','_')
-
- rstatus = localdata.getVar('RECIPE_COLOR')
- if rstatus is not None:
- rstatus = rstatus.replace(',','')
-
- pupver = localdata.getVar('RECIPE_UPSTREAM_VERSION')
- if pcurver == pupver:
- vermatch="1"
- else:
- vermatch="0"
-
- noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON')
- if noupdate_reason is None:
- noupdate="0"
- else:
- noupdate="1"
- noupdate_reason = noupdate_reason.replace(',','')
-
- maintainer = localdata.getVar('RECIPE_MAINTAINER')
- rlrd = localdata.getVar('RECIPE_UPSTREAM_DATE')
- # do the comparison
- result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- row = [pname, pdesc, maintainer, plicense, vermatch, pcurver, pupver, noupdate_reason, rstatus]
- row.extend(result)
-
- writer = csv.writer(f)
- writer.writerow(row)
- f.close()
- bb.utils.unlockfile(lf)
-}
-do_distrodata[vardepsexclude] = "DATETIME"
-
-addhandler checkpkg_eventhandler
-checkpkg_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
-python checkpkg_eventhandler() {
- import csv
-
- def parse_csv_file(filename):
- package_dict = {}
-
- with open(filename, "r") as f:
- reader = csv.reader(f, delimiter='\t')
- for row in reader:
- pn = row[0]
-
- if reader.line_num == 1:
- header = row
- continue
-
- if not pn in package_dict.keys():
- package_dict[pn] = row
- f.close()
-
- with open(filename, "w") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow(header)
- for pn in package_dict.keys():
- writer.writerow(package_dict[pn])
- f.close()
-
- del package_dict
-
- if bb.event.getName(e) == "BuildStarted":
- import oe.distro_check as dc
- logfile = dc.create_log_file(e.data, "checkpkg.csv")
-
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- headers = ['Package', 'Version', 'Upver', 'License', 'Section',
- 'Home', 'Release', 'Depends', 'BugTracker', 'PE', 'Description',
- 'Status', 'Tracking', 'URI', 'MAINTAINER', 'NoUpReason']
- writer.writerow(headers)
- f.close()
- bb.utils.unlockfile(lf)
- elif bb.event.getName(e) == "BuildCompleted":
- import os
- filename = "tmp/log/checkpkg.csv"
- if os.path.isfile(filename):
- lf = bb.utils.lockfile("%s.lock"%filename)
- parse_csv_file(filename)
- bb.utils.unlockfile(lf)
- return
-}
-
-addtask checkpkg
-do_checkpkg[nostamp] = "1"
-python do_checkpkg() {
- localdata = bb.data.createCopy(d)
- import csv
- import re
- import tempfile
- import subprocess
- import oe.recipeutils
- from bb.utils import vercmp_string
- from bb.fetch2 import FetchError, NoMethodError, decodeurl
-
- def get_upstream_version_and_status():
-
- # set if the upstream check fails reliably, e.g. absent git tags, or weird version format used on our or on upstream side.
- upstream_version_unknown = localdata.getVar('UPSTREAM_VERSION_UNKNOWN')
- # set if the upstream check cannot be reliably performed due to transient network failures, or server behaving weirdly.
- # This one should be used sparingly, as it completely excludes a recipe from upstream checking.
- upstream_check_unreliable = localdata.getVar('UPSTREAM_CHECK_UNRELIABLE')
-
- if upstream_check_unreliable == "1":
- return "N/A", "CHECK_IS_UNRELIABLE"
-
- uv = oe.recipeutils.get_recipe_upstream_version(localdata)
- pupver = uv['version'] if uv['version'] else "N/A"
- pversion = uv['current_version']
- revision = uv['revision'] if uv['revision'] else "N/A"
-
- if pupver == "N/A":
- pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
- else:
- cmp = vercmp_string(pversion, pupver)
- if cmp == -1:
- pstatus = "UPDATE" if not upstream_version_unknown else "KNOWN_BROKEN"
- elif cmp == 0:
- pstatus = "MATCH" if not upstream_version_unknown else "KNOWN_BROKEN"
- else:
- pstatus = "UNKNOWN" if upstream_version_unknown else "UNKNOWN_BROKEN"
-
- return pversion, pupver, pstatus, revision
-
-
- """initialize log files."""
- logpath = d.getVar('LOG_DIR')
- bb.utils.mkdirhier(logpath)
- logfile = os.path.join(logpath, "checkpkg.csv")
-
- """generate package information from .bb file"""
- pname = d.getVar('PN')
-
- if pname.find("-native") != -1:
- if d.getVar('BBCLASSEXTEND'):
- return
- pnstripped = pname.split("-native")
- bb.note("Native Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pname.startswith("nativesdk-"):
- if d.getVar('BBCLASSEXTEND'):
- return
- pnstripped = pname.replace("nativesdk-", "")
- bb.note("NativeSDK Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped + ":" + d.getVar('OVERRIDES'))
-
- if pname.find("-cross") != -1:
- pnstripped = pname.split("-cross")
- bb.note("cross Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- if pname.find("-initial") != -1:
- pnstripped = pname.split("-initial")
- bb.note("initial Split: %s" % pnstripped)
- localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
-
- pdesc = localdata.getVar('DESCRIPTION')
- pgrp = localdata.getVar('SECTION')
- plicense = localdata.getVar('LICENSE')
- psection = localdata.getVar('SECTION')
- phome = localdata.getVar('HOMEPAGE')
- prelease = localdata.getVar('PR')
- pdepends = localdata.getVar('DEPENDS')
- pbugtracker = localdata.getVar('BUGTRACKER')
- ppe = localdata.getVar('PE')
- psrcuri = localdata.getVar('SRC_URI')
- maintainer = localdata.getVar('RECIPE_MAINTAINER')
-
- pversion, pupver, pstatus, prevision = get_upstream_version_and_status()
-
- if psrcuri:
- psrcuri = psrcuri.split()[0]
- else:
- psrcuri = "none"
- pdepends = "".join(pdepends.split("\t"))
- pdesc = "".join(pdesc.split("\t"))
- no_upgr_reason = d.getVar('RECIPE_NO_UPDATE_REASON')
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow([pname, pversion, pupver, plicense, psection, phome,
- prelease, pdepends, pbugtracker, ppe, pdesc, pstatus, prevision,
- psrcuri, maintainer, no_upgr_reason])
- f.close()
- bb.utils.unlockfile(lf)
-}
-
-addhandler distro_check_eventhandler
-distro_check_eventhandler[eventmask] = "bb.event.BuildStarted"
-python distro_check_eventhandler() {
- """initialize log files."""
- import oe.distro_check as dc
- result_file = dc.create_log_file(e.data, "distrocheck.csv")
- return
-}
-
-addtask distro_check
-do_distro_check[nostamp] = "1"
-do_distro_check[vardepsexclude] += "DATETIME"
-python do_distro_check() {
- """checks if the package is present in other public Linux distros"""
- import oe.distro_check as dc
- import shutil
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
- return
-
- localdata = bb.data.createCopy(d)
- tmpdir = d.getVar('TMPDIR')
- distro_check_dir = os.path.join(tmpdir, "distro_check")
- logpath = d.getVar('LOG_DIR')
- bb.utils.mkdirhier(logpath)
- result_file = os.path.join(logpath, "distrocheck.csv")
- datetime = localdata.getVar('DATETIME')
- dc.update_distro_data(distro_check_dir, datetime, localdata)
-
- # do the comparison
- result = dc.compare_in_distro_packages_list(distro_check_dir, d)
-
- # save the results
- dc.save_distro_check_result(result, datetime, result_file, d)
-}
-
-#
-#Check Missing License Text.
-#Use this task to generate the missing license text data for pkg-report system,
-#then we can search those recipes which license text isn't exsit in common-licenses directory
-#
-addhandler checklicense_eventhandler
-checklicense_eventhandler[eventmask] = "bb.event.BuildStarted"
-python checklicense_eventhandler() {
- """initialize log files."""
- import csv
- import oe.distro_check as dc
- logfile = dc.create_log_file(e.data, "missinglicense.csv")
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow(['Package', 'License', 'MissingLicense'])
- f.close()
- bb.utils.unlockfile(lf)
- return
-}
-
-addtask checklicense
-do_checklicense[nostamp] = "1"
-python do_checklicense() {
- import csv
- import shutil
- logpath = d.getVar('LOG_DIR')
- bb.utils.mkdirhier(logpath)
- pn = d.getVar('PN')
- logfile = os.path.join(logpath, "missinglicense.csv")
- generic_directory = d.getVar('COMMON_LICENSE_DIR')
- license_types = d.getVar('LICENSE')
- for license_type in ((license_types.replace('+', '').replace('|', '&')
- .replace('(', '').replace(')', '').replace(';', '')
- .replace(',', '').replace(" ", "").split("&"))):
- if not os.path.isfile(os.path.join(generic_directory, license_type)):
- lf = bb.utils.lockfile("%s.lock" % logfile)
- with open(logfile, "a") as f:
- writer = csv.writer(f, delimiter='\t')
- writer.writerow([pn, license_types, license_type])
- f.close()
- bb.utils.unlockfile(lf)
- return
-}
diff --git a/external/poky/meta/classes/distutils-base.bbclass b/external/poky/meta/classes/distutils-base.bbclass
deleted file mode 100644
index 9f398d70..00000000
--- a/external/poky/meta/classes/distutils-base.bbclass
+++ /dev/null
@@ -1,4 +0,0 @@
-DEPENDS += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES') == '')]}"
-RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
-
-inherit distutils-common-base pythonnative
diff --git a/external/poky/meta/classes/distutils-tools.bbclass b/external/poky/meta/classes/distutils-tools.bbclass
deleted file mode 100644
index 6f2880ea..00000000
--- a/external/poky/meta/classes/distutils-tools.bbclass
+++ /dev/null
@@ -1,73 +0,0 @@
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
- --install-data=${D}/${datadir}"
-
-distutils_do_compile() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
-}
-
-distutils_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
-}
-
-distutils_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
-}
-
-distutils_do_install() {
- echo "Beginning ${PN} Install ..."
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- echo "Step 2 of ${PN} Install ..."
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "${PYTHON_PN} setup.py install execution failed."
-
- echo "Step 3 of ${PN} Install ..."
- # support filenames with *spaces*
- find ${D} -name "*.py" -print0 | while read -d $'\0' i ; do \
- sed -i -e s:${D}::g $i
- done
-
- echo "Step 4 of ${PN} Install ..."
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- echo "Step 4 of ${PN} Install ..."
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- echo "Step 5 of ${PN} Install ..."
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- fi
-}
-
-#EXPORT_FUNCTIONS do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/external/poky/meta/classes/distutils.bbclass b/external/poky/meta/classes/distutils.bbclass
deleted file mode 100644
index 98627314..00000000
--- a/external/poky/meta/classes/distutils.bbclass
+++ /dev/null
@@ -1,92 +0,0 @@
-inherit distutils-base
-
-DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
- --prefix=${prefix} \
- --install-lib=${PYTHON_SITEPACKAGES_DIR} \
- --install-data=${datadir}"
-
-distutils_do_configure() {
- if [ "${CLEANBROKEN}" != "1" ] ; then
- NO_FETCH_BUILD=1 \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
- fi
-}
-
-distutils_do_compile() {
- NO_FETCH_BUILD=1 \
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
-}
-
-distutils_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
-}
-
-distutils_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
-}
-
-distutils_do_install() {
- install -d ${D}${PYTHON_SITEPACKAGES_DIR}
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
-
- # support filenames with *spaces*
- # only modify file if it contains path and recompile it
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \; -exec ${STAGING_BINDIR_NATIVE}/python-native/python -mcompileall {} \;
-
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
- fi
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- if [ -e ${D}${sbindir} ]; then
- for i in ${D}${sbindir}/* ; do \
- if [ ${PN} != "${BPN}-native" ]; then
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-native/python:${USRBINPATH}/env\ python:g $i
- fi
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
-
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
- rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/site.py*
-
- #
- # FIXME: Bandaid against wrong datadir computation
- #
- if [ -e ${D}${datadir}/share ]; then
- mv -f ${D}${datadir}/share/* ${D}${datadir}/
- rmdir ${D}${datadir}/share
- fi
-
- # Fix backport modules
- if [ -e ${STAGING_LIBDIR}/${PYTHON_DIR}/site-packages/backports/__init__.py ] && [ -e ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py ]; then
- rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.py;
- rm ${D}${PYTHON_SITEPACKAGES_DIR}/backports/__init__.pyc;
- fi
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
-
-export LDSHARED="${CCLD} -shared"
diff --git a/external/poky/meta/classes/distutils3.bbclass b/external/poky/meta/classes/distutils3.bbclass
index 834e3224..7356b524 100644
--- a/external/poky/meta/classes/distutils3.bbclass
+++ b/external/poky/meta/classes/distutils3.bbclass
@@ -1,76 +1,55 @@
inherit distutils3-base
+B = "${WORKDIR}/build"
+distutils_do_configure[cleandirs] = "${B}"
+
DISTUTILS_BUILD_ARGS ?= ""
-DISTUTILS_BUILD_EXT_ARGS ?= ""
-DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
-DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
- --install-data=${STAGING_DATADIR}"
DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
--prefix=${prefix} \
--install-lib=${PYTHON_SITEPACKAGES_DIR} \
--install-data=${datadir}"
+DISTUTILS_PYTHON = "python3"
+DISTUTILS_PYTHON_class-native = "nativepython3"
+
distutils3_do_configure() {
- if [ "${CLEANBROKEN}" != "1" ] ; then
- NO_FETCH_BUILD=1 \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
- fi
+ :
}
distutils3_do_compile() {
+ cd ${S}
NO_FETCH_BUILD=1 \
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
- build ${DISTUTILS_BUILD_ARGS} || \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
+ build --build-base=${B} ${DISTUTILS_BUILD_ARGS} || \
bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
}
distutils3_do_compile[vardepsexclude] = "MACHINE"
-distutils3_stage_headers() {
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
-}
-distutils3_stage_headers[vardepsexclude] = "MACHINE"
-
-distutils3_stage_all() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
- PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
- bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
-}
-distutils3_stage_all[vardepsexclude] = "MACHINE"
-
distutils3_do_install() {
+ cd ${S}
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
- ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+ ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} ${S}/setup.py \
+ build --build-base=${B} install --skip-build ${DISTUTILS_INSTALL_ARGS} || \
bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
# support filenames with *spaces*
- find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
-
- if test -e ${D}${bindir} ; then
- for i in ${D}${bindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
- sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
+ find ${D} -name "*.py" -exec grep -q ${D} {} \; \
+ -exec sed -i -e s:${D}::g {} \;
- if test -e ${D}${sbindir}; then
- for i in ${D}${sbindir}/* ; do \
- sed -i -e s:${STAGING_BINDIR_NATIVE}/python-${PYTHON_PN}/${PYTHON_PN}:${USRBINPATH}/env\ ${PYTHON_PN}:g $i
+ for i in ${D}${bindir}/* ${D}${sbindir}/*; do
+ if [ -f "$i" ]; then
+ sed -i -e s:${PYTHON}:${USRBINPATH}/env\ ${DISTUTILS_PYTHON}:g $i
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
- done
- fi
+ fi
+ done
rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
-
+
#
# FIXME: Bandaid against wrong datadir computation
#
diff --git a/external/poky/meta/classes/externalsrc.bbclass b/external/poky/meta/classes/externalsrc.bbclass
index 3618b99a..d2001299 100644
--- a/external/poky/meta/classes/externalsrc.bbclass
+++ b/external/poky/meta/classes/externalsrc.bbclass
@@ -73,9 +73,8 @@ python () {
d.setVar('SRC_URI', ' '.join(local_srcuri))
- if '{SRCPV}' in d.getVar('PV', False):
- # Dummy value because the default function can't be called with blank SRC_URI
- d.setVar('SRCPV', '999')
+ # Dummy value because the default function can't be called with blank SRC_URI
+ d.setVar('SRCPV', '999')
if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
d.setVar('CONFIGUREOPT_DEPTRACK', '')
@@ -203,7 +202,7 @@ def srctree_hash_files(d, srcdir=None):
ret = " "
if git_dir is not None:
- oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+ oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1-%s' % d.getVar('PN'))
with tempfile.NamedTemporaryFile(prefix='oe-devtool-index') as tmp_index:
# Clone index
shutil.copyfile(os.path.join(git_dir, 'index'), tmp_index.name)
diff --git a/external/poky/meta/classes/extrausers.bbclass b/external/poky/meta/classes/extrausers.bbclass
index 7709407b..32569e97 100644
--- a/external/poky/meta/classes/extrausers.bbclass
+++ b/external/poky/meta/classes/extrausers.bbclass
@@ -1,18 +1,17 @@
-# This bbclass is mainly used for image level user/group configuration.
+# This bbclass is used for image level user/group configuration.
# Inherit this class if you want to make EXTRA_USERS_PARAMS effective.
# Below is an example showing how to use this functionality.
-# INHERIT += "extrausers"
+# IMAGE_CLASSES += "extrausers"
# EXTRA_USERS_PARAMS = "\
-# useradd -p '' tester; \
-# groupadd developers; \
-# userdel nobody; \
-# groupdel -g video; \
-# groupmod -g 1020 developers; \
-# usermod -s /bin/sh tester; \
+# useradd -p '' tester; \
+# groupadd developers; \
+# userdel nobody; \
+# groupdel -g video; \
+# groupmod -g 1020 developers; \
+# usermod -s /bin/sh tester; \
# "
-
inherit useradd_base
PACKAGE_INSTALL_append = " ${@['', 'base-passwd shadow'][bool(d.getVar('EXTRA_USERS_PARAMS'))]}"
diff --git a/external/poky/meta/classes/features_check.bbclass b/external/poky/meta/classes/features_check.bbclass
new file mode 100644
index 00000000..876d32e3
--- /dev/null
+++ b/external/poky/meta/classes/features_check.bbclass
@@ -0,0 +1,88 @@
+# Allow checking of required and conflicting DISTRO_FEATURES
+#
+# ANY_OF_DISTRO_FEATURES: ensure at least one item on this list is included
+# in DISTRO_FEATURES.
+# REQUIRED_DISTRO_FEATURES: ensure every item on this list is included
+# in DISTRO_FEATURES.
+# CONFLICT_DISTRO_FEATURES: ensure no item in this list is included in
+# DISTRO_FEATURES.
+# ANY_OF_MACHINE_FEATURES: ensure at least one item on this list is included
+# in MACHINE_FEATURES.
+# REQUIRED_MACHINE_FEATURES: ensure every item on this list is included
+# in MACHINE_FEATURES.
+# CONFLICT_MACHINE_FEATURES: ensure no item in this list is included in
+# MACHINE_FEATURES.
+# ANY_OF_COMBINED_FEATURES: ensure at least one item on this list is included
+# in COMBINED_FEATURES.
+# REQUIRED_COMBINED_FEATURES: ensure every item on this list is included
+# in COMBINED_FEATURES.
+# CONFLICT_COMBINED_FEATURES: ensure no item in this list is included in
+# COMBINED_FEATURES.
+#
+# Copyright 2019 (C) Texas Instruments Inc.
+# Copyright 2013 (C) O.S. Systems Software LTDA.
+
+python () {
+ if d.getVar('PARSE_ALL_RECIPES', False):
+ return
+
+ # Assume at least one var is set.
+ distro_features = set((d.getVar('DISTRO_FEATURES') or '').split())
+
+ any_of_distro_features = set((d.getVar('ANY_OF_DISTRO_FEATURES') or '').split())
+ if any_of_distro_features:
+ if set.isdisjoint(any_of_distro_features, distro_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in DISTRO_FEATURES" % ' '.join(any_of_distro_features))
+
+ required_distro_features = set((d.getVar('REQUIRED_DISTRO_FEATURES') or '').split())
+ if required_distro_features:
+ missing = set.difference(required_distro_features, distro_features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required distro feature%s '%s' (not in DISTRO_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_distro_features = set((d.getVar('CONFLICT_DISTRO_FEATURES') or '').split())
+ if conflict_distro_features:
+ conflicts = set.intersection(conflict_distro_features, distro_features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting distro feature%s '%s' (in DISTRO_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+
+ # Assume at least one var is set.
+ machine_features = set((d.getVar('MACHINE_FEATURES') or '').split())
+
+ any_of_machine_features = set((d.getVar('ANY_OF_MACHINE_FEATURES') or '').split())
+ if any_of_machine_features:
+ if set.isdisjoint(any_of_machine_features, machine_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in MACHINE_FEATURES" % ' '.join(any_of_machine_features))
+
+ required_machine_features = set((d.getVar('REQUIRED_MACHINE_FEATURES') or '').split())
+ if required_machine_features:
+ missing = set.difference(required_machine_features, machine_features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required machine feature%s '%s' (not in MACHINE_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_machine_features = set((d.getVar('CONFLICT_MACHINE_FEATURES') or '').split())
+ if conflict_machine_features:
+ conflicts = set.intersection(conflict_machine_features, machine_features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting machine feature%s '%s' (in MACHINE_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+
+ # Assume at least one var is set.
+ combined_features = set((d.getVar('COMBINED_FEATURES') or '').split())
+
+ any_of_combined_features = set((d.getVar('ANY_OF_COMBINED_FEATURES') or '').split())
+ if any_of_combined_features:
+ if set.isdisjoint(any_of_combined_features, combined_features):
+ raise bb.parse.SkipRecipe("one of '%s' needs to be in COMBINED_FEATURES" % ' '.join(any_of_combined_features))
+
+ required_combined_features = set((d.getVar('REQUIRED_COMBINED_FEATURES') or '').split())
+ if required_combined_features:
+ missing = set.difference(required_combined_features, combined_features)
+ if missing:
+ raise bb.parse.SkipRecipe("missing required machine feature%s '%s' (not in COMBINED_FEATURES)" % ('s' if len(missing) > 1 else '', ' '.join(missing)))
+
+ conflict_combined_features = set((d.getVar('CONFLICT_COMBINED_FEATURES') or '').split())
+ if conflict_combined_features:
+ conflicts = set.intersection(conflict_combined_features, combined_features)
+ if conflicts:
+ raise bb.parse.SkipRecipe("conflicting machine feature%s '%s' (in COMBINED_FEATURES)" % ('s' if len(conflicts) > 1 else '', ' '.join(conflicts)))
+}
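A minimal usage sketch for the new class (the feature values below are illustrative, not part of the patch): a recipe opts in by inheriting features_check and setting one or more of the gating variables, for example

    inherit features_check
    REQUIRED_DISTRO_FEATURES = "x11 opengl"
    CONFLICT_DISTRO_FEATURES = "directfb"

If a requirement is not met, the anonymous python above raises bb.parse.SkipRecipe at parse time, so the recipe is skipped cleanly rather than failing mid-build.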
diff --git a/external/poky/meta/classes/fontcache.bbclass b/external/poky/meta/classes/fontcache.bbclass
index f71a754a..97e7f17f 100644
--- a/external/poky/meta/classes/fontcache.bbclass
+++ b/external/poky/meta/classes/fontcache.bbclass
@@ -20,6 +20,7 @@ if [ -n "$D" ] ; then
$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
'bindir="${bindir}"' \
'libdir="${libdir}"' \
+ 'libexecdir="${libexecdir}"' \
'base_libdir="${base_libdir}"' \
'fontconfigcachedir="${FONTCONFIG_CACHE_DIR}"' \
'fontconfigcacheparams="${FONTCONFIG_CACHE_PARAMS}"' \
diff --git a/external/poky/meta/classes/gconf.bbclass b/external/poky/meta/classes/gconf.bbclass
index 4e0ee2e7..3e3c509d 100644
--- a/external/poky/meta/classes/gconf.bbclass
+++ b/external/poky/meta/classes/gconf.bbclass
@@ -49,7 +49,7 @@ python populate_packages_append () {
for pkg in packages:
schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
schemas = []
- schema_re = re.compile(".*\.schemas$")
+ schema_re = re.compile(r".*\.schemas$")
if os.path.exists(schema_dir):
for f in os.listdir(schema_dir):
if schema_re.match(f):
diff --git a/external/poky/meta/classes/gnome.bbclass b/external/poky/meta/classes/gnome.bbclass
deleted file mode 100644
index c6202bbb..00000000
--- a/external/poky/meta/classes/gnome.bbclass
+++ /dev/null
@@ -1 +0,0 @@
-inherit gnomebase gtk-icon-cache gconf mime
diff --git a/external/poky/meta/classes/go-mod.bbclass b/external/poky/meta/classes/go-mod.bbclass
new file mode 100644
index 00000000..5871d025
--- /dev/null
+++ b/external/poky/meta/classes/go-mod.bbclass
@@ -0,0 +1,20 @@
+# Handle Go Modules support
+#
+# When using Go Modules, the current working directory MUST be at or below
+# the location of the 'go.mod' file when the go tool is used, and there is no
+# way to tell it to look elsewhere. It will automatically look upwards for the
+# file, but not downwards.
+#
+# To support this use case, we provide the `GO_WORKDIR` variable, which defaults
+# to `GO_IMPORT` but allows for easy override.
+#
+# Copyright 2020 (C) O.S. Systems Software LTDA.
+
+# The '-modcacherw' option ensures we have write access to the cached objects so
+# we avoid errors during clean task as well as when removing the TMPDIR.
+export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -modcacherw"
+
+inherit go
+
+GO_WORKDIR ?= "${GO_IMPORT}"
+do_compile[dirs] += "${B}/src/${GO_WORKDIR}"
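A sketch of how a recipe would consume this class; the import path and URL are placeholders, and the GO_WORKDIR override is only needed when go.mod lives below the import root:

    inherit go-mod
    GO_IMPORT = "example.com/hello"
    SRC_URI = "git://example.com/hello;protocol=https"
    # hypothetical: go.mod sits in a subdirectory of the module
    GO_WORKDIR = "${GO_IMPORT}/cmd/hello"

Because do_compile[dirs] is extended with ${B}/src/${GO_WORKDIR}, the go tool runs from a directory at or below go.mod, as the comment above requires.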
diff --git a/external/poky/meta/classes/go-ptest.bbclass b/external/poky/meta/classes/go-ptest.bbclass
new file mode 100644
index 00000000..e230a805
--- /dev/null
+++ b/external/poky/meta/classes/go-ptest.bbclass
@@ -0,0 +1,54 @@
+inherit go ptest
+
+do_compile_ptest_base() {
+ export TMPDIR="${GOTMPDIR}"
+ rm -f ${B}/.go_compiled_tests.list
+ go_list_package_tests | while read pkg; do
+ cd ${B}/src/$pkg
+ ${GO} test ${GOPTESTBUILDFLAGS} $pkg
+ find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
+ sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
+ done
+ do_compile_ptest
+}
+
+do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
+
+go_make_ptest_wrapper() {
+ cat >${D}${PTEST_PATH}/run-ptest <<EOF
+#!/bin/sh
+RC=0
+run_test() (
+ cd "\$1"
+ ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
+ exit \$?)
+EOF
+
+}
+
+do_install_ptest_base() {
+ test -f "${B}/.go_compiled_tests.list" || exit 0
+ install -d ${D}${PTEST_PATH}
+ go_stage_testdata
+ go_make_ptest_wrapper
+ havetests=""
+ while read test; do
+ testdir=`dirname $test`
+ testprog=`basename $test`
+ install -d ${D}${PTEST_PATH}/$testdir
+ install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
+ echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
+ havetests="yes"
+ done < ${B}/.go_compiled_tests.list
+ if [ -n "$havetests" ]; then
+ echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
+ chmod +x ${D}${PTEST_PATH}/run-ptest
+ else
+ rm -rf ${D}${PTEST_PATH}
+ fi
+ do_install_ptest
+ chown -R root:root ${D}${PTEST_PATH}
+}
+
+INSANE_SKIP_${PN}-ptest += "ldflags"
+
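For context, a recipe that wants its Go tests packaged would inherit this class rather than plain go (the flag below is illustrative and is passed to each compiled test binary at runtime):

    inherit go-ptest
    GOPTESTFLAGS += "-test.v"

do_compile_ptest_base builds one *.test binary per package reported by go_list_package_tests and records it in ${B}/.go_compiled_tests.list; do_install_ptest_base then installs each binary plus its testdata and appends a run_test line per binary to the generated run-ptest script.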
diff --git a/external/poky/meta/classes/go.bbclass b/external/poky/meta/classes/go.bbclass
index af331f80..a9e31b50 100644
--- a/external/poky/meta/classes/go.bbclass
+++ b/external/poky/meta/classes/go.bbclass
@@ -1,4 +1,4 @@
-inherit goarch ptest
+inherit goarch
GO_PARALLEL_BUILD ?= "${@oe.utils.parallel_make_argument(d, '-p %d')}"
@@ -7,6 +7,24 @@ GOROOT_class-nativesdk = "${STAGING_DIR_TARGET}${libdir}/go"
GOROOT = "${STAGING_LIBDIR}/go"
export GOROOT
export GOROOT_FINAL = "${libdir}/go"
+export GOCACHE = "${B}/.cache"
+
+export GOARCH = "${TARGET_GOARCH}"
+export GOOS = "${TARGET_GOOS}"
+export GOHOSTARCH="${BUILD_GOARCH}"
+export GOHOSTOS="${BUILD_GOOS}"
+
+GOARM[export] = "0"
+GOARM_arm_class-target = "${TARGET_GOARM}"
+GOARM_arm_class-target[export] = "1"
+
+GO386[export] = "0"
+GO386_x86_class-target = "${TARGET_GO386}"
+GO386_x86_class-target[export] = "1"
+
+GOMIPS[export] = "0"
+GOMIPS_mips_class-target = "${TARGET_GOMIPS}"
+GOMIPS_mips_class-target[export] = "1"
DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
DEPENDS_GOLANG_class-native = "go-native"
@@ -23,7 +41,7 @@ GO_EXTLDFLAGS ?= "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS} ${GO_RPATH_LINK} ${LDFLAGS
GO_LINKMODE ?= ""
GO_LINKMODE_class-nativesdk = "--linkmode=external"
GO_LDFLAGS ?= '-ldflags="${GO_RPATH} ${GO_LINKMODE} -extldflags '${GO_EXTLDFLAGS}'"'
-export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS}"
+export GOBUILDFLAGS ?= "-v ${GO_LDFLAGS} -trimpath"
export GOPATH_OMIT_IN_ACTIONID ?= "1"
export GOPTESTBUILDFLAGS ?= "${GOBUILDFLAGS} -c"
export GOPTESTFLAGS ?= ""
@@ -35,6 +53,7 @@ GOTOOLDIR_class-native = "${STAGING_LIBDIR_NATIVE}/go/pkg/tool/${BUILD_GOTUPLE}"
export GOTOOLDIR
export CGO_ENABLED ?= "1"
+export CGO_ENABLED_riscv64 = "0"
export CGO_CFLAGS ?= "${CFLAGS}"
export CGO_CPPFLAGS ?= "${CPPFLAGS}"
export CGO_CXXFLAGS ?= "${CXXFLAGS}"
@@ -45,7 +64,6 @@ GO_INSTALL_FILTEROUT ?= "${GO_IMPORT}/vendor/"
B = "${WORKDIR}/build"
export GOPATH = "${B}"
-export GOCACHE = "off"
export GOTMPDIR ?= "${WORKDIR}/go-tmp"
GOTMPDIR[vardepvalue] = ""
@@ -54,17 +72,13 @@ python go_do_unpack() {
if len(src_uri) == 0:
return
- try:
- fetcher = bb.fetch2.Fetch(src_uri, d)
- for url in fetcher.urls:
- if fetcher.ud[url].type == 'git':
- if fetcher.ud[url].parm.get('destsuffix') is None:
- s_dirname = os.path.basename(d.getVar('S'))
- fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src',
- d.getVar('GO_IMPORT')) + '/'
- fetcher.unpack(d.getVar('WORKDIR'))
- except bb.fetch2.BBFetchException as e:
- raise bb.build.FuncFailed(e)
+ fetcher = bb.fetch2.Fetch(src_uri, d)
+ for url in fetcher.urls:
+ if fetcher.ud[url].type == 'git':
+ if fetcher.ud[url].parm.get('destsuffix') is None:
+ s_dirname = os.path.basename(d.getVar('S'))
+ fetcher.ud[url].parm['destsuffix'] = os.path.join(s_dirname, 'src', d.getVar('GO_IMPORT')) + '/'
+ fetcher.unpack(d.getVar('WORKDIR'))
}
go_list_packages() {
@@ -97,24 +111,11 @@ go_do_compile() {
do_compile[dirs] =+ "${GOTMPDIR}"
do_compile[cleandirs] = "${B}/bin ${B}/pkg"
-do_compile_ptest_base() {
- export TMPDIR="${GOTMPDIR}"
- rm -f ${B}/.go_compiled_tests.list
- go_list_package_tests | while read pkg; do
- cd ${B}/src/$pkg
- ${GO} test ${GOPTESTBUILDFLAGS} $pkg
- find . -mindepth 1 -maxdepth 1 -type f -name '*.test' -exec echo $pkg/{} \; | \
- sed -e's,/\./,/,'>> ${B}/.go_compiled_tests.list
- done
- do_compile_ptest
-}
-do_compile_ptest_base[dirs] =+ "${GOTMPDIR}"
-
go_do_install() {
install -d ${D}${libdir}/go/src/${GO_IMPORT}
tar -C ${S}/src/${GO_IMPORT} -cf - --exclude-vcs --exclude '*.test' --exclude 'testdata' . | \
tar -C ${D}${libdir}/go/src/${GO_IMPORT} --no-same-owner -xf -
- tar -C ${B} -cf - pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
+ tar -C ${B} -cf - --exclude-vcs pkg | tar -C ${D}${libdir}/go --no-same-owner -xf -
if [ -n "`ls ${B}/${GO_BUILD_BINDIR}/`" ]; then
install -d ${D}${bindir}
@@ -122,18 +123,6 @@ go_do_install() {
fi
}
-go_make_ptest_wrapper() {
- cat >${D}${PTEST_PATH}/run-ptest <<EOF
-#!/bin/sh
-RC=0
-run_test() (
- cd "\$1"
- ((((./\$2 ${GOPTESTFLAGS}; echo \$? >&3) | sed -r -e"s,^(PASS|SKIP|FAIL)\$,\\1: \$1/\$2," >&4) 3>&1) | (read rc; exit \$rc)) 4>&1
- exit \$?)
-EOF
-
-}
-
go_stage_testdata() {
oldwd="$PWD"
cd ${S}/src
@@ -148,43 +137,18 @@ go_stage_testdata() {
cd "$oldwd"
}
-do_install_ptest_base() {
- test -f "${B}/.go_compiled_tests.list" || exit 0
- install -d ${D}${PTEST_PATH}
- go_stage_testdata
- go_make_ptest_wrapper
- havetests=""
- while read test; do
- testdir=`dirname $test`
- testprog=`basename $test`
- install -d ${D}${PTEST_PATH}/$testdir
- install -m 0755 ${B}/src/$test ${D}${PTEST_PATH}/$test
- echo "run_test $testdir $testprog || RC=1" >> ${D}${PTEST_PATH}/run-ptest
- havetests="yes"
- done < ${B}/.go_compiled_tests.list
- if [ -n "$havetests" ]; then
- echo "exit \$RC" >> ${D}${PTEST_PATH}/run-ptest
- chmod +x ${D}${PTEST_PATH}/run-ptest
- else
- rm -rf ${D}${PTEST_PATH}
- fi
- do_install_ptest
- chown -R root:root ${D}${PTEST_PATH}
-}
-
EXPORT_FUNCTIONS do_unpack do_configure do_compile do_install
FILES_${PN}-dev = "${libdir}/go/src"
FILES_${PN}-staticdev = "${libdir}/go/pkg"
INSANE_SKIP_${PN} += "ldflags"
-INSANE_SKIP_${PN}-ptest += "ldflags"
# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
# doesn't support -buildmode=pie, so skip the QA checking for mips and its
# variants.
python() {
- if 'mips' in d.getVar('TARGET_ARCH'):
+ if 'mips' in d.getVar('TARGET_ARCH') or 'riscv' in d.getVar('TARGET_ARCH'):
d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
else:
d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
diff --git a/external/poky/meta/classes/goarch.bbclass b/external/poky/meta/classes/goarch.bbclass
index b2c94fad..1099b957 100644
--- a/external/poky/meta/classes/goarch.bbclass
+++ b/external/poky/meta/classes/goarch.bbclass
@@ -3,18 +3,32 @@ BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH'), d)}"
BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS'), d)}"
HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH'), d)}"
-HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH'), d)}"
HOST_GO386 = "${@go_map_386(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
HOST_GOMIPS = "${@go_map_mips(d.getVar('HOST_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+HOST_GOARM_class-native = "7"
+HOST_GO386_class-native = "sse2"
+HOST_GOMIPS_class-native = "hardfloat"
HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS'), d)}"
TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH'), d)}"
-TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH'), d)}"
TARGET_GO386 = "${@go_map_386(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
TARGET_GOMIPS = "${@go_map_mips(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'), d)}"
+TARGET_GOARM_class-native = "7"
+TARGET_GO386_class-native = "sse2"
+TARGET_GOMIPS_class-native = "hardfloat"
TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE') == d.getVar('HOST_GOTUPLE')]}"
+# Use MACHINEOVERRIDES to map the ARM CPU architecture that is passed to Go via GOARM.
+# This is combined with *_ARCH to set HOST_GOARM and TARGET_GOARM.
+BASE_GOARM = ''
+BASE_GOARM_armv7ve = '7'
+BASE_GOARM_armv7a = '7'
+BASE_GOARM_armv6 = '6'
+BASE_GOARM_armv5 = '5'
+
# Go supports dynamic linking on a limited set of architectures.
# See the supportsDynlink function in go/src/cmd/compile/internal/gc/main.go
GO_DYNLINK = ""
@@ -23,6 +37,7 @@ GO_DYNLINK_aarch64 = "1"
GO_DYNLINK_x86 = "1"
GO_DYNLINK_x86-64 = "1"
GO_DYNLINK_powerpc64 = "1"
+GO_DYNLINK_powerpc64le = "1"
GO_DYNLINK_class-native = ""
GO_DYNLINK_class-nativesdk = ""
@@ -32,6 +47,7 @@ COMPATIBLE_HOST_linux-gnux32 = "null"
COMPATIBLE_HOST_linux-muslx32 = "null"
COMPATIBLE_HOST_powerpc = "null"
COMPATIBLE_HOST_powerpc64 = "null"
+COMPATIBLE_HOST_powerpc64le = "null"
COMPATIBLE_HOST_mipsarchn32 = "null"
ARM_INSTRUCTION_SET_armv4 = "arm"
@@ -39,9 +55,12 @@ ARM_INSTRUCTION_SET_armv5 = "arm"
ARM_INSTRUCTION_SET_armv6 = "arm"
TUNE_CCARGS_remove = "-march=mips32r2"
-SECURITY_CFLAGS_mipsarch = "${SECURITY_NOPIE_CFLAGS}"
SECURITY_NOPIE_CFLAGS ??= ""
+# go can't be built with ccache:
+# gcc: fatal error: no input files
+CCACHE_DISABLE ?= "1"
+
def go_map_arch(a, d):
import re
if re.match('i.86', a):
@@ -64,18 +83,14 @@ def go_map_arch(a, d):
return 'ppc64'
elif re.match('p(pc|owerpc)(64el)', a):
return 'ppc64le'
+ elif a == 'riscv64':
+ return 'riscv64'
else:
raise bb.parse.SkipRecipe("Unsupported CPU architecture: %s" % a)
-def go_map_arm(a, f, d):
- import re
- if re.match('arm.*', a):
- if 'armv7' in f:
- return '7'
- elif 'armv6' in f:
- return '6'
- elif 'armv5' in f:
- return '5'
+def go_map_arm(a, d):
+ if a.startswith("arm"):
+ return d.getVar('BASE_GOARM')
return ''
def go_map_386(a, f, d):
diff --git a/external/poky/meta/classes/gobject-introspection.bbclass b/external/poky/meta/classes/gobject-introspection.bbclass
index a323c1fc..504f75e2 100644
--- a/external/poky/meta/classes/gobject-introspection.bbclass
+++ b/external/poky/meta/classes/gobject-introspection.bbclass
@@ -6,14 +6,24 @@
# This also sets up autoconf-based recipes to build introspection data (or not),
# depending on distro and machine features (see gobject-introspection-data class).
inherit python3native gobject-introspection-data
+
+# meson: default option name to enable/disable introspection. This matches most
+# projects' configurations. If in doubt, check meson_options.txt in the project's
+# source path.
+GIR_MESON_OPTION ?= 'introspection'
+GIR_MESON_ENABLE_FLAG ?= 'true'
+GIR_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GI_DATA_ENABLED
EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+EXTRA_OEMESON_prepend_class-target = "-D${GIR_MESON_OPTION}=${@bb.utils.contains('GI_DATA_ENABLED', 'True', '${GIR_MESON_ENABLE_FLAG}', '${GIR_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable introspection, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
EXTRA_OECONF_prepend_class-native = "--disable-introspection "
EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
-
-UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
+EXTRA_OEMESON_prepend_class-native = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON_prepend_class-nativesdk = "-D${GIR_MESON_OPTION}=${GIR_MESON_DISABLE_FLAG} "
# Generating introspection data depends on a combination of native and target
# introspection tools, and qemu to run the target tools.
@@ -25,7 +35,7 @@ DEPENDS_append_class-native = " gobject-introspection-native"
DEPENDS_append_class-nativesdk = " gobject-introspection-native"
# This is used by introspection tools to find .gir includes
-export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
do_configure_prepend_class-target () {
# introspection.m4 pre-packaged with upstream tarballs does not yet
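Recipes whose meson_options.txt uses a different toggle name, or a meson 'feature' option rather than a boolean, can adjust the knobs introduced above; a hypothetical example:

    GIR_MESON_OPTION = "gir"
    GIR_MESON_ENABLE_FLAG = "enabled"
    GIR_MESON_DISABLE_FLAG = "disabled"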
diff --git a/external/poky/meta/classes/grub-efi-cfg.bbclass b/external/poky/meta/classes/grub-efi-cfg.bbclass
index 5eeee6c2..3a2cdd69 100644
--- a/external/poky/meta/classes/grub-efi-cfg.bbclass
+++ b/external/poky/meta/classes/grub-efi-cfg.bbclass
@@ -23,10 +23,12 @@ GRUB_TIMEOUT ?= "10"
#FIXME: build this from the machine config
GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
-EFIDIR = "/EFI/BOOT"
GRUB_ROOT ?= "${ROOT}"
APPEND ?= ""
+# Uses MACHINE specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
# Need UUID utility code.
inherit fs-uuid
@@ -86,6 +88,12 @@ python build_efi_cfg() {
for label in labels.split():
localdata = d.createCopy()
+ overrides = localdata.getVar('OVERRIDES')
+ if not overrides:
+ bb.fatal('OVERRIDES not defined')
+
+ localdata.setVar('OVERRIDES', 'grub_' + label + ':' + overrides)
+
for btype in btypes:
cfgfile.write('\nmenuentry \'%s%s\'{\n' % (label, btype[0]))
lb = label
diff --git a/external/poky/meta/classes/grub-efi.bbclass b/external/poky/meta/classes/grub-efi.bbclass
index 90badc03..8fc6999e 100644
--- a/external/poky/meta/classes/grub-efi.bbclass
+++ b/external/poky/meta/classes/grub-efi.bbclass
@@ -1,39 +1,8 @@
inherit grub-efi-cfg
+require conf/image-uefi.conf
efi_populate() {
- # DEST must be the root of the image so that EFIDIR is not
- # nested under a top level directory.
- DEST=$1
-
- install -d ${DEST}${EFIDIR}
-
- GRUB_IMAGE="grub-efi-bootia32.efi"
- DEST_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- GRUB_IMAGE="grub-efi-bootx64.efi"
- DEST_IMAGE="bootx64.efi"
- fi
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}/${DEST_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_IMAGE" >${DEST}/startup.nsh
+ efi_populate_common "$1" grub-efi
install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
}
-
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- # Build a EFI directory to create efi.img
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$GRUB_IMAGE" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
-}
diff --git a/external/poky/meta/classes/gsettings.bbclass b/external/poky/meta/classes/gsettings.bbclass
index eae3dc79..33afc96a 100644
--- a/external/poky/meta/classes/gsettings.bbclass
+++ b/external/poky/meta/classes/gsettings.bbclass
@@ -7,32 +7,36 @@
# TODO use a trigger so that this runs once per package operation run
-
-RDEPENDS_${PN} += "glib-2.0-utils"
-
-FILES_${PN} += "${datadir}/glib-2.0/schemas"
-
-PACKAGE_WRITE_DEPS += "glib-2.0-native"
+GSETTINGS_PACKAGE ?= "${PN}"
+
+python __anonymous() {
+ pkg = d.getVar("GSETTINGS_PACKAGE")
+ if pkg:
+ d.appendVar("PACKAGE_WRITE_DEPS", " glib-2.0-native")
+ d.appendVar("RDEPENDS_" + pkg, " ${MLPREFIX}glib-2.0-utils")
+ d.appendVar("FILES_" + pkg, " ${datadir}/glib-2.0/schemas")
+}
gsettings_postinstrm () {
glib-compile-schemas $D${datadir}/glib-2.0/schemas
}
python populate_packages_append () {
- pkg = d.getVar('PN')
- bb.note("adding gsettings postinst scripts to %s" % pkg)
-
- postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
- if not postinst:
- postinst = '#!/bin/sh\n'
- postinst += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postinst_%s' % pkg, postinst)
-
- bb.note("adding gsettings postrm scripts to %s" % pkg)
-
- postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
- if not postrm:
- postrm = '#!/bin/sh\n'
- postrm += d.getVar('gsettings_postinstrm')
- d.setVar('pkg_postrm_%s' % pkg, postrm)
+ pkg = d.getVar('GSETTINGS_PACKAGE')
+ if pkg:
+ bb.note("adding gsettings postinst scripts to %s" % pkg)
+
+ postinst = d.getVar('pkg_postinst_%s' % pkg) or d.getVar('pkg_postinst')
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('gsettings_postinstrm')
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ bb.note("adding gsettings postrm scripts to %s" % pkg)
+
+ postrm = d.getVar('pkg_postrm_%s' % pkg) or d.getVar('pkg_postrm')
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('gsettings_postinstrm')
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
}
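With the class reworked around GSETTINGS_PACKAGE, a recipe that ships its schemas in a separate package would set it accordingly, e.g. (package name illustrative):

    GSETTINGS_PACKAGE = "${PN}-data"

Setting GSETTINGS_PACKAGE to the empty string disables the dependency, FILES and postinst/postrm handling entirely, since both the anonymous python and populate_packages_append bail out when it is unset.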
diff --git a/external/poky/meta/classes/gtk-doc.bbclass b/external/poky/meta/classes/gtk-doc.bbclass
index b4f67549..7dd662bf 100644
--- a/external/poky/meta/classes/gtk-doc.bbclass
+++ b/external/poky/meta/classes/gtk-doc.bbclass
@@ -10,13 +10,24 @@
GTKDOC_ENABLED ?= "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', \
bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+# meson: default option name to enable/disable gtk-doc. This matches most
+# projects' configurations. If in doubt, check meson_options.txt in the project's
+# source path.
+GTKDOC_MESON_OPTION ?= 'docs'
+GTKDOC_MESON_ENABLE_FLAG ?= 'true'
+GTKDOC_MESON_DISABLE_FLAG ?= 'false'
+
+# Auto enable/disable based on GTKDOC_ENABLED
EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GTKDOC_ENABLED', 'True', '--enable-gtk-doc --enable-gtk-doc-html --disable-gtk-doc-pdf', \
'--disable-gtk-doc', d)} "
+EXTRA_OEMESON_prepend_class-target = "-D${GTKDOC_MESON_OPTION}=${@bb.utils.contains('GTKDOC_ENABLED', 'True', '${GTKDOC_MESON_ENABLE_FLAG}', '${GTKDOC_MESON_DISABLE_FLAG}', d)} "
# When building native recipes, disable gtkdoc, as it is not necessary,
# pulls in additional dependencies, and makes build times longer
EXTRA_OECONF_prepend_class-native = "--disable-gtk-doc "
EXTRA_OECONF_prepend_class-nativesdk = "--disable-gtk-doc "
+EXTRA_OEMESON_prepend_class-native = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
+EXTRA_OEMESON_prepend_class-nativesdk = "-D${GTKDOC_MESON_OPTION}=${GTKDOC_MESON_DISABLE_FLAG} "
# Even though gtkdoc is disabled on -native, gtk-doc package is still
# needed for m4 macros.
@@ -41,7 +52,7 @@ do_compile_prepend_class-target () {
if [ ${GTKDOC_ENABLED} = True ]; then
# Write out a qemu wrapper that will be given to gtkdoc-scangobj so that it
# can run target helper binaries through that.
- qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
+ qemu_binary="${@qemu_wrapper_cmdline(d, '$STAGING_DIR_HOST', ['\\$GIR_EXTRA_LIBS_PATH','$STAGING_DIR_HOST/${libdir}','$STAGING_DIR_HOST/${base_libdir}'])}"
cat > ${B}/gtkdoc-qemuwrapper << EOF
#!/bin/sh
# Use a modules directory which doesn't exist so we don't load random things
@@ -51,6 +62,9 @@ export GIO_MODULE_DIR=${STAGING_LIBDIR}/gio/modules-dummy
GIR_EXTRA_LIBS_PATH=\`find ${B} -name *.so -printf "%h\n"|sort|uniq| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
GIR_EXTRA_LIBS_PATH=\`find ${B} -name .libs| tr '\n' ':'\`\$GIR_EXTRA_LIBS_PATH
+# meson sets this wrongly (only to libs in build-dir); qemu_wrapper_cmdline() and GIR_EXTRA_LIBS_PATH take care of it properly
+unset LD_LIBRARY_PATH
+
if [ -d ".libs" ]; then
$qemu_binary ".libs/\$@"
else
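As with the introspection class, the meson option name can be overridden per recipe when a project does not call its toggle 'docs'; an illustrative override:

    GTKDOC_MESON_OPTION = "gtk_doc"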
diff --git a/external/poky/meta/classes/gtk-icon-cache.bbclass b/external/poky/meta/classes/gtk-icon-cache.bbclass
index d87167ae..91cb4ad4 100644
--- a/external/poky/meta/classes/gtk-icon-cache.bbclass
+++ b/external/poky/meta/classes/gtk-icon-cache.bbclass
@@ -1,12 +1,12 @@
FILES_${PN} += "${datadir}/icons/hicolor"
-DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk-icon-utils-native"
+DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk+3-native"
-PACKAGE_WRITE_DEPS += "gtk-icon-utils-native gdk-pixbuf-native"
+PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native"
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir_native=${libdir_native}
else
@@ -24,7 +24,7 @@ fi
gtk_icon_cache_postrm() {
if [ "x$D" != "x" ]; then
- $INTERCEPT_DIR/postinst_intercept update_icon_cache ${PKG} \
+ $INTERCEPT_DIR/postinst_intercept update_gtk_icon_cache ${PKG} \
mlprefix=${MLPREFIX} \
libdir=${libdir}
else
diff --git a/external/poky/meta/classes/gtk-immodules-cache.bbclass b/external/poky/meta/classes/gtk-immodules-cache.bbclass
index 9bb0af8b..8e783fb4 100644
--- a/external/poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/external/poky/meta/classes/gtk-immodules-cache.bbclass
@@ -22,6 +22,7 @@ else
gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
fi
if [ ! -z `which gtk-query-immodules-3.0` ]; then
+ mkdir -p ${libdir}/gtk-3.0/3.0.0
gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
fi
fi
diff --git a/external/poky/meta/classes/icecc.bbclass b/external/poky/meta/classes/icecc.bbclass
index 7d94525d..d095305e 100644
--- a/external/poky/meta/classes/icecc.bbclass
+++ b/external/poky/meta/classes/icecc.bbclass
@@ -34,6 +34,7 @@ BB_HASHBASE_WHITELIST += "ICECC_PARALLEL_MAKE ICECC_DISABLED ICECC_USER_PACKAGE_
ICECC_DEBUG ICECC_LOGFILE ICECC_REPEAT_RATE ICECC_PREFERRED_HOST \
ICECC_CLANG_REMOTE_CPP ICECC_IGNORE_UNVERIFIED ICECC_TEST_SOCKET \
ICECC_ENV_DEBUG ICECC_SYSTEM_PACKAGE_BL ICECC_SYSTEM_CLASS_BL \
+ ICECC_REMOTE_CPP \
"
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
@@ -56,6 +57,8 @@ ICECC_ENV_VERSION = "2"
# See: https://github.com/icecc/icecream/issues/190
export ICECC_CARET_WORKAROUND ??= "0"
+export ICECC_REMOTE_CPP ??= "0"
+
ICECC_CFLAGS = ""
CFLAGS += "${ICECC_CFLAGS}"
CXXFLAGS += "${ICECC_CFLAGS}"
@@ -70,10 +73,16 @@ ICECC_ENV_DEBUG ??= ""
#
# libgcc-initial - fails with CPP sanity check error if host sysroot contains
# cross gcc built for another target tune/variant
+# pixman - prng_state: TLS reference mismatches non-TLS reference, possibly due to
+# pragma omp threadprivate(prng_state)
+# systemtap - _HelperSDT.c undefs macros and uses the identifiers in macros emitting
+# inline assembly
# target-sdk-provides-dummy - ${HOST_PREFIX} is empty which triggers the "NULL
# prefix" error.
ICECC_SYSTEM_PACKAGE_BL += "\
libgcc-initial \
+ pixman \
+ systemtap \
target-sdk-provides-dummy \
"
@@ -96,7 +105,7 @@ def icecc_dep_prepend(d):
return "icecc-create-env-native"
return ""
-DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
+DEPENDS_prepend = "${@icecc_dep_prepend(d)} "
get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC"
def get_cross_kernel_cc(bb,d):
@@ -129,7 +138,18 @@ def use_icecc(bb,d):
if icecc_is_cross_canadian(bb, d):
return "no"
+ if d.getVar('INHIBIT_DEFAULT_DEPS', False):
+ # We don't have a compiler, so no icecc
+ return "no"
+
pn = d.getVar('PN')
+ bpn = d.getVar('BPN')
+
+ # Blacklist/whitelist checks are made against BPN, because there is a good
+ # chance that if icecc should be skipped for a recipe, it should be skipped
+ # for all the variants of that recipe. PN is still checked in case a user
+ # specified a more specific recipe.
+ check_pn = set([pn, bpn])
system_class_blacklist = (d.getVar('ICECC_SYSTEM_CLASS_BL') or "").split()
user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
@@ -145,11 +165,11 @@ def use_icecc(bb,d):
user_package_whitelist = (d.getVar('ICECC_USER_PACKAGE_WL') or "").split()
package_blacklist = system_package_blacklist + user_package_blacklist
- if pn in package_blacklist:
+ if check_pn & set(package_blacklist):
bb.debug(1, "%s: found in blacklist, disable icecc" % pn)
return "no"
- if pn in user_package_whitelist:
+ if check_pn & set(user_package_whitelist):
bb.debug(1, "%s: found in whitelist, enable icecc" % pn)
return "yes"
@@ -233,7 +253,11 @@ def icecc_get_external_tool(bb, d, tool):
def icecc_get_tool_link(tool, d):
import subprocess
- return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+ try:
+ return subprocess.check_output("readlink -f %s" % tool, shell=True).decode("utf-8")[:-1]
+ except subprocess.CalledProcessError as e:
+ bb.note("icecc: one of the tools probably disappeared during recipe parsing, cmd readlink -f %s returned %d:\n%s" % (tool, e.returncode, e.output.decode("utf-8")))
+ return tool
def icecc_get_path_tool(tool, d):
# This is a little ugly, but we want to make sure we add an actual
@@ -302,6 +326,7 @@ def set_icecc_env():
# dummy python version of set_icecc_env
return
+set_icecc_env[vardepsexclude] += "KERNEL_CC"
set_icecc_env() {
if [ "${@use_icecc(bb, d)}" = "no" ]
then
@@ -331,17 +356,6 @@ set_icecc_env() {
return
fi
- # Create symlinks to icecc in the recipe-sysroot directory
- mkdir -p ${ICE_PATH}
- if [ -n "${KERNEL_CC}" ]; then
- compilers="${@get_cross_kernel_cc(bb,d)}"
- else
- compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
- fi
- for compiler in $compilers; do
- ln -sf ${ICECC_BIN} ${ICE_PATH}/$compiler
- done
-
ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
# cannot use icecc_get_and_check_tool here because it assumes as without target_sys prefix
@@ -360,6 +374,26 @@ set_icecc_env() {
return
fi
+ # Create symlinks to icecc and wrapper-scripts in the recipe-sysroot directory
+ mkdir -p $ICE_PATH/symlinks
+ if [ -n "${KERNEL_CC}" ]; then
+ compilers="${@get_cross_kernel_cc(bb,d)}"
+ else
+ compilers="${HOST_PREFIX}gcc ${HOST_PREFIX}g++"
+ fi
+ for compiler in $compilers; do
+ ln -sf $ICECC_BIN $ICE_PATH/symlinks/$compiler
+ rm -f $ICE_PATH/$compiler
+ cat <<-__EOF__ > $ICE_PATH/$compiler
+ #!/bin/sh -e
+ export ICECC_VERSION=$ICECC_VERSION
+ export ICECC_CC=$ICECC_CC
+ export ICECC_CXX=$ICECC_CXX
+ $ICE_PATH/symlinks/$compiler "\$@"
+ __EOF__
+ chmod 775 $ICE_PATH/$compiler
+ done
+
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
# for target recipes should return something like:
# /OE/tmp-eglibc/sysroots/x86_64-linux/usr/libexec/arm920tt-oe-linux-gnueabi/gcc/arm-oe-linux-gnueabi/4.8.2/as
@@ -379,7 +413,7 @@ set_icecc_env() {
${ICECC_ENV_EXEC} ${ICECC_ENV_DEBUG} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
then
touch "${ICECC_VERSION}.done"
- elif ! wait_for_file "${ICECC_VERSION}.done" 30
+ elif ! wait_for_file "${ICECC_VERSION}.done" 30
then
# locking failed so wait for ${ICECC_VERSION}.done to appear
bbwarn "Timeout waiting for ${ICECC_VERSION}.done"
@@ -392,10 +426,10 @@ set_icecc_env() {
export CCACHE_PATH="$PATH"
export CCACHE_DISABLE="1"
- export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
- bbnote "Using icecc"
+ bbnote "Using icecc path: $ICE_PATH"
+ bbnote "Using icecc tarball: $ICECC_VERSION"
}
do_configure_prepend() {
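For reference, a typical local.conf fragment enabling the class looks like the sketch below (the parallelism value and blacklist entry are only examples):

    INHERIT += "icecc"
    ICECC_PARALLEL_MAKE = "-j 24"
    # skip distributing a troublesome recipe
    ICECC_USER_PACKAGE_BL += "linux-yocto"

With the change above, blacklist/whitelist entries are matched against both PN and BPN, so listing a base recipe also covers its -native and multilib variants.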
diff --git a/external/poky/meta/classes/image-buildinfo.bbclass b/external/poky/meta/classes/image-buildinfo.bbclass
index 87a6a1a4..94c585d4 100644
--- a/external/poky/meta/classes/image-buildinfo.bbclass
+++ b/external/poky/meta/classes/image-buildinfo.bbclass
@@ -16,9 +16,8 @@ IMAGE_BUILDINFO_VARS ?= "DISTRO DISTRO_VERSION"
IMAGE_BUILDINFO_FILE ??= "${sysconfdir}/build"
# From buildhistory.bbclass
-def image_buildinfo_outputvars(vars, listvars, d):
+def image_buildinfo_outputvars(vars, d):
vars = vars.split()
- listvars = listvars.split()
ret = ""
for var in vars:
value = d.getVar(var) or ""
@@ -59,8 +58,7 @@ def buildinfo_target(d):
return ""
# Single and list variables to be read
vars = (d.getVar("IMAGE_BUILDINFO_VARS") or "")
- listvars = (d.getVar("IMAGE_BUILDINFO_LVARS") or "")
- return image_buildinfo_outputvars(vars, listvars, d)
+ return image_buildinfo_outputvars(vars, d)
# Write build information to target filesystem
python buildinfo () {
diff --git a/external/poky/meta/classes/image-live.bbclass b/external/poky/meta/classes/image-live.bbclass
index af71be50..54058b35 100644
--- a/external/poky/meta/classes/image-live.bbclass
+++ b/external/poky/meta/classes/image-live.bbclass
@@ -37,7 +37,7 @@ do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
LABELS_LIVE ?= "boot install"
ROOT_LIVE ?= "root=/dev/ram0"
INITRD_IMAGE_LIVE ?= "${MLPREFIX}core-image-minimal-initramfs"
-INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.${INITRAMFS_FSTYPES}"
LIVE_ROOTFS_TYPE ?= "ext4"
ROOTFS ?= "${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.${LIVE_ROOTFS_TYPE}"
diff --git a/external/poky/meta/classes/image-prelink.bbclass b/external/poky/meta/classes/image-prelink.bbclass
index 04dd57c9..ebf6e6d7 100644
--- a/external/poky/meta/classes/image-prelink.bbclass
+++ b/external/poky/meta/classes/image-prelink.bbclass
@@ -17,6 +17,16 @@ prelink_image () {
pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
echo "Size before prelinking $pre_prelink_size."
+ # The filesystem may not contain sysconfdir so establish what is present
+ # to enable cleanup after temporary creation of sysconfdir if needed
+ presentdir="${IMAGE_ROOTFS}${sysconfdir}"
+ while [ "${IMAGE_ROOTFS}" != "${presentdir}" ] ; do
+ [ ! -d "${presentdir}" ] || break
+ presentdir=`dirname "${presentdir}"`
+ done
+
+ mkdir -p "${IMAGE_ROOTFS}${sysconfdir}"
+
# We need a prelink conf on the filesystem, add one if it's missing
if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
@@ -59,6 +69,13 @@ prelink_image () {
rm $ldsoconf
fi
+ # Remove any directories temporarily created for sysconfdir
+ cleanupdir="${IMAGE_ROOTFS}${sysconfdir}"
+ while [ "${presentdir}" != "${cleanupdir}" ] ; do
+ rmdir "${cleanupdir}"
+ cleanupdir=`dirname ${cleanupdir}`
+ done
+
pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
echo "Size after prelinking $pre_prelink_size."
}
diff --git a/external/poky/meta/classes/image.bbclass b/external/poky/meta/classes/image.bbclass
index 2ff574be..6620a9e9 100644
--- a/external/poky/meta/classes/image.bbclass
+++ b/external/poky/meta/classes/image.bbclass
@@ -24,7 +24,7 @@ POPULATE_SDK_POST_TARGET_COMMAND += "rootfs_sysroot_relativelinks; "
LICENSE ?= "MIT"
PACKAGES = ""
DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
-RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
+RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL} ${IMAGE_INSTALL_DEBUGFS}"
RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
@@ -33,7 +33,7 @@ INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs stateless-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
# Generate companion debugfs?
IMAGE_GEN_DEBUGFS ?= "0"
@@ -124,7 +124,7 @@ python () {
def rootfs_variables(d):
from oe.rootfs import variable_depends
variables = ['IMAGE_DEVICE_TABLE','IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
- 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+ 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS', 'IMAGE_LINGUAS_COMPLEMENTARY',
'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
@@ -305,11 +305,8 @@ fakeroot python do_image_qa () {
bb.build.exec_func(cmd, d)
except oe.utils.ImageQAFailed as e:
qamsg = qamsg + '\tImage QA function %s failed: %s\n' % (e.name, e.description)
- except bb.build.FuncFailed as e:
- qamsg = qamsg + '\tImage QA function %s failed' % e.name
- if e.logfile:
- qamsg = qamsg + ' (log file is located at %s)' % e.logfile
- qamsg = qamsg + '\n'
+ except Exception as e:
+ qamsg = qamsg + '\tImage QA function %s failed\n' % cmd
if qamsg:
imgname = d.getVar('IMAGE_NAME')
@@ -328,7 +325,8 @@ addtask do_image_qa_setscene
def setup_debugfs_variables(d):
d.appendVar('IMAGE_ROOTFS', '-dbg')
- d.appendVar('IMAGE_LINK_NAME', '-dbg')
+ if d.getVar('IMAGE_LINK_NAME'):
+ d.appendVar('IMAGE_LINK_NAME', '-dbg')
d.appendVar('IMAGE_NAME','-dbg')
d.setVar('IMAGE_BUILDING_DEBUGFS', 'true')
debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS')
@@ -500,7 +498,7 @@ python () {
d.prependVarFlag(task, 'postfuncs', 'create_symlinks ')
d.appendVarFlag(task, 'subimages', ' ' + ' '.join(subimages))
d.appendVarFlag(task, 'vardeps', ' ' + ' '.join(vardeps))
- d.appendVarFlag(task, 'vardepsexclude', 'DATETIME DATE ' + ' '.join(vardepsexclude))
+ d.appendVarFlag(task, 'vardepsexclude', ' DATETIME DATE ' + ' '.join(vardepsexclude))
bb.debug(2, "Adding task %s before %s, after %s" % (task, 'do_image_complete', after))
bb.build.addtask(task, 'do_image_complete', after, d)
@@ -528,7 +526,7 @@ def get_rootfs_size(d):
base_size = size_kb * overhead_factor
bb.debug(1, '%f = %d * %f' % (base_size, size_kb, overhead_factor))
base_size2 = max(base_size, rootfs_req_size) + rootfs_extra_space
- bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), overhead_factor))
+ bb.debug(1, '%f = max(%f, %d)[%f] + %d' % (base_size2, base_size, rootfs_req_size, max(base_size, rootfs_req_size), rootfs_extra_space))
base_size = base_size2
if base_size != int(base_size):
@@ -553,14 +551,14 @@ def get_rootfs_size(d):
if rootfs_maxsize:
rootfs_maxsize_int = int(rootfs_maxsize)
if base_size > rootfs_maxsize_int:
- bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+ bb.fatal("The rootfs size %d(K) exceeds IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
(base_size, rootfs_maxsize_int))
# Check the initramfs size against INITRAMFS_MAXSIZE (if set)
if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
initramfs_maxsize_int = int(initramfs_maxsize)
if base_size > initramfs_maxsize_int:
- bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+ bb.error("The initramfs size %d(K) exceeds INITRAMFS_MAXSIZE: %d(K)" % \
(base_size, initramfs_maxsize_int))
bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
@@ -611,6 +609,7 @@ do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
+deltask do_populate_lic
deltask do_populate_sysroot
do_package[noexec] = "1"
deltask do_package_qa
@@ -664,6 +663,13 @@ reproducible_final_image_task () {
find ${IMAGE_ROOTFS} -exec touch -h --date=@$REPRODUCIBLE_TIMESTAMP_ROOTFS {} \;
fi
}
-IMAGE_PREPROCESS_COMMAND_append = " reproducible_final_image_task; "
+
+systemd_preset_all () {
+ if [ -e ${IMAGE_ROOTFS}${root_prefix}/lib/systemd/systemd ]; then
+ systemctl --root="${IMAGE_ROOTFS}" --preset-mode=enable-only preset-all
+ fi
+}
+
+IMAGE_PREPROCESS_COMMAND_append = " ${@ 'systemd_preset_all;' if bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d) and not bb.utils.contains('IMAGE_FEATURES', 'stateless-rootfs', True, False, d) else ''} reproducible_final_image_task; "
CVE_PRODUCT = ""
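The preset hook above is skipped for images that opt out via the newly whitelisted feature; a minimal example for an image recipe or local.conf:

    IMAGE_FEATURES += "stateless-rootfs"

With stateless-rootfs set (or when systemd is not in DISTRO_FEATURES), only reproducible_final_image_task remains in IMAGE_PREPROCESS_COMMAND; otherwise systemd_preset_all runs systemctl preset-all against ${IMAGE_ROOTFS} before the image is assembled.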
diff --git a/external/poky/meta/classes/image_types.bbclass b/external/poky/meta/classes/image_types.bbclass
index c7d9b8d9..f82f1d88 100644
--- a/external/poky/meta/classes/image_types.bbclass
+++ b/external/poky/meta/classes/image_types.bbclass
@@ -54,12 +54,13 @@ def imagetypes_getdepends(d):
# Sort the set so that ordering is consistent
return " ".join(sorted(deps))
-XZ_COMPRESSION_LEVEL ?= "-3"
+XZ_COMPRESSION_LEVEL ?= "-9"
XZ_INTEGRITY_CHECK ?= "crc32"
-XZ_THREADS ?= "-T 0"
ZIP_COMPRESSION_LEVEL ?= "-9"
+ZSTD_COMPRESSION_LEVEL ?= "-3"
+
JFFS2_SUM_EXTRA_ARGS ?= ""
IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
@@ -129,7 +130,7 @@ IMAGE_CMD_tar = "${IMAGE_CMD_TAR} --numeric-owner -cf ${IMGDEPLOYDIR}/${IMAGE_NA
do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
IMAGE_CMD_cpio () {
- (cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
+ (cd ${IMAGE_ROOTFS} && find . | sort | cpio --reproducible -o -H newc >${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
# We only need the /init symlink if we're building the real
# image. The -dbg image doesn't need it! By being clever
# about this we also avoid 'touch' below failing, as it
@@ -270,7 +271,7 @@ IMAGE_TYPES = " \
hddimg \
squashfs squashfs-xz squashfs-lzo squashfs-lz4 \
ubi ubifs multiubi \
- tar tar.gz tar.bz2 tar.xz tar.lz4 \
+ tar tar.gz tar.bz2 tar.xz tar.lz4 tar.zst \
cpio cpio.gz cpio.xz cpio.lzma cpio.lz4 \
wic wic.gz wic.bz2 wic.lzma \
container \
@@ -283,14 +284,15 @@ IMAGE_TYPES = " \
# CONVERSION_CMD/DEPENDS.
COMPRESSIONTYPES ?= ""
-CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 ${COMPRESSIONTYPES}"
+CONVERSIONTYPES = "gz bz2 lzma xz lz4 lzo zip zst sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum bmap u-boot vmdk vdi qcow2 base64 ${COMPRESSIONTYPES}"
CONVERSION_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_gz = "pigz -f -9 -n -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+CONVERSION_CMD_gz = "gzip -f -9 -n -c --rsyncable ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
CONVERSION_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
-CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+CONVERSION_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_DEFAULTS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
CONVERSION_CMD_lz4 = "lz4 -9 -z -l ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
CONVERSION_CMD_lzo = "lzop -9 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
CONVERSION_CMD_zip = "zip ${ZIP_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zip ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+CONVERSION_CMD_zst = "zstd -f -k -T0 -c ${ZSTD_COMPRESSION_LEVEL} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.zst"
CONVERSION_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
CONVERSION_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
CONVERSION_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
@@ -303,6 +305,7 @@ CONVERSION_CMD_u-boot = "mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C none -n
CONVERSION_CMD_vmdk = "qemu-img convert -O vmdk ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vmdk"
CONVERSION_CMD_vdi = "qemu-img convert -O vdi ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.vdi"
CONVERSION_CMD_qcow2 = "qemu-img convert -O qcow2 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.qcow2"
+CONVERSION_CMD_base64 = "base64 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.base64"
CONVERSION_DEPENDS_lzma = "xz-native"
CONVERSION_DEPENDS_gz = "pigz-native"
CONVERSION_DEPENDS_bz2 = "pbzip2-native"
@@ -310,12 +313,14 @@ CONVERSION_DEPENDS_xz = "xz-native"
CONVERSION_DEPENDS_lz4 = "lz4-native"
CONVERSION_DEPENDS_lzo = "lzop-native"
CONVERSION_DEPENDS_zip = "zip-native"
+CONVERSION_DEPENDS_zst = "zstd-native"
CONVERSION_DEPENDS_sum = "mtd-utils-native"
CONVERSION_DEPENDS_bmap = "bmap-tools-native"
-CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native"
-CONVERSION_DEPENDS_vmdk = "qemu-native"
-CONVERSION_DEPENDS_vdi = "qemu-native"
-CONVERSION_DEPENDS_qcow2 = "qemu-native"
+CONVERSION_DEPENDS_u-boot = "u-boot-tools-native"
+CONVERSION_DEPENDS_vmdk = "qemu-system-native"
+CONVERSION_DEPENDS_vdi = "qemu-system-native"
+CONVERSION_DEPENDS_qcow2 = "qemu-system-native"
+CONVERSION_DEPENDS_base64 = "coreutils-native"
RUNNABLE_IMAGE_TYPES ?= "ext2 ext3 ext4"
RUNNABLE_MACHINE_PATTERNS ?= "qemu"
@@ -323,7 +328,7 @@ RUNNABLE_MACHINE_PATTERNS ?= "qemu"
DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
# The IMAGE_TYPES_MASKED variable will be used to mask out from the IMAGE_FSTYPES,
-# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hdddirect, hddimg, iso, etc.
+# images that will not be built at do_rootfs time: vmdk, vdi, qcow2, hddimg, iso, etc.
IMAGE_TYPES_MASKED ?= ""
# bmap requires python3 to be in the PATH
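The new conversion types slot into IMAGE_FSTYPES like any other; an illustrative configuration:

    IMAGE_FSTYPES += "tar.zst wic.gz"
    # zstd defaults to -3 above; raise it for smaller (slower) images
    ZSTD_COMPRESSION_LEVEL = "-19"

The zst conversion pulls in zstd-native via CONVERSION_DEPENDS_zst, and base64 likewise depends on coreutils-native.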
diff --git a/external/poky/meta/classes/image_types_wic.bbclass b/external/poky/meta/classes/image_types_wic.bbclass
index 5b40a9e9..b83308b4 100644
--- a/external/poky/meta/classes/image_types_wic.bbclass
+++ b/external/poky/meta/classes/image_types_wic.bbclass
@@ -3,7 +3,10 @@
WICVARS ?= "\
BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \
IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \
- ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS"
+ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \
+ KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND"
+
+inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)}
WKS_FILE ??= "${IMAGE_BASENAME}.${MACHINE}.wks"
WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
@@ -24,16 +27,17 @@ WIC_CREATE_EXTRA_ARGS ?= ""
IMAGE_CMD_wic () {
out="${IMGDEPLOYDIR}/${IMAGE_NAME}"
+ build_wic="${WORKDIR}/build-wic"
wks="${WKS_FULL_PATH}"
if [ -z "$wks" ]; then
bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
fi
- BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/" ${WIC_CREATE_EXTRA_ARGS}
- mv "$out/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
- rm -rf "$out/"
+ BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR}/${MACHINE}/imgdata/" -e "${IMAGE_BASENAME}" -o "$build_wic/" ${WIC_CREATE_EXTRA_ARGS}
+ mv "$build_wic/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
}
IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES TOPDIR"
+do_image_wic[cleandirs] = "${WORKDIR}/build-wic"
# Rebuild when the wks file or vars in WICVARS change
USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${CONVERSIONTYPES}'.split()), '1', '', d)}"
@@ -43,8 +47,10 @@ do_image_wic[depends] += "${@' '.join('%s-native:do_populate_sysroot' % r for r
# We ensure all artifacts are deployed (e.g. virtual/bootloader)
do_image_wic[recrdeptask] += "do_deploy"
+do_image_wic[deptask] += "do_image_complete"
-WKS_FILE_DEPENDS_DEFAULT = "syslinux-native bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
+WKS_FILE_DEPENDS_DEFAULT = '${@bb.utils.contains_any("BUILD_ARCH", [ 'x86_64', 'i686' ], "syslinux-native", "",d)}'
+WKS_FILE_DEPENDS_DEFAULT += "bmap-tools-native cdrtools-native btrfs-tools-native squashfs-tools-native e2fsprogs-native"
WKS_FILE_DEPENDS_BOOTLOADERS = ""
WKS_FILE_DEPENDS_BOOTLOADERS_x86 = "syslinux grub-efi systemd-boot"
WKS_FILE_DEPENDS_BOOTLOADERS_x86-64 = "syslinux grub-efi systemd-boot"
@@ -73,6 +79,11 @@ python do_write_wks_template () {
wks_file = d.getVar('WKS_FULL_PATH')
with open(wks_file, 'w') as f:
f.write(template_body)
+ f.close()
+ # Copy the finalized wks file to the deploy directory for later use
+ depdir = d.getVar('IMGDEPLOYDIR')
+ basename = d.getVar('IMAGE_BASENAME')
+ bb.utils.copyfile(wks_file, "%s/%s" % (depdir, basename + '-' + os.path.basename(wks_file)))
}
python () {
@@ -101,7 +112,7 @@ python () {
# file in process_wks_template as well, so just put it in
# a variable and let the metadata deal with the deps.
d.setVar('_WKS_TEMPLATE', body)
- bb.build.addtask('do_write_wks_template', 'do_image_wic', None, d)
+ bb.build.addtask('do_write_wks_template', 'do_image_wic', 'do_image', d)
bb.build.addtask('do_image_wic', 'do_image_complete', None, d)
}
@@ -123,6 +134,10 @@ python do_rootfs_wicenv () {
value = d.getVar(var)
if value:
envf.write('%s="%s"\n' % (var, value.strip()))
+ envf.close()
+ # Copy .env file to deploy directory for later use with stand alone wic
+ depdir = d.getVar('IMGDEPLOYDIR')
+ bb.utils.copyfile(os.path.join(outdir, basename) + '.env', os.path.join(depdir, basename) + '.env')
}
addtask do_rootfs_wicenv after do_image before do_image_wic
do_rootfs_wicenv[vardeps] += "${WICVARS}"
diff --git a/external/poky/meta/classes/insane.bbclass b/external/poky/meta/classes/insane.bbclass
index 295feb8a..1d76ae7c 100644
--- a/external/poky/meta/classes/insane.bbclass
+++ b/external/poky/meta/classes/insane.bbclass
@@ -25,15 +25,17 @@ QA_SANE = "True"
WARN_QA ?= "ldflags useless-rpaths rpaths staticdev libdir xorg-driver-abi \
textrel already-stripped incompatible-license files-invalid \
installed-vs-shipped compile-host-path install-host-path \
- pn-overrides infodir build-deps \
+ pn-overrides infodir build-deps src-uri-bad \
unknown-configure-option symlink-to-sysroot multilib \
- invalid-packageconfig host-user-contaminated uppercase-pn \
+ invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \
+ mime mime-xdg unlisted-pkg-lics \
"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \
perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
split-strip packages-list pkgv-undefined var-undefined \
version-going-backwards expanded-d invalid-chars \
- license-checksum dev-elf file-rdeps \
+ license-checksum dev-elf file-rdeps configure-unsafe \
+ configure-gettext perllocalpod shebang-size \
"
# Add usrmerge QA check based on distro feature
ERROR_QA_append = "${@bb.utils.contains('DISTRO_FEATURES', 'usrmerge', ' usrmerge', '', d)}"
@@ -81,6 +83,29 @@ def package_qa_add_message(messages, section, new_msg):
else:
messages[section] = messages[section] + "\n" + new_msg
+QAPATHTEST[shebang-size] = "package_qa_check_shebang_size"
+def package_qa_check_shebang_size(path, name, d, elf, messages):
+ if os.path.islink(path) or elf:
+ return
+
+ try:
+ with open(path, 'rb') as f:
+ stanza = f.readline(130)
+ except IOError:
+ return
+
+ if stanza.startswith(b'#!'):
+ # Found a shebang; check that it does not exceed the allowed length
+ try:
+ stanza = stanza.decode("utf-8")
+ except UnicodeDecodeError:
+ #If it is not a text file, it is not a script
+ return
+
+ if len(stanza) > 129:
+ package_qa_add_message(messages, "shebang-size", "%s: %s maximum shebang size exceeded, the maximum size is 128." % (name, package_qa_clean_path(path, d)))
+ return
+
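The 128-byte limit mirrors the kernel's historical BINPRM_BUF_SIZE for interpreter lines. A package that knowingly ships such a script can waive the check in its recipe, for example:

    INSANE_SKIP_${PN} += "shebang-size"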
QAPATHTEST[libexec] = "package_qa_check_libexec"
def package_qa_check_libexec(path,name, d, elf, messages):
@@ -180,10 +205,50 @@ def package_qa_check_staticdev(path, name, d, elf, messages):
libgcc.a, libgcov.a will be skipped in their packages
"""
- if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
+ if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a") and not '/usr/lib/debug-static/' in path and not '/.debug-static/' in path:
package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
+QAPATHTEST[mime] = "package_qa_check_mime"
+def package_qa_check_mime(path, name, d, elf, messages):
+ """
+ Check if package installs mime types to /usr/share/mime/packages
+ while no inheriting mime.bbclass
+ """
+
+ if d.getVar("datadir") + "/mime/packages" in path and path.endswith('.xml') and not bb.data.inherits_class("mime", d):
+ package_qa_add_message(messages, "mime", "package contains mime types but does not inherit mime: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[mime-xdg] = "package_qa_check_mime_xdg"
+def package_qa_check_mime_xdg(path, name, d, elf, messages):
+ """
+ Check if package installs desktop file containing MimeType and requires
+ mime-xdg.bbclass to create /usr/share/applications/mimeinfo.cache
+ """
+
+ if d.getVar("datadir") + "/applications" in path and path.endswith('.desktop') and not bb.data.inherits_class("mime-xdg", d):
+ mime_type_found = False
+ try:
+ with open(path, 'r') as f:
+ for line in f.read().split('\n'):
+ if 'MimeType' in line:
+ mime_type_found = True
+ break
+ except:
+ # At least libreoffice installs symlinks with absolute paths that are dangling here.
+ # We could implement some magic, but for the few (currently one) affected recipes it is not worth the effort, so just warn:
+ wstr = "%s cannot open %s - is it a symlink with absolute path?\n" % (name, package_qa_clean_path(path,d))
+ wstr += "Please check if (linked) file contains key 'MimeType'.\n"
+ pkgname = name
+ if name == d.getVar('PN'):
+ pkgname = '${PN}'
+ wstr += "If yes: add \'inhert mime-xdg\' and \'MIME_XDG_PACKAGES += \"%s\"\' / if no add \'INSANE_SKIP_%s += \"mime-xdg\"\' to recipe." % (pkgname, pkgname)
+ package_qa_add_message(messages, "mime-xdg", wstr)
+ if mime_type_found:
+ package_qa_add_message(messages, "mime-xdg", "package contains desktop file with key 'MimeType' but does not inhert mime-xdg: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+
def package_qa_check_libdir(d):
"""
Check for wrong library installation paths. For instance, catch
@@ -258,13 +323,6 @@ def package_qa_check_dbg(path, name, d, elf, messages):
package_qa_add_message(messages, "debug-files", "non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
-QAPATHTEST[perms] = "package_qa_check_perm"
-def package_qa_check_perm(path,name,d, elf, messages):
- """
- Check the permission of files
- """
- return
-
QAPATHTEST[arch] = "package_qa_check_arch"
def package_qa_check_arch(path,name,d, elf, messages):
"""
@@ -307,10 +365,10 @@ def package_qa_check_arch(path,name,d, elf, messages):
if not ((machine == elf.machine()) or is_32 or is_bpf):
package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
(oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
- elif not ((bits == elf.abiSize()) or is_32):
+ elif not ((bits == elf.abiSize()) or is_32 or is_bpf):
package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
(bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
- elif not littleendian == elf.isLittleEndian():
+ elif not ((littleendian == elf.isLittleEndian()) or is_bpf):
package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
(littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
@@ -346,9 +404,11 @@ def package_qa_textrel(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if textrel_re.match(line):
sane = False
+ break
if not sane:
- package_qa_add_message(messages, "textrel", "ELF binary '%s' has relocations in .text" % path)
+ path = package_qa_clean_path(path, d, name)
+ package_qa_add_message(messages, "textrel", "%s: ELF binary %s has relocations in .text" % (name, path))
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
@@ -377,11 +437,10 @@ def package_qa_hash_style(path, name, d, elf, messages):
for line in phdrs.split("\n"):
if "SYMTAB" in line:
has_syms = True
- if "GNU_HASH" in line:
+ if "GNU_HASH" or "DT_MIPS_XHASH" in line:
sane = True
- if "[mips32]" in line or "[mips64]" in line:
+ if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl":
sane = True
-
if has_syms and not sane:
package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" % path)
@@ -399,15 +458,12 @@ def package_qa_check_buildpaths(path, name, d, elf, messages):
if os.path.islink(path):
return
- # Ignore ipk and deb's CONTROL dir
- if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
- return
-
tmpdir = bytes(d.getVar('TMPDIR'), encoding="utf-8")
with open(path, 'rb') as f:
file_content = f.read()
if tmpdir in file_content:
- package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+ trimmed = path.replace(os.path.join (d.getVar("PKGDEST"), name), "")
+ package_qa_add_message(messages, "buildpaths", "File %s in package %s contains reference to TMPDIR" % (trimmed, name))
QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -457,7 +513,6 @@ python populate_lic_qa_checksum() {
"""
Check for changes in the license files.
"""
- import tempfile
sane = True
lic_files = d.getVar('LIC_FILES_CHKSUM') or ''
@@ -495,61 +550,45 @@ python populate_lic_qa_checksum() {
if (not beginline) and (not endline):
md5chksum = bb.utils.md5_file(srclicfile)
- with open(srclicfile, 'rb') as f:
- license = f.read()
+ with open(srclicfile, 'r', errors='replace') as f:
+ license = f.read().splitlines()
else:
- fi = open(srclicfile, 'rb')
- fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
- tmplicfile = fo.name;
- lineno = 0
- linesout = 0
- license = []
- for line in fi:
- lineno += 1
- if (lineno >= beginline):
- if ((lineno <= endline) or not endline):
- fo.write(line)
- license.append(line)
- linesout += 1
- else:
- break
- fo.flush()
- fo.close()
- fi.close()
- md5chksum = bb.utils.md5_file(tmplicfile)
- license = b''.join(license)
- os.unlink(tmplicfile)
-
+ with open(srclicfile, 'rb') as f:
+ import hashlib
+ lineno = 0
+ license = []
+ m = hashlib.md5()
+ for line in f:
+ lineno += 1
+ if (lineno >= beginline):
+ if ((lineno <= endline) or not endline):
+ m.update(line)
+ license.append(line.decode('utf-8', errors='replace').rstrip())
+ else:
+ break
+ md5chksum = m.hexdigest()
if recipemd5 == md5chksum:
bb.note (pn + ": md5 checksum matched for ", url)
else:
if recipemd5:
msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
- try:
- license_lines = license.decode('utf-8').split('\n')
- except:
- # License text might not be valid UTF-8, in which
- # case we don't know how to include it in our output
- # and have to skip it.
- pass
- else:
- max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
- if not license_lines or license_lines[-1] != '':
- # Ensure that our license text ends with a line break
- # (will be added with join() below).
- license_lines.append('')
- remove = len(license_lines) - max_lines
- if remove > 0:
- start = max_lines // 2
- end = start + remove - 1
- del license_lines[start:end]
- license_lines.insert(start, '...')
- msg = msg + "\n" + pn + ": Here is the selected license text:" + \
- "\n" + \
- "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
- "\n" + "\n".join(license_lines) + \
- "{:^^70}".format(" endline=%d " % endline if endline else "")
+ max_lines = int(d.getVar('QA_MAX_LICENSE_LINES') or 20)
+ if not license or license[-1] != '':
+ # Ensure that our license text ends with a line break
+ # (will be added with join() below).
+ license.append('')
+ remove = len(license) - max_lines
+ if remove > 0:
+ start = max_lines // 2
+ end = start + remove - 1
+ del license[start:end]
+ license.insert(start, '...')
+ msg = msg + "\n" + pn + ": Here is the selected license text:" + \
+ "\n" + \
+ "{:v^70}".format(" beginline=%d " % beginline if beginline else "") + \
+ "\n" + "\n".join(license) + \
+ "{:^^70}".format(" endline=%d " % endline if endline else "")
if beginline:
if endline:
srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
@@ -570,7 +609,7 @@ python populate_lic_qa_checksum() {
bb.fatal("Fatal QA errors found, failing task.")
}
-def package_qa_check_staged(path,d):
+def qa_check_staged(path,d):
"""
Check staged la and pc files for common problems like references to the work
directory.
@@ -589,20 +628,31 @@ def package_qa_check_staged(path,d):
else:
pkgconfigcheck = tmpdir
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_la = False
+ if 'la' in skip:
+ bb.note("Recipe %s skipping qa checking: la" % d.getVar('PN'))
+ skip_la = True
+
+ skip_pkgconfig = False
+ if 'pkgconfig' in skip:
+ bb.note("Recipe %s skipping qa checking: pkgconfig" % d.getVar('PN'))
+ skip_pkgconfig = True
+
# find all .la and .pc files
# read the content
# and check for stuff that looks wrong
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
- if file.endswith(".la"):
+ if file.endswith(".la") and not skip_la:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
sane &= package_qa_handle_error("la", error_msg, d)
- elif file.endswith(".pc"):
+ elif file.endswith(".pc") and not skip_pkgconfig:
with open(path) as f:
file_content = f.read()
file_content = file_content.replace(recipesysroot, "")
@@ -733,25 +783,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
filerdepends[subkey] = key[13:]
if filerdepends:
- next = rdepends
done = rdepends[:]
- # Find all the rdepends on the dependency chain
- while next:
- new = []
- for rdep in next:
- rdep_data = oe.packagedata.read_subpkgdata(rdep, d)
- sub_rdeps = rdep_data.get("RDEPENDS_" + rdep)
- if not sub_rdeps:
- continue
- for sub_rdep in bb.utils.explode_deps(sub_rdeps):
- if sub_rdep in done:
- continue
- if oe.packagedata.has_subpkgdata(sub_rdep, d):
- # It's a new rdep
- done.append(sub_rdep)
- new.append(sub_rdep)
- next = new
-
# Add the rprovides of itself
if pkg not in done:
done.insert(0, pkg)
@@ -824,6 +856,23 @@ def package_qa_check_usrmerge(pkg, d, messages):
return False
return True
+QAPKGTEST[perllocalpod] = "package_qa_check_perllocalpod"
+def package_qa_check_perllocalpod(pkg, d, messages):
+ """
+ Check that the recipe didn't ship a perllocal.pod file, which shouldn't be
+ installed in a distribution package. cpan.bbclass sets NO_PERLLOCAL=1 to
+ handle this for most recipes.
+ """
+ import glob
+ pkgd = oe.path.join(d.getVar('PKGDEST'), pkg)
+ podpath = oe.path.join(pkgd, d.getVar("libdir"), "perl*", "*", "*", "perllocal.pod")
+
+ matches = glob.glob(podpath)
+ if matches:
+ matches = [package_qa_clean_path(path, d, pkg) for path in matches]
+ msg = "%s contains perllocal.pod (%s), should not be installed" % (pkg, " ".join(matches))
+ package_qa_add_message(messages, "perllocalpod", msg)
+
QAPKGTEST[expanded-d] = "package_qa_check_expanded_d"
def package_qa_check_expanded_d(package, d, messages):
"""
@@ -844,6 +893,25 @@ def package_qa_check_expanded_d(package, d, messages):
sane = False
return sane
+QAPKGTEST[unlisted-pkg-lics] = "package_qa_check_unlisted_pkg_lics"
+def package_qa_check_unlisted_pkg_lics(package, d, messages):
+ """
+ Check that all licenses for a package are among the licenses for the recipe.
+ """
+ pkg_lics = d.getVar('LICENSE_' + package)
+ if not pkg_lics:
+ return True
+
+ recipe_lics_set = oe.license.list_licenses(d.getVar('LICENSE'))
+ unlisted = oe.license.list_licenses(pkg_lics) - recipe_lics_set
+ if not unlisted:
+ return True
+
+ package_qa_add_message(messages, "unlisted-pkg-lics",
+ "LICENSE_%s includes licenses (%s) that are not "
+ "listed in LICENSE" % (package, ' '.join(unlisted)))
+ return False
+
def package_qa_check_encoding(keys, encode, d):
def check_encoding(key, enc):
sane = True
@@ -885,18 +953,28 @@ def package_qa_check_host_user(path, name, d, elf, messages):
if exc.errno != errno.ENOENT:
raise
else:
- rootfs_path = path[len(dest):]
check_uid = int(d.getVar('HOST_USER_UID'))
if stat.st_uid == check_uid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_uid))
return False
check_gid = int(d.getVar('HOST_USER_GID'))
if stat.st_gid == check_gid:
- package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
+ package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, package_qa_clean_path(path, d, name), check_gid))
return False
return True
+QARECIPETEST[src-uri-bad] = "package_qa_check_src_uri"
+def package_qa_check_src_uri(pn, d, messages):
+ import re
+
+ if "${PN}" in d.getVar("SRC_URI", False):
+ package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses PN not BPN" % pn, d)
+
+ for url in d.getVar("SRC_URI").split():
+ if re.search(r"github\.com/.+/.+/archive/.+", url):
+ package_qa_handle_error("src-uri-bad", "%s: SRC_URI uses unstable GitHub archives" % pn, d)
+
# The PACKAGE FUNC to scan each package
python do_package_qa () {
@@ -937,14 +1015,20 @@ python do_package_qa () {
pkgdest = d.getVar('PKGDEST')
packages = set((d.getVar('PACKAGES') or '').split())
- cpath = oe.cachedpath.CachedPath()
global pkgfiles
pkgfiles = {}
for pkg in packages:
pkgfiles[pkg] = []
- for walkroot, dirs, files in cpath.walk(pkgdest + "/" + pkg):
+ pkgdir = os.path.join(pkgdest, pkg)
+ for walkroot, dirs, files in os.walk(pkgdir):
+ # Don't walk into top-level CONTROL or DEBIAN directories as these
+ # are temporary directories created by do_package.
+ if walkroot == pkgdir:
+ for control in ("CONTROL", "DEBIAN"):
+ if control in dirs:
+ dirs.remove(control)
for file in files:
- pkgfiles[pkg].append(walkroot + os.sep + file)
+ pkgfiles[pkg].append(os.path.join(walkroot, file))
# no packages should be scanned
if not packages:
@@ -1017,6 +1101,13 @@ do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
do_package_qa[rdeptask] = "do_packagedata"
addtask do_package_qa after do_packagedata do_package before do_build
+# Add the package specific INSANE_SKIPs to the sstate dependencies
+python() {
+ pkgs = (d.getVar('PACKAGES') or '').split()
+ for pkg in pkgs:
+ d.appendVarFlag("do_package_qa", "vardeps", " INSANE_SKIP_{}".format(pkg))
+}
+
SSTATETASKS += "do_package_qa"
do_package_qa[sstate-inputdirs] = ""
do_package_qa[sstate-outputdirs] = ""
@@ -1027,11 +1118,58 @@ addtask do_package_qa_setscene
python do_qa_staging() {
bb.note("QA checking staging")
-
- if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
+ if not qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
bb.fatal("QA staging was broken by the package built above")
}
+python do_qa_patch() {
+ import subprocess
+
+ ###########################################################################
+ # Check patch.log for fuzz warnings
+ #
+ # Further information on why we check for patch fuzz warnings:
+ # http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+ # https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+ ###########################################################################
+
+ logdir = d.getVar('T')
+ patchlog = os.path.join(logdir,"log.do_patch")
+
+ if os.path.exists(patchlog):
+ fuzzheader = '--- Patch fuzz start ---'
+ fuzzfooter = '--- Patch fuzz end ---'
+ statement = "grep -e '%s' %s > /dev/null" % (fuzzheader, patchlog)
+ if subprocess.call(statement, shell=True) == 0:
+ msg = "Fuzz detected:\n\n"
+ fuzzmsg = ""
+ inFuzzInfo = False
+ f = open(patchlog, "r")
+ for line in f:
+ if fuzzheader in line:
+ inFuzzInfo = True
+ fuzzmsg = ""
+ elif fuzzfooter in line:
+ fuzzmsg = fuzzmsg.replace('\n\n', '\n')
+ msg += fuzzmsg
+ msg += "\n"
+ inFuzzInfo = False
+ elif inFuzzInfo and not 'Now at patch' in line:
+ fuzzmsg += line
+ f.close()
+ msg += "The context lines in the patches can be updated with devtool:\n"
+ msg += "\n"
+ msg += " devtool modify %s\n" % d.getVar('PN')
+ msg += " devtool finish --force-patch-refresh %s <layer_path>\n\n" % d.getVar('PN')
+ msg += "Don't forget to review changes done by devtool!\n"
+ if 'patch-fuzz' in d.getVar('ERROR_QA'):
+ bb.error(msg)
+ elif 'patch-fuzz' in d.getVar('WARN_QA'):
+ bb.warn(msg)
+ msg = "Patch log indicates that patches do not apply cleanly."
+ package_qa_handle_error("patch-fuzz", msg, d)
+}
+
python do_qa_configure() {
import subprocess
@@ -1042,15 +1180,22 @@ python do_qa_configure() {
configs = []
workdir = d.getVar('WORKDIR')
- if bb.data.inherits_class('autotools', d):
+ skip = (d.getVar('INSANE_SKIP') or "").split()
+ skip_configure_unsafe = False
+ if 'configure-unsafe' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-unsafe" % d.getVar('PN'))
+ skip_configure_unsafe = True
+
+ if bb.data.inherits_class('autotools', d) and not skip_configure_unsafe:
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
os.path.join(root,"config.log")
if "config.log" in files:
if subprocess.call(statement, shell=True) == 0:
- bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
-Rerun configure task after fixing this.""")
+ error_msg = """This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
+Rerun configure task after fixing this."""
+ package_qa_handle_error("configure-unsafe", error_msg, d)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
@@ -1061,8 +1206,14 @@ Rerun configure task after fixing this.""")
# Check gettext configuration and dependencies are correct
###########################################################################
+ skip_configure_gettext = False
+ if 'configure-gettext' in skip:
+ bb.note("Recipe %s skipping qa checking: configure-gettext" % d.getVar('PN'))
+ skip_configure_gettext = True
+
cnf = d.getVar('EXTRA_OECONF') or ""
- if "gettext" not in d.getVar('P') and "gcc-runtime" not in d.getVar('P') and "--disable-nls" not in cnf:
+ if not ("gettext" in d.getVar('P') or "gcc-runtime" in d.getVar('P') or \
+ "--disable-nls" in cnf or skip_configure_gettext):
ml = d.getVar("MLPREFIX") or ""
if bb.data.inherits_class('cross-canadian', d):
gt = "nativesdk-gettext"
@@ -1073,18 +1224,22 @@ Rerun configure task after fixing this.""")
for config in configs:
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if subprocess.call(gnu, shell=True) == 0:
- bb.fatal("""%s required but not in DEPENDS for file %s.
-Missing inherit gettext?""" % (gt, config))
+ error_msg = "AM_GNU_GETTEXT used but no inherit gettext"
+ package_qa_handle_error("configure-gettext", error_msg, d)
###########################################################################
# Check unrecognised configure options (with a white list)
###########################################################################
- if bb.data.inherits_class("autotools", d):
+ if bb.data.inherits_class("autotools", d) or bb.data.inherits_class("meson", d):
bb.note("Checking configure output for unrecognised options")
try:
- flag = "WARNING: unrecognized options:"
- log = os.path.join(d.getVar('B'), 'config.log')
- output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ')
+ if bb.data.inherits_class("autotools", d):
+ flag = "WARNING: unrecognized options:"
+ log = os.path.join(d.getVar('B'), 'config.log')
+ if bb.data.inherits_class("meson", d):
+ flag = "WARNING: Unknown options:"
+ log = os.path.join(d.getVar('T'), 'log.do_configure')
+ output = subprocess.check_output(['grep', '-F', flag, log]).decode("utf-8").replace(', ', ' ').replace('"', '')
options = set()
for line in output.splitlines():
options |= set(line.partition(flag)[2].split())
@@ -1123,6 +1278,9 @@ python do_qa_unpack() {
#addtask qa_staging after do_populate_sysroot before do_build
do_populate_sysroot[postfuncs] += "do_qa_staging "
+# Check for patch fuzz
+do_patch[postfuncs] += "do_qa_patch "
+
# Check broken config.log files, for packages requiring Gettext which
# don't have it in DEPENDS.
#addtask qa_configure after do_configure before do_compile
@@ -1164,6 +1322,11 @@ python () {
if prog.search(pn):
package_qa_handle_error("uppercase-pn", 'PN: %s is upper case, this can result in unexpected behavior.' % pn, d)
+ # Some people mistakenly use DEPENDS_${PN} instead of DEPENDS and wonder
+ # why it doesn't work.
+ if (d.getVar(d.expand('DEPENDS_${PN}'))):
+ package_qa_handle_error("pkgvarcheck", "recipe uses DEPENDS_${PN}, should use DEPENDS", d)
+
issues = []
if (d.getVar('PACKAGES') or "").split():
for dep in (d.getVar('QADEPENDS') or "").split():
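The per-package QA knobs referenced in the messages above (inherit mime-xdg, MIME_XDG_PACKAGES, INSANE_SKIP_<pkg>) are set in the recipe, not in this class. A minimal, hypothetical recipe fragment along those lines (the package name and the choice of skipped check are assumptions, not part of this change):

    # Hypothetical recipe fragment; names follow the QA messages above.
    inherit mime-xdg
    MIME_XDG_PACKAGES += "${PN}"

    # Alternatively, suppress the check for a single package. The anonymous
    # python block above adds INSANE_SKIP_<pkg> to the do_package_qa vardeps,
    # so editing this value re-triggers the QA task.
    INSANE_SKIP_${PN} += "mime-xdg"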
diff --git a/external/poky/meta/classes/kernel-devicetree.bbclass b/external/poky/meta/classes/kernel-devicetree.bbclass
index 867b776a..81dda800 100644
--- a/external/poky/meta/classes/kernel-devicetree.bbclass
+++ b/external/poky/meta/classes/kernel-devicetree.bbclass
@@ -52,7 +52,7 @@ do_configure_append() {
do_compile_append() {
for dtbf in ${KERNEL_DEVICETREE}; do
dtb=`normalize_dtb "$dtbf"`
- oe_runmake $dtb
+ oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
done
}
@@ -71,23 +71,23 @@ do_deploy_append() {
dtb=`normalize_dtb "$dtbf"`
dtb_ext=${dtb##*.}
dtb_base_name=`basename $dtb .$dtb_ext`
- install -d ${DEPLOYDIR}
- install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name.$dtb_ext
- ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
+ install -d $deployDir
+ install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name.$dtb_ext
+ ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext $deployDir/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
cat ${D}/${KERNEL_IMAGEDEST}/$type \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
- > ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
- ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
- > ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+ $deployDir/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+ > $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
- ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
+ $deployDir/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
fi
fi
done
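For reference, the zImage/DTB bundling branch above only runs when KERNEL_DEVICETREE_BUNDLE is enabled; a hypothetical machine configuration exercising that path might look like the following (the DTB name and values are placeholders, not taken from this change):

    # Hypothetical machine.conf fragment; values are illustrative only.
    KERNEL_IMAGETYPE = "zImage"
    KERNEL_DEVICETREE = "vendor/board.dtb"
    KERNEL_DEVICETREE_BUNDLE = "1"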
diff --git a/external/poky/meta/classes/kernel-fitimage.bbclass b/external/poky/meta/classes/kernel-fitimage.bbclass
index 4c4fd99f..72b05ff8 100644
--- a/external/poky/meta/classes/kernel-fitimage.bbclass
+++ b/external/poky/meta/classes/kernel-fitimage.bbclass
@@ -4,12 +4,14 @@ python __anonymous () {
kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
if 'fitImage' in kerneltypes.split():
depends = d.getVar("DEPENDS")
- depends = "%s u-boot-mkimage-native dtc-native" % depends
+ depends = "%s u-boot-tools-native dtc-native" % depends
d.setVar("DEPENDS", depends)
uarch = d.getVar("UBOOT_ARCH")
if uarch == "arm64":
replacementtype = "Image"
+ elif uarch == "riscv":
+ replacementtype = "Image"
elif uarch == "mips":
replacementtype = "vmlinuz.bin"
elif uarch == "x86":
@@ -19,9 +21,9 @@ python __anonymous () {
else:
replacementtype = "zImage"
- # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
- # to kernel.bbclass . We have to override it, since we pack zImage
- # (at least for now) into the fitImage .
+ # Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
+ # to kernel.bbclass . We have to override it, since we pack zImage
+ # (at least for now) into the fitImage .
typeformake = d.getVar("KERNEL_IMAGETYPE_FOR_MAKE") or ""
if 'fitImage' in typeformake.split():
d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake.replace('fitImage', replacementtype))
@@ -30,17 +32,30 @@ python __anonymous () {
if image:
d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
+ #check if there are any dtb providers
+ providerdtb = d.getVar("PREFERRED_PROVIDER_virtual/dtb")
+ if providerdtb:
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.appendVarFlag('do_assemble_fitimage_initramfs', 'depends', ' virtual/dtb:do_populate_sysroot')
+ d.setVar('EXTERNAL_KERNEL_DEVICETREE', "${RECIPE_SYSROOT}/boot/devicetree")
+
# Verified boot will sign the fitImage and append the public key to
# U-Boot dtb. We ensure the U-Boot dtb is deployed before assembling
# the fitImage:
- if d.getVar('UBOOT_SIGN_ENABLE') == "1":
+ if d.getVar('UBOOT_SIGN_ENABLE') == "1" and d.getVar('UBOOT_DTB_BINARY'):
uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
- d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_deploy' % uboot_pn)
+ d.appendVarFlag('do_assemble_fitimage', 'depends', ' %s:do_populate_sysroot' % uboot_pn)
}
# Options for the device tree compiler passed to mkimage '-D' feature:
UBOOT_MKIMAGE_DTCOPTS ??= ""
+# fitImage Hash Algo
+FIT_HASH_ALG ?= "sha256"
+
+# fitImage Signature Algo
+FIT_SIGN_ALG ?= "rsa2048"
+
#
# Emit the fitImage ITS header
#
@@ -100,7 +115,7 @@ EOF
# $4 ... Compression type
fitimage_emit_section_kernel() {
- kernel_csum="sha1"
+ kernel_csum="${FIT_HASH_ALG}"
ENTRYPOINT="${UBOOT_ENTRYPOINT}"
if [ -n "${UBOOT_ENTRYSYMBOL}" ]; then
@@ -133,7 +148,7 @@ EOF
# $3 ... Path to DTB image
fitimage_emit_section_dtb() {
- dtb_csum="sha1"
+ dtb_csum="${FIT_HASH_ALG}"
dtb_loadline=""
dtb_ext=${DTB##*.}
@@ -167,7 +182,7 @@ EOF
# $3 ... Path to setup image
fitimage_emit_section_setup() {
- setup_csum="sha1"
+ setup_csum="${FIT_HASH_ALG}"
cat << EOF >> ${1}
setup@${2} {
@@ -194,8 +209,7 @@ EOF
# $3 ... Path to ramdisk image
fitimage_emit_section_ramdisk() {
- ramdisk_csum="sha1"
- ramdisk_ctype="none"
+ ramdisk_csum="${FIT_HASH_ALG}"
ramdisk_loadline=""
ramdisk_entryline=""
@@ -206,24 +220,6 @@ fitimage_emit_section_ramdisk() {
ramdisk_entryline="entry = <${UBOOT_RD_ENTRYPOINT}>;"
fi
- case $3 in
- *.gz)
- ramdisk_ctype="gzip"
- ;;
- *.bz2)
- ramdisk_ctype="bzip2"
- ;;
- *.lzma)
- ramdisk_ctype="lzma"
- ;;
- *.lzo)
- ramdisk_ctype="lzo"
- ;;
- *.lz4)
- ramdisk_ctype="lz4"
- ;;
- esac
-
cat << EOF >> ${1}
ramdisk@${2} {
description = "${INITRAMFS_IMAGE}";
@@ -231,7 +227,7 @@ fitimage_emit_section_ramdisk() {
type = "ramdisk";
arch = "${UBOOT_ARCH}";
os = "linux";
- compression = "${ramdisk_ctype}";
+ compression = "none";
${ramdisk_loadline}
${ramdisk_entryline}
hash@1 {
@@ -252,7 +248,8 @@ EOF
# $6 ... default flag
fitimage_emit_section_config() {
- conf_csum="sha1"
+ conf_csum="${FIT_HASH_ALG}"
+ conf_sign_algo="${FIT_SIGN_ALG}"
if [ -n "${UBOOT_SIGN_ENABLE}" ] ; then
conf_sign_keyname="${UBOOT_SIGN_KEYNAME}"
fi
@@ -334,7 +331,7 @@ EOF
cat << EOF >> ${1}
signature@1 {
- algo = "${conf_csum},rsa2048";
+ algo = "${conf_csum},${conf_sign_algo}";
key-name-hint = "${conf_sign_keyname}";
${sign_line}
};
@@ -373,7 +370,8 @@ fitimage_assemble() {
#
# Step 2: Prepare a DTB image section
#
- if [ -n "${KERNEL_DEVICETREE}" ]; then
+
+ if [ -z "${EXTERNAL_KERNEL_DEVICETREE}" ] && [ -n "${KERNEL_DEVICETREE}" ]; then
dtbcount=1
for DTB in ${KERNEL_DEVICETREE}; do
if echo ${DTB} | grep -q '/dts/'; then
@@ -391,6 +389,15 @@ fitimage_assemble() {
done
fi
+ if [ -n "${EXTERNAL_KERNEL_DEVICETREE}" ]; then
+ dtbcount=1
+ for DTB in $(find "${EXTERNAL_KERNEL_DEVICETREE}" \( -name '*.dtb' -o -name '*.dtbo' \) -printf '%P\n' | sort); do
+ DTB=$(echo "${DTB}" | tr '/' '_')
+ DTBS="${DTBS} ${DTB}"
+ fitimage_emit_section_dtb ${1} ${DTB} "${EXTERNAL_KERNEL_DEVICETREE}/${DTB}"
+ done
+ fi
+
#
# Step 3: Prepare a setup section. (For x86)
#
@@ -456,10 +463,17 @@ fitimage_assemble() {
# Step 7: Sign the image and add public key to U-Boot dtb
#
if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ] ; then
+ add_key_to_u_boot=""
+ if [ -n "${UBOOT_DTB_BINARY}" ]; then
+ # The u-boot.dtb is a symlink to UBOOT_DTB_IMAGE, so we need to copy
+ # both of them, and don't dereference the symlink.
+ cp -P ${STAGING_DATADIR}/u-boot*.dtb ${B}
+ add_key_to_u_boot="-K ${B}/${UBOOT_DTB_BINARY}"
+ fi
uboot-mkimage \
${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
-F -k "${UBOOT_SIGN_KEYDIR}" \
- ${@'-K "${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_BINARY}"' if len('${UBOOT_DTB_BINARY}') else ''} \
+ $add_key_to_u_boot \
-r arch/${ARCH}/boot/${2}
fi
}
@@ -481,7 +495,7 @@ do_assemble_fitimage_initramfs() {
fi
}
-addtask assemble_fitimage_initramfs before do_deploy after do_install
+addtask assemble_fitimage_initramfs before do_deploy after do_bundle_initramfs
kernel_do_deploy[vardepsexclude] = "DATETIME"
@@ -489,21 +503,27 @@ kernel_do_deploy_append() {
# Update deploy directory
if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
echo "Copying fit-image.its source file..."
- install -m 0644 ${B}/fit-image.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/fit-image.its "$deployDir/fitImage-its-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${KERNEL_FIT_LINK_NAME}"
echo "Copying linux.bin file..."
- install -m 0644 ${B}/linux.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/linux.bin $deployDir/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+ ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}"
if [ -n "${INITRAMFS_IMAGE}" ]; then
echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
- install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its
- ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its"
+ ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its "$deployDir/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
- install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin
- ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
+ install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin"
+ ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin "$deployDir/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}"
+ fi
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ] ; then
+ # UBOOT_DTB_IMAGE is a real file, but we can't use
+ # ${UBOOT_DTB_IMAGE} since it contains ${PV}, which is meant
+ # for u-boot, while we are in the kernel env now.
+ install -m 0644 ${B}/u-boot-${MACHINE}*.dtb "$deployDir/"
fi
fi
}
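With the hash and signature algorithms now configurable and signing gated on both UBOOT_SIGN_ENABLE and UBOOT_DTB_BINARY, a signed fitImage setup could be sketched as below (a hypothetical local.conf fragment; the key directory, key name and algorithm choices are placeholders):

    # Hypothetical local.conf fragment for a signed fitImage; placeholders only.
    KERNEL_IMAGETYPES += "fitImage"
    UBOOT_SIGN_ENABLE = "1"
    UBOOT_SIGN_KEYDIR = "/path/to/keys"
    UBOOT_SIGN_KEYNAME = "dev"
    UBOOT_DTB_BINARY = "u-boot.dtb"
    FIT_HASH_ALG = "sha256"
    FIT_SIGN_ALG = "rsa2048"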
diff --git a/external/poky/meta/classes/kernel-module-split.bbclass b/external/poky/meta/classes/kernel-module-split.bbclass
index 67ab4161..221022b7 100644
--- a/external/poky/meta/classes/kernel-module-split.bbclass
+++ b/external/poky/meta/classes/kernel-module-split.bbclass
@@ -44,15 +44,32 @@ python split_kernel_module_packages () {
def extract_modinfo(file):
import tempfile, subprocess
tempfile.tempdir = d.getVar("WORKDIR")
+ compressed = re.match( r'.*\.([xg])z$', file)
tf = tempfile.mkstemp()
tmpfile = tf[1]
- cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
+ if compressed:
+ tmpkofile = tmpfile + ".ko"
+ if compressed.group(1) == 'g':
+ cmd = "gunzip -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ elif compressed.group(1) == 'x':
+ cmd = "xz -dc %s > %s" % (file, tmpkofile)
+ subprocess.check_call(cmd, shell=True)
+ else:
+ msg = "Cannot decompress '%s'" % file
+ raise ValueError(msg)
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", tmpkofile, tmpfile)
+ else:
+ cmd = "%sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("HOST_PREFIX") or "", file, tmpfile)
subprocess.check_call(cmd, shell=True)
- f = open(tmpfile)
+ # errors='replace': Some old kernel versions contain invalid utf-8 characters in mod descriptions (like 0xf6, 'ö')
+ f = open(tmpfile, errors='replace')
l = f.read().split("\000")
f.close()
os.close(tf[0])
os.unlink(tmpfile)
+ if compressed:
+ os.unlink(tmpkofile)
vals = {}
for i in l:
m = modinfoexp.match(i)
@@ -132,7 +149,7 @@ python split_kernel_module_packages () {
kernel_package_name = d.getVar("KERNEL_PACKAGE_NAME") or "kernel"
kernel_version = d.getVar("KERNEL_VERSION")
- module_regex = '^(.*)\.k?o$'
+ module_regex = r'^(.*)\.k?o(?:\.[xg]z)?$'
module_pattern_prefix = d.getVar('KERNEL_MODULE_PACKAGE_PREFIX')
module_pattern_suffix = d.getVar('KERNEL_MODULE_PACKAGE_SUFFIX')
diff --git a/external/poky/meta/classes/kernel-uboot.bbclass b/external/poky/meta/classes/kernel-uboot.bbclass
index 2364053f..87f02654 100644
--- a/external/poky/meta/classes/kernel-uboot.bbclass
+++ b/external/poky/meta/classes/kernel-uboot.bbclass
@@ -3,10 +3,6 @@ uboot_prep_kimage() {
vmlinux_path="arch/${ARCH}/boot/compressed/vmlinux"
linux_suffix=""
linux_comp="none"
- elif [ -e arch/${ARCH}/boot/Image ] ; then
- vmlinux_path="vmlinux"
- linux_suffix=""
- linux_comp="none"
elif [ -e arch/${ARCH}/boot/vmlinuz.bin ]; then
rm -f linux.bin
cp -l arch/${ARCH}/boot/vmlinuz.bin linux.bin
diff --git a/external/poky/meta/classes/kernel-uimage.bbclass b/external/poky/meta/classes/kernel-uimage.bbclass
index c2de6bb4..cedb4fa0 100644
--- a/external/poky/meta/classes/kernel-uimage.bbclass
+++ b/external/poky/meta/classes/kernel-uimage.bbclass
@@ -3,7 +3,7 @@ inherit kernel-uboot
python __anonymous () {
if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
depends = d.getVar("DEPENDS")
- depends = "%s u-boot-mkimage-native" % depends
+ depends = "%s u-boot-tools-native" % depends
d.setVar("DEPENDS", depends)
# Override KERNEL_IMAGETYPE_FOR_MAKE variable, which is internal
diff --git a/external/poky/meta/classes/kernel-yocto.bbclass b/external/poky/meta/classes/kernel-yocto.bbclass
index 496c8a7f..3311f6e8 100644
--- a/external/poky/meta/classes/kernel-yocto.bbclass
+++ b/external/poky/meta/classes/kernel-yocto.bbclass
@@ -1,16 +1,41 @@
# remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
+SRCTREECOVEREDTASKS += "do_validate_branches do_kernel_configcheck do_kernel_checkout do_fetch do_unpack do_patch"
PATCH_GIT_USER_EMAIL ?= "kernel-yocto@oe"
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
+# The distro or local.conf should set this, but if nobody cares...
+LINUX_KERNEL_TYPE ??= "standard"
+
+# KMETA ?= ""
+KBRANCH ?= "master"
+KMACHINE ?= "${MACHINE}"
+SRCREV_FORMAT ?= "meta_machine"
+
+# LEVELS:
+# 0: no reporting
+# 1: report options that are specified, but not in the final config
+# 2: report options that are not hardware related, but set by a BSP
+KCONF_AUDIT_LEVEL ?= "1"
+KCONF_BSP_AUDIT_LEVEL ?= "0"
+KMETA_AUDIT ?= "yes"
+
# returns local (absolute) path names for all valid patches in the
# src_uri
-def find_patches(d):
+def find_patches(d,subdir):
patches = src_patches(d)
patch_list=[]
for p in patches:
- _, _, local, _, _, _ = bb.fetch.decodeurl(p)
- patch_list.append(local)
+ _, _, local, _, _, parm = bb.fetch.decodeurl(p)
+ # if patchdir has been passed, we won't be able to apply it so skip
+ # the patch for now, and special processing happens later
+ patchdir = ''
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if subdir:
+ if subdir == patchdir:
+ patch_list.append(local)
+ else:
+ patch_list.append(local)
return patch_list
@@ -22,7 +47,7 @@ def find_sccs(d):
base, ext = os.path.splitext(os.path.basename(s))
if ext and ext in [".scc", ".cfg"]:
sources_list.append(s)
- elif base and base in 'defconfig':
+ elif base and 'defconfig' in base:
sources_list.append(s)
return sources_list
@@ -74,13 +99,6 @@ do_kernel_metadata() {
fi
fi
- machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
- machine_srcrev="${SRCREV_machine}"
- if [ -z "${machine_srcrev}" ]; then
- # fallback to SRCREV if a non machine_meta tree is being built
- machine_srcrev="${SRCREV}"
- fi
-
# In a similar manner to the kernel itself:
#
# defconfig: $(obj)/conf
@@ -113,24 +131,47 @@ do_kernel_metadata() {
else
cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig
fi
- sccs="${WORKDIR}/defconfig"
+ in_tree_defconfig="${WORKDIR}/defconfig"
else
bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree"
fi
fi
+ # was anyone trying to patch the kernel meta data? We need to do
+ # this here, since the scc commands migrate the .cfg fragments to the
+ # kernel source tree, where they'll be used later.
+ check_git_config
+ patches="${@" ".join(find_patches(d,'kernel-meta'))}"
+ for p in $patches; do
+ (
+ cd ${WORKDIR}/kernel-meta
+ git am -s $p
+ )
+ done
+
sccs_from_src_uri="${@" ".join(find_sccs(d))}"
- patches="${@" ".join(find_patches(d))}"
+ patches="${@" ".join(find_patches(d,''))}"
feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}"
- # a quick check to make sure we don't have duplicate defconfigs
- # If there's a defconfig in the SRC_URI, did we also have one from
- # the KBUILD_DEFCONFIG processing above ?
- if [ -n "$sccs" ]; then
- # we did have a defconfig from above. remove any that might be in the src_uri
- sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ')
+ # a quick check to make sure we don't have duplicate defconfigs. If
+ # there's a defconfig in the SRC_URI, did we also have one from the
+ # KBUILD_DEFCONFIG processing above ?
+ src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ')
+ # drop any defconfigs from the src_uri variable; we captured it just above here if it existed
+ sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ')
+
+ if [ -n "$in_tree_defconfig" ]; then
+ sccs_defconfig=$in_tree_defconfig
+ if [ -n "$src_uri_defconfig" ]; then
+ bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig"
+ fi
+ else
+ # if we didn't have an in-tree one, make our defconfig the one
+ # from the src_uri. Note: there may not have been one from the
+ # src_uri, so this can be an empty variable.
+ sccs_defconfig=$src_uri_defconfig
fi
- sccs="$sccs $sccs_from_src_uri"
+ sccs="$sccs_from_src_uri"
# check for feature directories/repos/branches that were part of the
# SRC_URI. If they were supplied, we convert them into include directives
@@ -138,10 +179,10 @@ do_kernel_metadata() {
for f in ${feat_dirs}; do
if [ -d "${WORKDIR}/$f/meta" ]; then
includes="$includes -I${WORKDIR}/$f/kernel-meta"
- elif [ -d "${WORKDIR}/$f" ]; then
- includes="$includes -I${WORKDIR}/$f"
elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
includes="$includes -I${WORKDIR}/../oe-local-files/$f"
+ elif [ -d "${WORKDIR}/$f" ]; then
+ includes="$includes -I${WORKDIR}/$f"
fi
done
for s in ${sccs} ${patches}; do
@@ -157,23 +198,37 @@ do_kernel_metadata() {
# expand kernel features into their full path equivalents
bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE})
if [ -z "$bsp_definition" ]; then
- echo "$sccs" | grep -q defconfig
- if [ $? -ne 0 ]; then
+ if [ -z "$sccs_defconfig" ]; then
bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided"
fi
+ else
+ # if the bsp definition has "define KMETA_EXTERNAL_BSP t",
+ # then we need to set a flag that will instruct the next
+ # steps to use the BSP as both configuration and patches.
+ grep -q KMETA_EXTERNAL_BSP $bsp_definition
+ if [ $? -eq 0 ]; then
+ KMETA_EXTERNAL_BSPS="t"
+ fi
fi
meta_dir=$(kgit --meta)
# run1: pull all the configuration fragments, no matter where they come from
- elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`"
+ elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} ${KERNEL_FEATURES}`"
if [ -n "${elements}" ]; then
echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition
- scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}
+ scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches ${KERNEL_FEATURES}
if [ $? -ne 0 ]; then
bbfatal_log "Could not generate configuration queue for ${KMACHINE}."
fi
fi
+ # if KMETA_EXTERNAL_BSPS has been set, or it has been detected from
+ # the bsp definition, then we inject the bsp_definition into the
+ # patch phase below. we'll piggy back on the sccs variable.
+ if [ -n "${KMETA_EXTERNAL_BSPS}" ]; then
+ sccs="${bsp_definition} ${sccs}"
+ fi
+
# run2: only generate patches for elements that have been passed on the SRC_URI
elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`"
if [ -n "${elements}" ]; then
@@ -272,7 +327,7 @@ do_kernel_checkout() {
}
do_kernel_checkout[dirs] = "${S}"
-addtask kernel_checkout before do_kernel_metadata after do_unpack
+addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc
addtask kernel_metadata after do_validate_branches do_unpack before do_patch
do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot"
do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
@@ -280,10 +335,9 @@ do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot"
do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}gcc:do_populate_sysroot"
do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_populate_sysroot"
+do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot"
do_kernel_configme[dirs] += "${S} ${B}"
do_kernel_configme() {
- set +e
-
# translate the kconfig_mode into something that merge_config.sh
# understands
case ${KCONFIG_MODE} in
@@ -309,13 +363,20 @@ do_kernel_configme() {
bbfatal_log "Could not find configuration queue (${meta_dir}/config.queue)"
fi
- CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
- if [ $? -ne 0 ]; then
- bbfatal_log "Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ CFLAGS="${CFLAGS} ${TOOLCHAIN_OPTIONS}" HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" ARCH=${ARCH} merge_config.sh -O ${B} ${config_flags} ${configs} > ${meta_dir}/cfg/merge_config_build.log 2>&1
+ if [ $? -ne 0 -o ! -f ${B}/.config ]; then
+ bberror "Could not generate a .config for ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ if [ ${KCONF_AUDIT_LEVEL} -gt 1 ]; then
+ bbfatal_log "`cat ${meta_dir}/cfg/merge_config_build.log`"
+ else
+ bbfatal_log "Details can be found at: ${S}/${meta_dir}/cfg/merge_config_build.log"
+ fi
fi
- echo "# Global settings from linux recipe" >> ${B}/.config
- echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+ if [ ! -z "${LINUX_VERSION_EXTENSION}" ]; then
+ echo "# Global settings from linux recipe" >> ${B}/.config
+ echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+ fi
}
addtask kernel_configme before do_configure after do_patch
@@ -334,6 +395,7 @@ python do_kernel_configcheck() {
env = os.environ.copy()
env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
+ env['LD'] = "${KERNEL_LD}"
try:
configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
@@ -437,4 +499,15 @@ python () {
# If diffconfig is available, ensure it runs after kernel_configme
if 'do_diffconfig' in d:
bb.build.addtask('do_diffconfig', None, 'do_kernel_configme', d)
+
+ externalsrc = d.getVar('EXTERNALSRC')
+ if externalsrc:
+ # If we deltask do_patch, do_kernel_configme is left without
+ # dependencies and runs too early
+ d.setVarFlag('do_kernel_configme', 'deps', (d.getVarFlag('do_kernel_configme', 'deps', False) or []) + ['do_unpack'])
}
+
+# extra tasks
+addtask kernel_version_sanity_check after do_kernel_metadata do_kernel_checkout before do_compile
+addtask validate_branches before do_patch after do_kernel_checkout
+addtask kernel_configcheck after do_configure before do_compile
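The defaults added at the top of this class (KBRANCH, KMACHINE, LINUX_KERNEL_TYPE and the KCONF audit levels) are meant to be overridden per BSP or recipe; a hypothetical bbappend illustrating the knobs (the branch and machine names are examples only):

    # Hypothetical linux-yocto bbappend fragment; values are examples only.
    KBRANCH = "v5.4/standard/base"
    KMACHINE = "mymachine"
    LINUX_KERNEL_TYPE = "standard"
    KCONF_AUDIT_LEVEL = "2"
    KCONF_BSP_AUDIT_LEVEL = "1"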
diff --git a/external/poky/meta/classes/kernel.bbclass b/external/poky/meta/classes/kernel.bbclass
index c72d1fe7..9e3c34ad 100644
--- a/external/poky/meta/classes/kernel.bbclass
+++ b/external/poky/meta/classes/kernel.bbclass
@@ -5,9 +5,11 @@ KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel")
PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }"
DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native"
+DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}"
PACKAGE_WRITE_DEPS += "depmodwrapper-cross"
do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot"
+do_clean[depends] += "make-mod-scripts:do_clean"
CVE_PRODUCT ?= "linux_kernel"
@@ -95,6 +97,9 @@ python __anonymous () {
d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
image = d.getVar('INITRAMFS_IMAGE')
+ # If the INITRAMFS_IMAGE is set but the INITRAMFS_IMAGE_BUNDLE is set to 0,
+ # the do_bundle_initramfs does nothing, but the INITRAMFS_IMAGE is built
+ # standalone for use by wic and other tools.
if image:
d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
@@ -130,7 +135,7 @@ inherit ${KERNEL_CLASSES}
# the symlink.
do_unpack[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
do_clean[cleandirs] += " ${S} ${STAGING_KERNEL_DIR} ${B} ${STAGING_KERNEL_BUILDDIR}"
-base_do_unpack_append () {
+python do_symlink_kernsrc () {
s = d.getVar("S")
if s[-1] == '/':
# drop trailing slash, so that os.symlink(kernsrc, s) doesn't use s as directory name and fail
@@ -147,6 +152,7 @@ base_do_unpack_append () {
shutil.move(s, kernsrc)
os.symlink(kernsrc, s)
}
+addtask symlink_kernsrc before do_configure after do_unpack
inherit kernel-arch deploy
@@ -205,7 +211,7 @@ copy_initramfs() {
;;
*lz4)
echo "lz4 decompressing image"
- lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img
+ lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
break
;;
*lzo)
@@ -289,14 +295,10 @@ kernel_do_compile() {
# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
# be set....
if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
- olddir=`pwd`
- cd ${S}
- SOURCE_DATE_EPOCH=`git log -1 --pretty=%ct`
- # git repo not guaranteed, so fall back to REPRODUCIBLE_TIMESTAMP_ROOTFS
- if [ $? -ne 0 ]; then
- SOURCE_DATE_EPOCH=${REPRODUCIBLE_TIMESTAMP_ROOTFS}
- fi
- cd $olddir
+ # The source directory is not necessarily a git repository, so we
+ # specify the git-dir to ensure that git does not query a
+ # repository in any parent directory.
+ SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
fi
ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
@@ -330,6 +332,21 @@ kernel_do_compile() {
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
+ # kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
+ # be set....
+ if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
+ # The source directory is not necessarily a git repository, so we
+ # specify the git-dir to ensure that git does not query a
+ # repository in any parent directory.
+ SOURCE_DATE_EPOCH=`git --git-dir="${S}/.git" log -1 --pretty=%ct 2>/dev/null || echo "${REPRODUCIBLE_TIMESTAMP_ROOTFS}"`
+ fi
+
+ ts=`LC_ALL=C date -d @$SOURCE_DATE_EPOCH`
+ export KBUILD_BUILD_TIMESTAMP="$ts"
+ export KCONFIG_NOTIMESTAMP=1
+ bbnote "KBUILD_BUILD_TIMESTAMP: $ts"
+ fi
if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
cc_extra=$(get_cc_option)
oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
@@ -447,7 +464,7 @@ do_shared_workdir () {
# Copy files required for module builds
cp System.map $kerneldir/System.map-${KERNEL_VERSION}
- cp Module.symvers $kerneldir/
+ [ -e Module.symvers ] && cp Module.symvers $kerneldir/
cp .config $kerneldir/
mkdir -p $kerneldir/include/config
cp include/config/kernel.release $kerneldir/include/config/kernel.release
@@ -487,6 +504,15 @@ do_shared_workdir () {
mkdir -p $kerneldir/arch/${ARCH}/include/generated/
cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
fi
+
+ if (grep -q -i -e '^CONFIG_UNWINDER_ORC=y$' $kerneldir/.config); then
+ # With CONFIG_UNWINDER_ORC (the default in 4.14), objtool is required for
+ # out-of-tree modules to be able to generate object files.
+ if [ -x tools/objtool/objtool ]; then
+ mkdir -p ${kerneldir}/tools/objtool
+ cp tools/objtool/objtool ${kerneldir}/tools/objtool/
+ fi
+ fi
}
# We don't need to stage anything, not the modules/firmware since those would clash with linux-firmware
@@ -494,7 +520,7 @@ sysroot_stage_all () {
:
}
-KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" oldnoconfig"
+KERNEL_CONFIG_COMMAND ?= "oe_runmake_call -C ${S} CC="${KERNEL_CC}" LD="${KERNEL_LD}" O=${B} olddefconfig || oe_runmake -C ${S} O=${B} CC="${KERNEL_CC}" LD="${KERNEL_LD}" oldnoconfig"
python check_oldest_kernel() {
oldest_kernel = d.getVar('OLDEST_KERNEL')
@@ -547,7 +573,7 @@ EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-image becomes kernel-image-${KERNEL_VERSION}
PACKAGES = "${KERNEL_PACKAGE_NAME} ${KERNEL_PACKAGE_NAME}-base ${KERNEL_PACKAGE_NAME}-vmlinux ${KERNEL_PACKAGE_NAME}-image ${KERNEL_PACKAGE_NAME}-dev ${KERNEL_PACKAGE_NAME}-modules"
FILES_${PN} = ""
-FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin"
+FILES_${KERNEL_PACKAGE_NAME}-base = "${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.order ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/modules.builtin.modinfo"
FILES_${KERNEL_PACKAGE_NAME}-image = ""
FILES_${KERNEL_PACKAGE_NAME}-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} ${nonarch_base_libdir}/modules/${KERNEL_VERSION}/build"
FILES_${KERNEL_PACKAGE_NAME}-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}"
@@ -556,9 +582,9 @@ RDEPENDS_${KERNEL_PACKAGE_NAME} = "${KERNEL_PACKAGE_NAME}-base"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
RDEPENDS_${KERNEL_PACKAGE_NAME}-base ?= "${KERNEL_PACKAGE_NAME}-image"
-PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
+PKG_${KERNEL_PACKAGE_NAME}-image = "${KERNEL_PACKAGE_NAME}-image-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
RDEPENDS_${KERNEL_PACKAGE_NAME}-image += "${@oe.utils.conditional('KERNEL_IMAGETYPE', 'vmlinux', '${KERNEL_PACKAGE_NAME}-vmlinux', '', d)}"
-PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name('${KERNEL_VERSION}')}"
+PKG_${KERNEL_PACKAGE_NAME}-base = "${KERNEL_PACKAGE_NAME}-${@legitimize_package_name(d.getVar('KERNEL_VERSION'))}"
RPROVIDES_${KERNEL_PACKAGE_NAME}-base += "${KERNEL_PACKAGE_NAME}-${KERNEL_VERSION}"
ALLOW_EMPTY_${KERNEL_PACKAGE_NAME} = "1"
ALLOW_EMPTY_${KERNEL_PACKAGE_NAME}-base = "1"
@@ -580,7 +606,7 @@ pkg_postinst_${KERNEL_PACKAGE_NAME}-base () {
PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
python split_kernel_packages () {
- do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='${nonarch_base_libdir}/firmware', file_regex=r'^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='${KERNEL_PACKAGE_NAME}-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
}
# Many scripts want to look in arch/$arch/boot for the bootable
@@ -599,6 +625,9 @@ do_kernel_link_images() {
if [ -f ../../../vmlinuz.bin ]; then
ln -sf ../../../vmlinuz.bin
fi
+ if [ -f ../../../vmlinux.64 ]; then
+ ln -sf ../../../vmlinux.64
+ fi
}
addtask kernel_link_images after do_compile before do_strip
diff --git a/external/poky/meta/classes/kernelsrc.bbclass b/external/poky/meta/classes/kernelsrc.bbclass
index 675d40ec..a951ba33 100644
--- a/external/poky/meta/classes/kernelsrc.bbclass
+++ b/external/poky/meta/classes/kernelsrc.bbclass
@@ -1,7 +1,7 @@
S = "${STAGING_KERNEL_DIR}"
deltask do_fetch
deltask do_unpack
-do_patch[depends] += "virtual/kernel:do_patch"
+do_patch[depends] += "virtual/kernel:do_shared_workdir"
do_patch[noexec] = "1"
do_package[depends] += "virtual/kernel:do_populate_sysroot"
KERNEL_VERSION = "${@get_kernelversion_file("${STAGING_KERNEL_BUILDDIR}")}"
diff --git a/external/poky/meta/classes/libc-common.bbclass b/external/poky/meta/classes/libc-common.bbclass
deleted file mode 100644
index 0e351b67..00000000
--- a/external/poky/meta/classes/libc-common.bbclass
+++ /dev/null
@@ -1,37 +0,0 @@
-do_install() {
- oe_runmake install_root=${D} install
- install -Dm 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ld.so.conf
- install -d ${D}${localedir}
- make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
- # get rid of some broken files...
- for i in ${GLIBC_BROKEN_LOCALES}; do
- sed -i "/$i/d" ${WORKDIR}/SUPPORTED
- done
- rm -f ${D}${sysconfdir}/rpc
- rm -rf ${D}${datadir}/zoneinfo
- rm -rf ${D}${libexecdir}/getconf
-}
-
-def get_libc_fpu_setting(bb, d):
- if d.getVar('TARGET_FPU') in [ 'soft', 'ppc-efd' ]:
- return "--without-fp"
- return ""
-
-python populate_packages_prepend () {
- if d.getVar('DEBIAN_NAMES'):
- pkgs = d.getVar('PACKAGES').split()
- bpn = d.getVar('BPN')
- prefix = d.getVar('MLPREFIX') or ""
- # Set the base package...
- d.setVar('PKG_' + prefix + bpn, prefix + 'libc6')
- libcprefix = prefix + bpn + '-'
- for p in pkgs:
- # And all the subpackages.
- if p.startswith(libcprefix):
- renamed = p.replace(bpn, 'libc6', 1)
- d.setVar('PKG_' + p, renamed)
- # For backward compatibility with old -dbg package
- d.appendVar('RPROVIDES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
- d.appendVar('RCONFLICTS_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
- d.appendVar('RREPLACES_' + libcprefix + 'dbg', ' ' + prefix + 'libc-dbg')
-}
diff --git a/external/poky/meta/classes/libc-package.bbclass b/external/poky/meta/classes/libc-package.bbclass
index 345ec298..de3b4250 100644
--- a/external/poky/meta/classes/libc-package.bbclass
+++ b/external/poky/meta/classes/libc-package.bbclass
@@ -37,16 +37,11 @@ python __anonymous () {
d.setVar("DEPENDS", depends)
d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
break
-
- # try to fix disable charsets/locales/locale-code compile fail
- if bb.utils.contains('DISTRO_FEATURES', 'libc-charsets', True, False, d) and \
- bb.utils.contains('DISTRO_FEATURES', 'libc-locales', True, False, d) and \
- bb.utils.contains('DISTRO_FEATURES', 'libc-locale-code', True, False, d):
- d.setVar('PACKAGE_NO_GCONV', '0')
- else:
- d.setVar('PACKAGE_NO_GCONV', '1')
}
+# try to fix disable charsets/locales/locale-code compile fail
+PACKAGE_NO_GCONV ?= "0"
+
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
locale_base_postinst_ontarget() {
@@ -69,9 +64,14 @@ do_prep_locale_tree() {
for i in $treedir/${datadir}/i18n/charmaps/*gz; do
gunzip $i
done
- tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir}
- if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
- tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
+ # The extract pattern "./l*.so*" is carefully selected so that it will
+ # match ld*.so and lib*.so*, but not any files in the gconv directory
+ # (if it exists). This makes sure we only unpack the files we need.
+ # This is important in case usrmerge is set in DISTRO_FEATURES, which
+ # means ${base_libdir} == ${libdir}.
+ tar -cf - -C ${LOCALETREESRC}${base_libdir} -p . | tar -xf - -C $treedir/${base_libdir} --wildcards './l*.so*'
+ if [ -f ${STAGING_LIBDIR_NATIVE}/libgcc_s.* ]; then
+ tar -cf - -C ${STAGING_LIBDIR_NATIVE} -p libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
fi
install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
}
@@ -82,6 +82,9 @@ do_collect_bins_from_locale_tree() {
parent=$(dirname ${localedir})
mkdir -p ${PKGD}/$parent
tar -cf - -C $treedir/$parent -p $(basename ${localedir}) | tar -xf - -C ${PKGD}$parent
+
+ # Finalize tree by changing all duplicate files into hard links
+ cross-localedef-hardlink -c -v ${WORKDIR}/locale-tree
}
inherit qemu
@@ -115,8 +118,8 @@ python package_do_split_gconvs () {
def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -130,15 +133,15 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
+ do_split_packages(d, gconv_libdir, file_regex=r'^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
extra_depends=bpn+'-gconv')
def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -152,14 +155,14 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
+ do_split_packages(d, charmap_dir, file_regex=r'^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "rb")
- c_re = re.compile('^copy "(.*)"')
- i_re = re.compile('^include "(\w+)".*')
+ c_re = re.compile(r'^copy "(.*)"')
+ i_re = re.compile(r'^include "(\w+)".*')
for l in f.readlines():
l = l.decode("latin-1")
m = c_re.match(l) or i_re.match(l)
@@ -173,13 +176,13 @@ python package_do_split_gconvs () {
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
- do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
+ do_split_packages(d, locales_dir, file_regex=r'(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
d.setVar('PACKAGES', d.getVar('PACKAGES', False) + ' ' + d.getVar('MLPREFIX', False) + bpn + '-gconv')
use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE")
- dot_re = re.compile("(.*)\.(.*)")
+ dot_re = re.compile(r"(.*)\.(.*)")
# Read in supported locales and associated encodings
supported = {}
@@ -236,6 +239,8 @@ python package_do_split_gconvs () {
if use_cross_localedef == "1":
target_arch = d.getVar('TARGET_ARCH')
locale_arch_options = { \
+ "arc": " --uint32-align=4 --little-endian ", \
+ "arceb": " --uint32-align=4 --big-endian ", \
"arm": " --uint32-align=4 --little-endian ", \
"armeb": " --uint32-align=4 --big-endian ", \
"aarch64": " --uint32-align=4 --little-endian ", \
@@ -243,6 +248,7 @@ python package_do_split_gconvs () {
"sh4": " --uint32-align=4 --big-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
+ "powerpc64le": " --uint32-align=4 --little-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
"mipsisa32r6": " --uint32-align=4 --big-endian ", \
"mips64": " --uint32-align=4 --big-endian ", \
@@ -263,7 +269,7 @@ python package_do_split_gconvs () {
bb.error("locale_arch_options not found for target_arch=" + target_arch)
bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
- localedef_opts += " --force --no-archive --prefix=%s \
+ localedef_opts += " --force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
@@ -271,14 +277,14 @@ python package_do_split_gconvs () {
(path, i18npath, gconvpath, localedef_opts)
else: # earlier slower qemu way
qemu = qemu_target_binary(d)
- localedef_opts = "--force --no-archive --prefix=%s \
+ localedef_opts = "--force --no-hard-links --no-archive --prefix=%s \
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
qemu_options = d.getVar('QEMU_OPTIONS')
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
- -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
+ -E LD_LIBRARY_PATH=%s %s %s${base_bindir}/localedef %s" % \
(path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
commands["%s/%s" % (outputpath, name)] = cmd
@@ -290,7 +296,7 @@ python package_do_split_gconvs () {
d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES')))
rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
- m = re.match("(.*)_(.*)", name)
+ m = re.match(r"(.*)_(.*)", name)
if m:
rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
d.setVar('RPROVIDES_%s' % pkgname, rprovides)
@@ -341,13 +347,15 @@ python package_do_split_gconvs () {
if use_bin == "compile":
makefile = oe.path.join(d.getVar("WORKDIR"), "locale-tree", "Makefile")
- m = open(makefile, "w")
- m.write("all: %s\n\n" % " ".join(commands.keys()))
- for cmd in commands:
- m.write(cmd + ":\n")
- m.write("\t" + commands[cmd] + "\n\n")
- m.close()
+ with open(makefile, "w") as m:
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ total = len(commands)
+ for i, (maketarget, makerecipe) in enumerate(commands.items()):
+ m.write(maketarget + ":\n")
+ m.write("\t@echo 'Progress %d/%d'\n" % (i, total))
+ m.write("\t" + makerecipe + "\n\n")
d.setVar("EXTRA_OEMAKE", "-C %s ${PARALLEL_MAKE}" % (os.path.dirname(makefile)))
+ d.setVarFlag("oe_runmake", "progress", "outof:Progress\s(\d+)/(\d+)")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
@@ -356,12 +364,12 @@ python package_do_split_gconvs () {
if use_bin in ('compile', 'precompiled'):
lcsplit = d.getVar('GLIBC_SPLIT_LC_PACKAGES')
if lcsplit and int(lcsplit):
- do_split_packages(d, binary_locales_dir, file_regex='^(.*/LC_\w+)', \
+ do_split_packages(d, binary_locales_dir, file_regex=r'^(.*/LC_\w+)', \
output_pattern=bpn+'-binary-localedata-%s', \
description='binary locale definition for %s', recursive=True,
hook=metapkg_hook, extra_depends='', allow_dirs=True, match_path=True)
else:
- do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ do_split_packages(d, binary_locales_dir, file_regex=r'(.*)', \
output_pattern=bpn+'-binary-localedata-%s', \
description='binary locale definition for %s', extra_depends='', allow_dirs=True)
else:
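
The raw-string conversions above change only how Python hands the backslash escapes to the regex engine, not what the patterns match; a minimal standalone sketch, with illustrative input strings that are not taken from glibc:

import re

# With r'' the escape sequences (\w, \.) reach re.compile() exactly as written,
# instead of relying on Python passing unknown string escapes through, which
# newer interpreters warn about.
i_re = re.compile(r'^include "(\w+)".*')
c_re = re.compile(r'^copy "(.*)"')

print(i_re.match('include "ASCII" comment').group(1))  # ASCII
print(c_re.match('copy "ISO-8859-1"').group(1))        # ISO-8859-1
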
diff --git a/external/poky/meta/classes/license.bbclass b/external/poky/meta/classes/license.bbclass
index aec6999d..f90176d6 100644
--- a/external/poky/meta/classes/license.bbclass
+++ b/external/poky/meta/classes/license.bbclass
@@ -252,7 +252,7 @@ def canonical_license(d, license):
"""
Return the canonical (SPDX) form of the license if available (so GPLv3
becomes GPL-3.0), for the license named 'X+', return canonical form of
- 'X' if availabel and the tailing '+' (so GPLv3+ becomes GPL-3.0+),
+    'X' if available and the trailing '+' (so GPLv3+ becomes GPL-3.0+),
or the passed license if there is no canonical form.
"""
lic = d.getVarFlag('SPDXLICENSEMAP', license) or ""
@@ -262,19 +262,38 @@ def canonical_license(d, license):
lic += '+'
return lic or license
+def available_licenses(d):
+ """
+ Return the available licenses by searching the directories specified by
+ COMMON_LICENSE_DIR and LICENSE_PATH.
+ """
+ lic_dirs = ((d.getVar('COMMON_LICENSE_DIR') or '') + ' ' +
+ (d.getVar('LICENSE_PATH') or '')).split()
+
+ licenses = []
+ for lic_dir in lic_dirs:
+ licenses += os.listdir(lic_dir)
+
+ licenses = sorted(licenses)
+ return licenses
+
+# Only determine the list of all available licenses once. This assumes that any
+# additions to LICENSE_PATH have been done before this file is parsed.
+AVAILABLE_LICENSES := "${@' '.join(available_licenses(d))}"
+
def expand_wildcard_licenses(d, wildcard_licenses):
"""
- Return actual spdx format license names if wildcard used. We expand
- wildcards from SPDXLICENSEMAP flags and SRC_DISTRIBUTE_LICENSES values.
+ Return actual spdx format license names if wildcards are used. We expand
+ wildcards from SPDXLICENSEMAP flags and AVAILABLE_LICENSES.
"""
import fnmatch
- licenses = []
+ licenses = wildcard_licenses[:]
spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
for wld_lic in wildcard_licenses:
spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
- spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
+ spdx_lics = d.getVar('AVAILABLE_LICENSES').split()
for wld_lic in wildcard_licenses:
licenses += fnmatch.filter(spdx_lics, wld_lic)
@@ -287,6 +306,26 @@ def incompatible_license_contains(license, truevalue, falsevalue, d):
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
return truevalue if license in bad_licenses else falsevalue
+def incompatible_pkg_license(d, dont_want_licenses, license):
+ # Handles an "or" or two license sets provided by
+ # flattened_licenses(), pick one that works if possible.
+ def choose_lic_set(a, b):
+ return a if all(oe.license.license_ok(canonical_license(d, lic),
+ dont_want_licenses) for lic in a) else b
+
+ try:
+ licenses = oe.license.flattened_licenses(license, choose_lic_set)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('P'), exc))
+
+ incompatible_lic = []
+ for l in licenses:
+ license = canonical_license(d, l)
+ if not oe.license.license_ok(license, dont_want_licenses):
+ incompatible_lic.append(license)
+
+ return sorted(incompatible_lic)
+
def incompatible_license(d, dont_want_licenses, package=None):
"""
This function checks if a recipe has only incompatible licenses. It also
@@ -298,26 +337,15 @@ def incompatible_license(d, dont_want_licenses, package=None):
if not license:
license = d.getVar('LICENSE')
- # Handles an "or" or two license sets provided by
- # flattened_licenses(), pick one that works if possible.
- def choose_lic_set(a, b):
- return a if all(oe.license.license_ok(canonical_license(d, lic),
- dont_want_licenses) for lic in a) else b
-
- try:
- licenses = oe.license.flattened_licenses(license, choose_lic_set)
- except oe.license.LicenseError as exc:
- bb.fatal('%s: %s' % (d.getVar('P'), exc))
- return any(not oe.license.license_ok(canonical_license(d, l), \
- dont_want_licenses) for l in licenses)
+ return incompatible_pkg_license(d, dont_want_licenses, license)
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGS that
aren't whitelisted.
- If it does, it returns the first LICENSE_FLAGS item missing from the
- whitelist, or all of the LICENSE_FLAGS if there is no whitelist.
+    If it does, it returns all the LICENSE_FLAGS missing from the whitelist, or
+ all of the LICENSE_FLAGS if there is no whitelist.
    If everything is properly whitelisted, it returns None.
"""
@@ -354,22 +382,23 @@ def check_license_flags(d):
return False
def all_license_flags_match(license_flags, whitelist):
- """ Return first unmatched flag, None if all flags match """
+ """ Return all unmatched flags, None if all flags match """
pn = d.getVar('PN')
split_whitelist = whitelist.split()
+ flags = []
for flag in license_flags.split():
if not license_flag_matches(flag, split_whitelist, pn):
- return flag
- return None
+ flags.append(flag)
+ return flags if flags else None
license_flags = d.getVar('LICENSE_FLAGS')
if license_flags:
whitelist = d.getVar('LICENSE_FLAGS_WHITELIST')
if not whitelist:
- return license_flags
- unmatched_flag = all_license_flags_match(license_flags, whitelist)
- if unmatched_flag:
- return unmatched_flag
+ return license_flags.split()
+ unmatched_flags = all_license_flags_match(license_flags, whitelist)
+ if unmatched_flags:
+ return unmatched_flags
return None
def check_license_format(d):
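
The reworked expand_wildcard_licenses() keeps the literal entries and then matches each wildcard against AVAILABLE_LICENSES with fnmatch (the SPDXLICENSEMAP half is omitted here); a small sketch with a made-up license list, the real list coming from available_licenses(d):

import fnmatch

available = ["GPL-2.0", "GPL-3.0", "LGPL-2.1", "LGPL-3.0", "MIT"]  # hypothetical AVAILABLE_LICENSES

def expand(wildcard_licenses):
    # Keep the literal entries, then append every available license
    # matching one of the wildcards.
    licenses = wildcard_licenses[:]
    for wld_lic in wildcard_licenses:
        licenses += fnmatch.filter(available, wld_lic)
    return licenses

print(expand(["*GPL-3.0"]))  # ['*GPL-3.0', 'GPL-3.0', 'LGPL-3.0']
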
diff --git a/external/poky/meta/classes/license_image.bbclass b/external/poky/meta/classes/license_image.bbclass
index b65ff56f..a8c72da3 100644
--- a/external/poky/meta/classes/license_image.bbclass
+++ b/external/poky/meta/classes/license_image.bbclass
@@ -32,20 +32,28 @@ python license_create_manifest() {
rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
d.getVar('IMAGE_NAME'), 'license.manifest')
- write_license_files(d, rootfs_license_manifest, pkg_dic)
+ write_license_files(d, rootfs_license_manifest, pkg_dic, rootfs=True)
}
-def write_license_files(d, license_manifest, pkg_dic):
+def write_license_files(d, license_manifest, pkg_dic, rootfs=True):
import re
+ import stat
bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
- bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
+ bad_licenses = [canonical_license(d, l) for l in bad_licenses]
bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+ whitelist = []
+ for lic in bad_licenses:
+ whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
+
with open(license_manifest, "w") as license_file:
for pkg in sorted(pkg_dic):
- if bad_licenses:
+ if bad_licenses and pkg not in whitelist:
try:
+ licenses = incompatible_pkg_license(d, bad_licenses, pkg_dic[pkg]["LICENSE"])
+ if licenses:
+ bb.fatal("Package %s cannot be installed into the image because it has incompatible license(s): %s" %(pkg, ' '.join(licenses)))
(pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
bad_licenses, canonical_license, d)
@@ -55,6 +63,8 @@ def write_license_files(d, license_manifest, pkg_dic):
pkg_dic[pkg]["LICENSES"] = re.sub(r'[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
pkg_dic[pkg]["LICENSES"] = re.sub(r' *', ' ', pkg_dic[pkg]["LICENSES"])
pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+ if pkg in whitelist:
+ bb.warn("Including %s with an incompatible license %s into the image, because it has been whitelisted." %(pkg, pkg_dic[pkg]["LICENSE"]))
if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
# Rootfs manifest
@@ -94,14 +104,14 @@ def write_license_files(d, license_manifest, pkg_dic):
# With both options set we see a .5 M increase in core-image-minimal
copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
- if copy_lic_manifest == "1":
+ if rootfs and copy_lic_manifest == "1":
rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'),
'usr', 'share', 'common-licenses')
bb.utils.mkdirhier(rootfs_license_dir)
rootfs_license_manifest = os.path.join(rootfs_license_dir,
os.path.split(license_manifest)[1])
if not os.path.exists(rootfs_license_manifest):
- os.link(license_manifest, rootfs_license_manifest)
+ oe.path.copyhardlink(license_manifest, rootfs_license_manifest)
if copy_lic_dirs == "1":
for pkg in sorted(pkg_dic):
@@ -135,7 +145,7 @@ def write_license_files(d, license_manifest, pkg_dic):
continue
if not os.path.exists(rootfs_license):
- os.link(pkg_license, rootfs_license)
+ oe.path.copyhardlink(pkg_license, rootfs_license)
if not os.path.exists(pkg_rootfs_license):
os.symlink(os.path.join('..', lic), pkg_rootfs_license)
@@ -145,7 +155,19 @@ def write_license_files(d, license_manifest, pkg_dic):
os.path.exists(pkg_rootfs_license)):
continue
- os.link(pkg_license, pkg_rootfs_license)
+ oe.path.copyhardlink(pkg_license, pkg_rootfs_license)
+ # Fixup file ownership and permissions
+ for walkroot, dirs, files in os.walk(rootfs_license_dir):
+ for f in files:
+ p = os.path.join(walkroot, f)
+ os.lchown(p, 0, 0)
+ if not os.path.islink(p):
+ os.chmod(p, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+ for dir in dirs:
+ p = os.path.join(walkroot, dir)
+ os.lchown(p, 0, 0)
+ os.chmod(p, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+
def license_deployed_manifest(d):
@@ -176,7 +198,7 @@ def license_deployed_manifest(d):
d.getVar('IMAGE_NAME'))
bb.utils.mkdirhier(lic_manifest_dir)
image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
- write_license_files(d, image_license_manifest, man_dic)
+ write_license_files(d, image_license_manifest, man_dic, rootfs=False)
def get_deployed_dependencies(d):
"""
@@ -185,10 +207,6 @@ def get_deployed_dependencies(d):
deploy = {}
# Get all the dependencies for the current task (rootfs).
- # Also get EXTRA_IMAGEDEPENDS because the bootloader is
- # usually in this var and not listed in rootfs.
- # At last, get the dependencies from boot classes because
- # it might contain the bootloader.
taskdata = d.getVar("BB_TASKDEPDATA", False)
depends = list(set([dep[0] for dep
in list(taskdata.values())
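
The new whitelist lookup in write_license_files() reads a WHITELIST_<license> variable for every entry in INCOMPATIBLE_LICENSE; a hypothetical local.conf fragment showing the shape it expects (the package name is a placeholder):

INCOMPATIBLE_LICENSE = "GPL-3.0"
# Let one named package into the image despite the incompatible license;
# write_license_files() then warns instead of calling bb.fatal().
WHITELIST_GPL-3.0 = "example-pkg"
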
diff --git a/external/poky/meta/classes/linuxloader.bbclass b/external/poky/meta/classes/linuxloader.bbclass
index b4c41349..ec0e0556 100644
--- a/external/poky/meta/classes/linuxloader.bbclass
+++ b/external/poky/meta/classes/linuxloader.bbclass
@@ -1,25 +1,31 @@
-def get_musl_loader(d):
+def get_musl_loader_arch(d):
import re
- dynamic_loader = None
+ ldso_arch = None
targetarch = d.getVar("TARGET_ARCH")
if targetarch.startswith("microblaze"):
- dynamic_loader = "${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
+ ldso_arch = "microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}"
elif targetarch.startswith("mips"):
- dynamic_loader = "${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ ldso_arch = "mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc":
- dynamic_loader = "${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+ ldso_arch = "powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
elif targetarch == "powerpc64":
- dynamic_loader = "${base_libdir}/ld-musl-powerpc64.so.1"
+ ldso_arch = "powerpc64"
elif targetarch == "x86_64":
- dynamic_loader = "${base_libdir}/ld-musl-x86_64.so.1"
+ ldso_arch = "x86_64"
elif re.search("i.86", targetarch):
- dynamic_loader = "${base_libdir}/ld-musl-i386.so.1"
+ ldso_arch = "i386"
elif targetarch.startswith("arm"):
- dynamic_loader = "${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
+ ldso_arch = "arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}"
elif targetarch.startswith("aarch64"):
- dynamic_loader = "${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
- return dynamic_loader
+ ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}"
+ elif targetarch.startswith("riscv64"):
+ ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}"
+ return ldso_arch
+
+def get_musl_loader(d):
+ import re
+ return "/lib/ld-musl-" + get_musl_loader_arch(d) + ".so.1"
def get_glibc_loader(d):
import re
@@ -39,9 +45,11 @@ def get_glibc_loader(d):
elif re.search("i.86", targetarch):
dynamic_loader = "${base_libdir}/ld-linux.so.2"
elif targetarch == "arm":
- dynamic_loader = "${base_libdir}/ld-linux.so.3"
+ dynamic_loader = "${base_libdir}/ld-linux${@['-armhf', ''][d.getVar('TARGET_FPU') == 'soft']}.so.3"
elif targetarch.startswith("aarch64"):
dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+ elif targetarch.startswith("riscv64"):
+ dynamic_loader = "${base_libdir}/ld-linux-riscv64-lp64${@['d', ''][d.getVar('TARGET_FPU') == 'soft']}.so.1"
return dynamic_loader
def get_linuxloader(d):
@@ -58,4 +66,5 @@ def get_linuxloader(d):
get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
+get_musl_loader_arch[vardepvalue] = "${@get_musl_loader_arch(d)}"
get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
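
After the split, get_musl_loader() always assembles the path under /lib from the suffix returned by get_musl_loader_arch(); a trivial standalone sketch of that composition (the architecture strings are illustrative):

def loader_path(ldso_arch):
    # Mirrors the string composition in get_musl_loader().
    return "/lib/ld-musl-" + ldso_arch + ".so.1"

print(loader_path("aarch64"))      # /lib/ld-musl-aarch64.so.1
print(loader_path("riscv64-sf"))   # /lib/ld-musl-riscv64-sf.so.1
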
diff --git a/external/poky/meta/classes/live-vm-common.bbclass b/external/poky/meta/classes/live-vm-common.bbclass
index 68105d9b..74e7074a 100644
--- a/external/poky/meta/classes/live-vm-common.bbclass
+++ b/external/poky/meta/classes/live-vm-common.bbclass
@@ -29,6 +29,39 @@ def pcbios(d):
PCBIOS = "${@pcbios(d)}"
PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS') == '1']}"
+# efi_populate_common DEST BOOTLOADER
+efi_populate_common() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/$2-${EFI_BOOT_IMAGE} ${DEST}${EFIDIR}/${EFI_BOOT_IMAGE}
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${DEST}/startup.nsh
+}
+
+efi_iso_populate() {
+ iso_dir=$1
+ efi_populate $iso_dir
+	# Build an EFI directory to create efi.img
+ mkdir -p ${EFIIMGDIR}/${EFIDIR}
+ cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+ cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
+
+ EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
+ printf 'fs0:%s\%s\n' "$EFIPATH" "${EFI_BOOT_IMAGE}" >${EFIIMGDIR}/startup.nsh
+
+ if [ -f "$iso_dir/initrd" ] ; then
+ cp $iso_dir/initrd ${EFIIMGDIR}
+ fi
+}
+
+efi_hddimg_populate() {
+ efi_populate $1
+}
+
inherit ${EFI_CLASS}
inherit ${PCBIOS_CLASS}
diff --git a/external/poky/meta/classes/manpages.bbclass b/external/poky/meta/classes/manpages.bbclass
index 50c25476..1e667806 100644
--- a/external/poky/meta/classes/manpages.bbclass
+++ b/external/poky/meta/classes/manpages.bbclass
@@ -18,8 +18,15 @@ pkg_postinst_append_${MAN_PKG} () {
if test -n "$D"; then
if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
+ chown -R root:root $D${mandir}
mkdir -p $D${localstatedir}/cache/man
- mv $D${mandir}/index.db $D${localstatedir}/cache/man
+ cd $D${mandir}
+ find . -name index.db | while read index; do
+ mkdir -p $D${localstatedir}/cache/man/$(dirname ${index})
+ mv ${index} $D${localstatedir}/cache/man/${index}
+ chown man:man $D${localstatedir}/cache/man/${index}
+ done
+ cd -
else
$INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
fi
diff --git a/external/poky/meta/classes/mcextend.bbclass b/external/poky/meta/classes/mcextend.bbclass
new file mode 100644
index 00000000..0f8f9622
--- /dev/null
+++ b/external/poky/meta/classes/mcextend.bbclass
@@ -0,0 +1,16 @@
+python mcextend_virtclass_handler () {
+ cls = e.data.getVar("BBEXTENDCURR")
+ variant = e.data.getVar("BBEXTENDVARIANT")
+ if cls != "mcextend" or not variant:
+ return
+
+ override = ":virtclass-mcextend-" + variant
+
+ e.data.setVar("PN", e.data.getVar("PN", False) + "-" + variant)
+ e.data.setVar("MCNAME", variant)
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+}
+
+addhandler mcextend_virtclass_handler
+mcextend_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
+
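
The handler above fires through the normal class-extension mechanism, so a recipe would opt in via BBCLASSEXTEND; a hypothetical fragment (the multiconfig name mc1 is a placeholder):

# Hypothetical recipe fragment: build an extra variant of this recipe,
# named and overridden per multiconfig, via the mcextend class extender.
BBCLASSEXTEND = "mcextend:mc1"
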
diff --git a/external/poky/meta/classes/meson.bbclass b/external/poky/meta/classes/meson.bbclass
index 3cbdcf18..ff52d20e 100644
--- a/external/poky/meta/classes/meson.bbclass
+++ b/external/poky/meta/classes/meson.bbclass
@@ -12,8 +12,9 @@ MESON_SOURCEPATH = "${S}"
def noprefix(var, d):
return d.getVar(var).replace(d.getVar('prefix') + '/', '', 1)
+MESON_BUILDTYPE ?= "plain"
MESONOPTS = " --prefix ${prefix} \
- --buildtype plain \
+ --buildtype ${MESON_BUILDTYPE} \
--bindir ${@noprefix('bindir', d)} \
--sbindir ${@noprefix('sbindir', d)} \
--datadir ${@noprefix('datadir', d)} \
@@ -25,15 +26,7 @@ MESONOPTS = " --prefix ${prefix} \
--sysconfdir ${sysconfdir} \
--localstatedir ${localstatedir} \
--sharedstatedir ${sharedstatedir} \
- -Dc_args='${BUILD_CPPFLAGS} ${BUILD_CFLAGS}' \
- -Dc_link_args='${BUILD_LDFLAGS}' \
- -Dcpp_args='${BUILD_CPPFLAGS} ${BUILD_CXXFLAGS}' \
- -Dcpp_link_args='${BUILD_LDFLAGS}'"
-
-MESON_TOOLCHAIN_ARGS = "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
-MESON_C_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CFLAGS}"
-MESON_CPP_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CXXFLAGS}"
-MESON_LINK_ARGS = "${MESON_TOOLCHAIN_ARGS} ${LDFLAGS}"
+ --wrap-mode nodownload"
EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
@@ -52,15 +45,35 @@ def meson_cpu_family(var, d):
arch = d.getVar(var)
if arch == 'powerpc':
return 'ppc'
- elif arch == 'powerpc64':
+ elif arch == 'powerpc64' or arch == 'powerpc64le':
return 'ppc64'
+ elif arch == 'armeb':
+ return 'arm'
+ elif arch == 'aarch64_be':
+ return 'aarch64'
elif arch == 'mipsel':
return 'mips'
+ elif arch == 'mips64el':
+ return 'mips64'
elif re.match(r"i[3-6]86", arch):
return "x86"
+ elif arch == "microblazeel":
+ return "microblaze"
else:
return arch
+# Map our OS values to what Meson expects:
+# https://mesonbuild.com/Reference-tables.html#operating-system-names
+def meson_operating_system(var, d):
+ os = d.getVar(var)
+ if "mingw" in os:
+ return "windows"
+    # avoid e.g. 'linux-gnueabi'
+ elif "linux" in os:
+ return "linux"
+ else:
+ return os
+
def meson_endian(prefix, d):
arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
sitedata = siteinfo_data_for_machine(arch, os, d)
@@ -72,7 +85,7 @@ def meson_endian(prefix, d):
bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
addtask write_config before do_configure
-do_write_config[vardeps] += "MESON_C_ARGS MESON_CPP_ARGS MESON_LINK_ARGS CC CXX LD AR NM STRIP READELF"
+do_write_config[vardeps] += "CC CXX LD AR NM STRIP READELF CFLAGS CXXFLAGS LDFLAGS"
do_write_config() {
# This needs to be Py to split the args into single-element lists
cat >${WORKDIR}/meson.cross <<EOF
@@ -81,27 +94,27 @@ c = ${@meson_array('CC', d)}
cpp = ${@meson_array('CXX', d)}
ar = ${@meson_array('AR', d)}
nm = ${@meson_array('NM', d)}
-ld = ${@meson_array('LD', d)}
strip = ${@meson_array('STRIP', d)}
readelf = ${@meson_array('READELF', d)}
pkgconfig = 'pkg-config'
+llvm-config = 'llvm-config${LLVMVERSION}'
[properties]
needs_exe_wrapper = true
-c_args = ${@meson_array('MESON_C_ARGS', d)}
-c_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
-cpp_args = ${@meson_array('MESON_CPP_ARGS', d)}
-cpp_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
+c_args = ${@meson_array('CFLAGS', d)}
+c_link_args = ${@meson_array('LDFLAGS', d)}
+cpp_args = ${@meson_array('CXXFLAGS', d)}
+cpp_link_args = ${@meson_array('LDFLAGS', d)}
gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
[host_machine]
-system = '${HOST_OS}'
+system = '${@meson_operating_system('HOST_OS', d)}'
cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
cpu = '${HOST_ARCH}'
endian = '${@meson_endian('HOST', d)}'
[target_machine]
-system = '${TARGET_OS}'
+system = '${@meson_operating_system('TARGET_OS', d)}'
cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
cpu = '${TARGET_ARCH}'
endian = '${@meson_endian('TARGET', d)}'
@@ -111,6 +124,10 @@ EOF
CONFIGURE_FILES = "meson.build"
meson_do_configure() {
+    # Meson requires this to be 'bfd', 'lld' or 'gold' from 0.53 onwards
+ # https://github.com/mesonbuild/meson/commit/ef9aeb188ea2bc7353e59916c18901cde90fa2b3
+ unset LD
+
# Work around "Meson fails if /tmp is mounted with noexec #2972"
mkdir -p "${B}/meson-private/tmp"
export TMPDIR="${B}/meson-private/tmp"
@@ -128,6 +145,7 @@ override_native_tools() {
export CXX="${BUILD_CXX}"
export LD="${BUILD_LD}"
export AR="${BUILD_AR}"
+ export STRIP="${BUILD_STRIP}"
# These contain *target* flags but will be used as *native* flags. The
# correct native flags will be passed via -Dc_args and so on, unset them so
# they don't interfere with tools invoked by Meson (such as g-ir-scanner)
@@ -146,6 +164,16 @@ meson_do_configure_prepend_class-native() {
export PKG_CONFIG="pkg-config-native"
}
+python meson_do_qa_configure() {
+ import re
+ warn_re = re.compile(r"^WARNING: Cross property (.+) is using default value (.+)$", re.MULTILINE)
+ with open(d.expand("${B}/meson-logs/meson-log.txt")) as logfile:
+ log = logfile.read()
+ for (prop, value) in warn_re.findall(log):
+ bb.warn("Meson cross property %s used without explicit assignment, defaulting to %s" % (prop, value))
+}
+do_configure[postfuncs] += "meson_do_qa_configure"
+
do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
meson_do_compile() {
ninja -v ${PARALLEL_MAKE}
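
Because MESONOPTS now takes --buildtype from the weakly assigned MESON_BUILDTYPE, a recipe can override it without replacing the whole option string; a hypothetical recipe fragment:

inherit meson
# Override the default "plain" build type set by meson.bbclass.
MESON_BUILDTYPE = "debugoptimized"
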
diff --git a/external/poky/meta/classes/metadata_scm.bbclass b/external/poky/meta/classes/metadata_scm.bbclass
index fa791f04..58bb4c55 100644
--- a/external/poky/meta/classes/metadata_scm.bbclass
+++ b/external/poky/meta/classes/metadata_scm.bbclass
@@ -3,55 +3,15 @@ METADATA_REVISION ?= "${@base_detect_revision(d)}"
def base_detect_revision(d):
path = base_get_scmbasepath(d)
-
- scms = [base_get_metadata_git_revision]
-
- for scm in scms:
- rev = scm(path, d)
- if rev != "<unknown>":
- return rev
-
- return "<unknown>"
+ return base_get_metadata_git_revision(path, d)
def base_detect_branch(d):
path = base_get_scmbasepath(d)
-
- scms = [base_get_metadata_git_branch]
-
- for scm in scms:
- rev = scm(path, d)
- if rev != "<unknown>":
- return rev.strip()
-
- return "<unknown>"
+ return base_get_metadata_git_branch(path, d)
def base_get_scmbasepath(d):
return os.path.join(d.getVar('COREBASE'), 'meta')
-def base_get_metadata_monotone_branch(path, d):
- monotone_branch = "<unknown>"
- try:
- with open("%s/_MTN/options" % path) as f:
- monotone_branch = f.read().strip()
- if monotone_branch.startswith( "database" ):
- monotone_branch_words = monotone_branch.split()
- monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
- except:
- pass
- return monotone_branch
-
-def base_get_metadata_monotone_revision(path, d):
- monotone_revision = "<unknown>"
- try:
- with open("%s/_MTN/revision" % path) as f:
- monotone_revision = f.read().strip()
- if monotone_revision.startswith( "format_version" ):
- monotone_revision_words = monotone_revision.split()
- monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
- except IOError:
- pass
- return monotone_revision
-
def base_get_metadata_svn_revision(path, d):
# This only works with older subversion. For newer versions
# this function will need to be fixed by someone interested
diff --git a/external/poky/meta/classes/mime-xdg.bbclass b/external/poky/meta/classes/mime-xdg.bbclass
new file mode 100644
index 00000000..642a5b75
--- /dev/null
+++ b/external/poky/meta/classes/mime-xdg.bbclass
@@ -0,0 +1,74 @@
+#
+# This class creates mime <-> application associations based on entry
+# 'MimeType' in *.desktop files
+#
+
+DEPENDS += "desktop-file-utils"
+PACKAGE_WRITE_DEPS += "desktop-file-utils-native"
+DESKTOPDIR = "${datadir}/applications"
+
+# There are recipes out there installing their .desktop files as absolute
+# symlinks. For us these are dangling and cannot be introspected for "MimeType"
+# easily. By adding package names to MIME_XDG_PACKAGES, the packager can force
+# proper update-desktop-database handling. Note that all introspection is
+# skipped when MIME_XDG_PACKAGES is not empty.
+MIME_XDG_PACKAGES ?= ""
+
+mime_xdg_postinst() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ desktop_dir=${DESKTOPDIR}
+else
+ update-desktop-database $D${DESKTOPDIR}
+fi
+}
+
+mime_xdg_postrm() {
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_desktop_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ desktop_dir=${DESKTOPDIR}
+else
+ update-desktop-database $D${DESKTOPDIR}
+fi
+}
+
+python populate_packages_append () {
+ packages = d.getVar('PACKAGES').split()
+ pkgdest = d.getVar('PKGDEST')
+ desktop_base = d.getVar('DESKTOPDIR')
+ forced_mime_xdg_pkgs = (d.getVar('MIME_XDG_PACKAGES') or '').split()
+
+ for pkg in packages:
+ desktops_with_mime_found = pkg in forced_mime_xdg_pkgs
+ if d.getVar('MIME_XDG_PACKAGES') == '':
+ desktop_dir = '%s/%s%s' % (pkgdest, pkg, desktop_base)
+ if os.path.exists(desktop_dir):
+ for df in os.listdir(desktop_dir):
+ if df.endswith('.desktop'):
+ try:
+ with open(desktop_dir + '/'+ df, 'r') as f:
+ for line in f.read().split('\n'):
+ if 'MimeType' in line:
+ desktops_with_mime_found = True
+ break;
+ except:
+                            bb.warn('Could not open %s. Set MIME_XDG_PACKAGES in recipe or add mime-xdg to INSANE_SKIP.' % (desktop_dir + '/' + df))
+ if desktops_with_mime_found:
+ break
+ if desktops_with_mime_found:
+ bb.note("adding mime-xdg postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('mime_xdg_postinst')
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm_%s' % pkg)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('mime_xdg_postrm')
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+ bb.note("adding desktop-file-utils dependency to %s" % pkg)
+ d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"desktop-file-utils")
+}
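
A recipe shipping .desktop files with MimeType entries would pick up these postinst/postrm scripts simply by inheriting the class; a hypothetical fragment (the package name is a placeholder):

inherit mime-xdg
# Only needed when the .desktop files are absolute symlinks that cannot be
# introspected at packaging time, as described above.
MIME_XDG_PACKAGES = "example-app"
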
diff --git a/external/poky/meta/classes/mime.bbclass b/external/poky/meta/classes/mime.bbclass
index 6c7b868f..bb99bc35 100644
--- a/external/poky/meta/classes/mime.bbclass
+++ b/external/poky/meta/classes/mime.bbclass
@@ -1,46 +1,58 @@
-DEPENDS += "shared-mime-info"
+#
+# This class is used by recipes installing mime types
+#
+
+DEPENDS += "${@bb.utils.contains('BPN', 'shared-mime-info', '', 'shared-mime-info', d)}"
PACKAGE_WRITE_DEPS += "shared-mime-info-native"
+MIMEDIR = "${datadir}/mime"
mime_postinst() {
-if [ "$1" = configure ]; then
- UPDATEMIMEDB=`which update-mime-database`
- if [ -x "$UPDATEMIMEDB" ] ; then
- echo "Updating MIME database... this may take a while."
- $UPDATEMIMEDB $D${datadir}/mime
- else
- echo "Missing update-mime-database, update of mime database failed!"
- exit 1
- fi
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ mimedir=${MIMEDIR}
+else
+ echo "Updating MIME database... this may take a while."
+ update-mime-database $D${MIMEDIR}
fi
}
mime_postrm() {
-if [ "$1" = remove ] || [ "$1" = upgrade ]; then
- UPDATEMIMEDB=`which update-mime-database`
- if [ -x "$UPDATEMIMEDB" ] ; then
- echo "Updating MIME database... this may take a while."
- $UPDATEMIMEDB $D${datadir}/mime
+if [ "x$D" != "x" ]; then
+ $INTERCEPT_DIR/postinst_intercept update_mime_database ${PKG} \
+ mlprefix=${MLPREFIX} \
+ mimedir=${MIMEDIR}
+else
+ echo "Updating MIME database... this may take a while."
+	# $D${MIMEDIR}/packages belongs to the package shared-mime-info-data;
+	# packages like libfm-mime depend on shared-mime-info-data.
+	# After shared-mime-info-data is uninstalled, $D${MIMEDIR}/packages
+	# is removed, but update-mime-database needs this dir to update the
+	# database; as a workaround, create one and remove it later
+ if [ ! -d $D${MIMEDIR}/packages ]; then
+ mkdir -p $D${MIMEDIR}/packages
+ update-mime-database $D${MIMEDIR}
+ rmdir --ignore-fail-on-non-empty $D${MIMEDIR}/packages
else
- echo "Missing update-mime-database, update of mime database failed!"
- exit 1
- fi
+ update-mime-database $D${MIMEDIR}
+fi
fi
}
python populate_packages_append () {
- import re
packages = d.getVar('PACKAGES').split()
pkgdest = d.getVar('PKGDEST')
+ mimedir = d.getVar('MIMEDIR')
for pkg in packages:
- mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
- mimes = []
- mime_re = re.compile(".*\.xml$")
- if os.path.exists(mime_dir):
- for f in os.listdir(mime_dir):
- if mime_re.match(f):
- mimes.append(f)
- if mimes:
+ mime_packages_dir = '%s/%s%s/packages' % (pkgdest, pkg, mimedir)
+ mimes_types_found = False
+ if os.path.exists(mime_packages_dir):
+ for f in os.listdir(mime_packages_dir):
+ if f.endswith('.xml'):
+ mimes_types_found = True
+ break
+ if mimes_types_found:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
@@ -52,6 +64,7 @@ python populate_packages_append () {
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_postrm')
d.setVar('pkg_postrm_%s' % pkg, postrm)
- bb.note("adding shared-mime-info-data dependency to %s" % pkg)
- d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
+ if pkg != 'shared-mime-info-data':
+ bb.note("adding shared-mime-info-data dependency to %s" % pkg)
+ d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
}
diff --git a/external/poky/meta/classes/module.bbclass b/external/poky/meta/classes/module.bbclass
index e3449602..c0dfa350 100644
--- a/external/poky/meta/classes/module.bbclass
+++ b/external/poky/meta/classes/module.bbclass
@@ -48,6 +48,7 @@ module_do_compile() {
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake DEPMOD=echo MODLIB="${D}${nonarch_base_libdir}/modules/${KERNEL_VERSION}" \
+ INSTALL_FW_PATH="${D}${nonarch_base_libdir}/firmware" \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
O=${STAGING_KERNEL_BUILDDIR} \
${MODULES_INSTALL_TARGET}
diff --git a/external/poky/meta/classes/multilib.bbclass b/external/poky/meta/classes/multilib.bbclass
index 6c6499a2..ee677da1 100644
--- a/external/poky/meta/classes/multilib.bbclass
+++ b/external/poky/meta/classes/multilib.bbclass
@@ -33,6 +33,8 @@ python multilib_virtclass_handler () {
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar('SDKTARGETSYSROOT', e.data.getVar('SDKTARGETSYSROOT'))
+ override = ":virtclass-multilib-" + variant
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
target_vendor = e.data.getVar("TARGET_VENDOR_" + "virtclass-multilib-" + variant, False)
if target_vendor:
e.data.setVar("TARGET_VENDOR", target_vendor)
@@ -88,8 +90,6 @@ python multilib_virtclass_handler () {
addhandler multilib_virtclass_handler
multilib_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
-
python __anonymous () {
variant = d.getVar("BBEXTENDVARIANT")
@@ -125,8 +125,55 @@ python __anonymous () {
clsextend.map_variable("USERADD_PACKAGES")
clsextend.map_variable("SYSTEMD_PACKAGES")
clsextend.map_variable("UPDATERCPN")
+
+ reset_alternative_priority(d)
}
+def reset_alternative_priority(d):
+ if not bb.data.inherits_class('update-alternatives', d):
+ return
+
+ # There might be multiple multilibs at the same time, e.g., lib32 and
+ # lib64, each of them should have a different priority.
+ multilib_variants = d.getVar('MULTILIB_VARIANTS')
+ bbextendvariant = d.getVar('BBEXTENDVARIANT')
+ reset_gap = multilib_variants.split().index(bbextendvariant) + 1
+
+ # ALTERNATIVE_PRIORITY = priority
+ alt_priority_recipe = d.getVar('ALTERNATIVE_PRIORITY')
+ # Reset ALTERNATIVE_PRIORITY when found
+ if alt_priority_recipe:
+ reset_priority = int(alt_priority_recipe) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY to %s' % (d.getVar('PN'), reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY', reset_priority)
+
+ handled_pkgs = []
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ # ALTERNATIVE_PRIORITY_pkg = priority
+ alt_priority_pkg = d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg)
+ # Reset ALTERNATIVE_PRIORITY_pkg when found
+ if alt_priority_pkg:
+ reset_priority = int(alt_priority_pkg) - reset_gap
+ if not pkg in handled_pkgs:
+ handled_pkgs.append(pkg)
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s to %s' % (pkg, pkg, reset_priority))
+ d.setVar('ALTERNATIVE_PRIORITY_%s' % pkg, reset_priority)
+
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ # ALTERNATIVE_PRIORITY_pkg[tool] = priority
+ alt_priority_pkg_name = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name)
+ # ALTERNATIVE_PRIORITY[tool] = priority
+ alt_priority_name = d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
+
+ if alt_priority_pkg_name:
+ reset_priority = int(alt_priority_pkg_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY_%s[%s] to %s' % (pkg, pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name, reset_priority)
+ elif alt_priority_name:
+ reset_priority = int(alt_priority_name) - reset_gap
+ bb.debug(1, '%s: Setting ALTERNATIVE_PRIORITY[%s] to %s' % (pkg, alt_name, reset_priority))
+ d.setVarFlag('ALTERNATIVE_PRIORITY', alt_name, reset_priority)
+
PACKAGEFUNCS_append = " do_package_qa_multilib"
python do_package_qa_multilib() {
@@ -137,11 +184,12 @@ python do_package_qa_multilib() {
for i in values:
if i.startswith('virtual/'):
i = i[len('virtual/'):]
- if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
- (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
- (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')) \
- and (not i.startswith("kernel-image")):
+
+ if (not (i.startswith(mlprefix) or i.startswith("kernel-") \
+ or ('cross-canadian' in i) or i.startswith("nativesdk-") \
+ or i.startswith("rtld") or i.startswith("/"))):
candidates.append(i)
+
if len(candidates) > 0:
msg = "%s package %s - suspicious values '%s' in %s" \
% (d.getVar('PN'), pkg, ' '.join(candidates), var)
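
The priority reset in reset_alternative_priority() subtracts a per-variant gap (the variant's index in MULTILIB_VARIANTS plus one) from each priority; a small worked example with hypothetical values:

# Illustrative arithmetic only, assuming MULTILIB_VARIANTS = "lib32 lib64"
# and a recipe-level ALTERNATIVE_PRIORITY of 100.
multilib_variants = "lib32 lib64".split()
alternative_priority = 100

for variant in multilib_variants:
    reset_gap = multilib_variants.index(variant) + 1
    print(variant, alternative_priority - reset_gap)
# lib32 99
# lib64 98
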
diff --git a/external/poky/meta/classes/multilib_global.bbclass b/external/poky/meta/classes/multilib_global.bbclass
index 649cc096..98f65c8a 100644
--- a/external/poky/meta/classes/multilib_global.bbclass
+++ b/external/poky/meta/classes/multilib_global.bbclass
@@ -118,6 +118,9 @@ def preferred_ml_updates(d):
d.renameVar(prov, provexp)
def translate_provide(prefix, prov):
+ # Really need to know if kernel modules class is inherited somehow
+ if prov == "lttng-modules":
+ return prov
if not prov.startswith("virtual/"):
return prefix + "-" + prov
if prov == "virtual/kernel":
@@ -169,21 +172,27 @@ python multilib_virtclass_handler_global () {
if bb.data.inherits_class('kernel', e.data) or \
bb.data.inherits_class('module-base', e.data) or \
d.getVar('BPN') in non_ml_recipes:
+
+ # We need to avoid expanding KERNEL_VERSION which we can do by deleting it
+ # from a copy of the datastore
+ localdata = bb.data.createCopy(d)
+ localdata.delVar("KERNEL_VERSION")
+
variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
import oe.classextend
clsextends = []
for variant in variants:
- clsextends.append(oe.classextend.ClassExtender(variant, e.data))
+ clsextends.append(oe.classextend.ClassExtender(variant, localdata))
# Process PROVIDES
- origprovs = provs = e.data.getVar("PROVIDES") or ""
+ origprovs = provs = localdata.getVar("PROVIDES") or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
e.data.setVar("PROVIDES", provs)
# Process RPROVIDES
- origrprovs = rprovs = e.data.getVar("RPROVIDES") or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES") or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
if rprovs.strip():
@@ -191,7 +200,7 @@ python multilib_virtclass_handler_global () {
# Process RPROVIDES_${PN}...
for pkg in (e.data.getVar("PACKAGES") or "").split():
- origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg) or ""
+ origrprovs = rprovs = localdata.getVar("RPROVIDES_%s" % pkg) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
@@ -199,5 +208,4 @@ python multilib_virtclass_handler_global () {
}
addhandler multilib_virtclass_handler_global
-multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeParsed"
-
+multilib_virtclass_handler_global[eventmask] = "bb.event.RecipeTaskPreProcess"
diff --git a/external/poky/meta/classes/multilib_script.bbclass b/external/poky/meta/classes/multilib_script.bbclass
index dc166d06..b11efc1e 100644
--- a/external/poky/meta/classes/multilib_script.bbclass
+++ b/external/poky/meta/classes/multilib_script.bbclass
@@ -17,18 +17,18 @@ multilibscript_rename() {
python () {
# Do nothing if multilib isn't being used
if not d.getVar("MULTILIB_VARIANTS"):
- return
+ return
# Do nothing for native/cross
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
- return
+ return
for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
- pkg, script = entry.split(":")
- epkg = d.expand(pkg)
- scriptname = os.path.basename(script)
- d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
- d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
- d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
- d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
- d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
+ pkg, script = entry.split(":")
+ epkg = d.expand(pkg)
+ scriptname = os.path.basename(script)
+ d.appendVar("ALTERNATIVE_" + epkg, " " + scriptname + " ")
+ d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
+ d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("multilibscript_rename", "\n mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
+ d.appendVar("FILES_" + epkg, " " + script + "-${MULTILIB_SUFFIX}")
}
diff --git a/external/poky/meta/classes/native.bbclass b/external/poky/meta/classes/native.bbclass
index ddccfe2e..08106e34 100644
--- a/external/poky/meta/classes/native.bbclass
+++ b/external/poky/meta/classes/native.bbclass
@@ -89,6 +89,7 @@ export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+sbindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
@@ -119,6 +120,9 @@ PATH_prepend = "${COREBASE}/scripts/native-intercept:"
# reused if we manipulate the paths.
SSTATE_SCAN_CMD ?= "${SSTATE_SCAN_CMD_NATIVE}"
+# No strip sysroot when DEBUG_BUILD is enabled
+INHIBIT_SYSROOT_STRIP ?= "${@oe.utils.vartrue('DEBUG_BUILD', '1', '', d)}"
+
python native_virtclass_handler () {
pn = e.data.getVar("PN")
if not pn.endswith("-native"):
@@ -182,10 +186,13 @@ python do_addto_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask addto_recipe_sysroot after do_populate_sysroot
+do_addto_recipe_sysroot[deptask] = "do_populate_sysroot"
inherit nopackages
do_packagedata[stamp-extra-info] = ""
-do_populate_sysroot[stamp-extra-info] = ""
USE_NLS = "no"
+
+RECIPERDEPTASK = "do_populate_sysroot"
+do_populate_sysroot[rdeptask] = "${RECIPERDEPTASK}"
diff --git a/external/poky/meta/classes/nativesdk.bbclass b/external/poky/meta/classes/nativesdk.bbclass
index f25b0c31..7f2692c5 100644
--- a/external/poky/meta/classes/nativesdk.bbclass
+++ b/external/poky/meta/classes/nativesdk.bbclass
@@ -9,6 +9,7 @@ NATIVESDKLIBC ?= "libc-glibc"
LIBCOVERRIDE = ":${NATIVESDKLIBC}"
CLASSOVERRIDE = "class-nativesdk"
MACHINEOVERRIDES = ""
+MACHINE_FEATURES = ""
MULTILIBS = ""
@@ -57,7 +58,7 @@ EXTRA_OECONF_GCC_FLOAT = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CXXFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
# Change to place files in SDKPATH
@@ -100,12 +101,13 @@ python () {
clsextend.map_packagevars()
clsextend.map_variable("PROVIDES")
clsextend.map_regexp_variable("PACKAGES_DYNAMIC")
+ d.setVar("LIBCEXTENSION", "")
+ d.setVar("ABIEXTENSION", "")
}
addhandler nativesdk_virtclass_handler
nativesdk_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
-do_populate_sysroot[stamp-extra-info] = ""
do_packagedata[stamp-extra-info] = ""
USE_NLS = "${SDKUSE_NLS}"
diff --git a/external/poky/meta/classes/npm.bbclass b/external/poky/meta/classes/npm.bbclass
index 6dbae6bc..068032a1 100644
--- a/external/poky/meta/classes/npm.bbclass
+++ b/external/poky/meta/classes/npm.bbclass
@@ -1,91 +1,307 @@
+# Copyright (C) 2020 Savoir-Faire Linux
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# This bbclass builds and installs an npm package to the target. The package
+# sources files should be fetched in the calling recipe by using the SRC_URI
+# variable. The ${S} variable should be updated depending of your fetcher.
+#
+# Usage:
+# SRC_URI = "..."
+# inherit npm
+#
+# Optional variables:
+# NPM_ARCH:
+# Override the auto generated npm architecture.
+#
+# NPM_INSTALL_DEV:
+# Set to 1 to also install devDependencies.
+
DEPENDS_prepend = "nodejs-native "
RDEPENDS_${PN}_prepend = "nodejs "
-S = "${WORKDIR}/npmpkg"
-def node_pkgname(d):
- bpn = d.getVar('BPN')
- if bpn.startswith("node-"):
- return bpn[5:]
- return bpn
+NPM_INSTALL_DEV ?= "0"
+
+def npm_target_arch_map(target_arch):
+ """Maps arch names to npm arch names"""
+ import re
+ if re.match("p(pc|owerpc)(|64)", target_arch):
+ return "ppc"
+ elif re.match("i.86$", target_arch):
+ return "ia32"
+ elif re.match("x86_64$", target_arch):
+ return "x64"
+ elif re.match("arm64$", target_arch):
+ return "arm"
+ return target_arch
+
+NPM_ARCH ?= "${@npm_target_arch_map(d.getVar("TARGET_ARCH"))}"
+
+NPM_PACKAGE = "${WORKDIR}/npm-package"
+NPM_CACHE = "${WORKDIR}/npm-cache"
+NPM_BUILD = "${WORKDIR}/npm-build"
-NPMPN ?= "${@node_pkgname(d)}"
+def npm_global_configs(d):
+ """Get the npm global configuration"""
+ configs = []
+ # Ensure no network access is done
+ configs.append(("offline", "true"))
+ configs.append(("proxy", "http://invalid"))
+ # Configure the cache directory
+ configs.append(("cache", d.getVar("NPM_CACHE")))
+ return configs
-NPM_INSTALLDIR = "${libdir}/node/${NPMPN}"
+def npm_pack(env, srcdir, workdir):
+ """Run 'npm pack' on a specified directory"""
+ import shlex
+ cmd = "npm pack %s" % shlex.quote(srcdir)
+ configs = [("ignore-scripts", "true")]
+ tarball = env.run(cmd, configs=configs, workdir=workdir).strip("\n")
+ return os.path.join(workdir, tarball)
-# function maps arch names to npm arch names
-def npm_oe_arch_map(target_arch, d):
+python npm_do_configure() {
+ """
+ Step one: configure the npm cache and the main npm package
+
+    Every dependency has been fetched and patched in the source directory.
+    They have to be packed (this removes unneeded files) and added to the npm
+ cache to be available for the next step.
+
+ The main package and its associated manifest file and shrinkwrap file have
+ to be configured to take into account these cached dependencies.
+ """
+ import base64
+ import copy
+ import json
import re
- if re.match('p(pc|owerpc)(|64)', target_arch): return 'ppc'
- elif re.match('i.86$', target_arch): return 'ia32'
- elif re.match('x86_64$', target_arch): return 'x64'
- elif re.match('arm64$', target_arch): return 'arm'
- return target_arch
+ import shlex
+ import tempfile
+ from bb.fetch2.npm import NpmEnvironment
+ from bb.fetch2.npm import npm_unpack
+ from bb.fetch2.npmsw import foreach_dependencies
+ from bb.progress import OutOfProgressHandler
-NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH'), d)}"
-NPM_INSTALL_DEV ?= "0"
+ bb.utils.remove(d.getVar("NPM_CACHE"), recurse=True)
+ bb.utils.remove(d.getVar("NPM_PACKAGE"), recurse=True)
+
+ env = NpmEnvironment(d, configs=npm_global_configs(d))
+
+ def _npm_cache_add(tarball):
+ """Run 'npm cache add' for a specified tarball"""
+ cmd = "npm cache add %s" % shlex.quote(tarball)
+ env.run(cmd)
+
+ def _npm_integrity(tarball):
+ """Return the npm integrity of a specified tarball"""
+ sha512 = bb.utils.sha512_file(tarball)
+ return "sha512-" + base64.b64encode(bytes.fromhex(sha512)).decode()
+
+ def _npm_version(tarball):
+ """Return the version of a specified tarball"""
+ regex = r"-(\d+\.\d+\.\d+(-.*)?(\+.*)?)\.tgz"
+ return re.search(regex, tarball).group(1)
+
+ def _npmsw_dependency_dict(orig, deptree):
+ """
+ Return the sub dictionary in the 'orig' dictionary corresponding to the
+ 'deptree' dependency tree. This function follows the shrinkwrap file
+ format.
+ """
+ ptr = orig
+ for dep in deptree:
+ if "dependencies" not in ptr:
+ ptr["dependencies"] = {}
+ ptr = ptr["dependencies"]
+ if dep not in ptr:
+ ptr[dep] = {}
+ ptr = ptr[dep]
+ return ptr
+
+ # Manage the manifest file and shrinkwrap files
+ orig_manifest_file = d.expand("${S}/package.json")
+ orig_shrinkwrap_file = d.expand("${S}/npm-shrinkwrap.json")
+ cached_manifest_file = d.expand("${NPM_PACKAGE}/package.json")
+ cached_shrinkwrap_file = d.expand("${NPM_PACKAGE}/npm-shrinkwrap.json")
+
+ with open(orig_manifest_file, "r") as f:
+ orig_manifest = json.load(f)
+
+ cached_manifest = copy.deepcopy(orig_manifest)
+ cached_manifest.pop("dependencies", None)
+ cached_manifest.pop("devDependencies", None)
+
+ with open(orig_shrinkwrap_file, "r") as f:
+ orig_shrinkwrap = json.load(f)
+
+ cached_shrinkwrap = copy.deepcopy(orig_shrinkwrap)
+ cached_shrinkwrap.pop("dependencies", None)
+
+ # Manage the dependencies
+ progress = OutOfProgressHandler(d, r"^(\d+)/(\d+)$")
+ progress_total = 1 # also count the main package
+ progress_done = 0
+
+ def _count_dependency(name, params, deptree):
+ nonlocal progress_total
+ progress_total += 1
+
+ def _cache_dependency(name, params, deptree):
+ destsubdirs = [os.path.join("node_modules", dep) for dep in deptree]
+ destsuffix = os.path.join(*destsubdirs)
+ with tempfile.TemporaryDirectory() as tmpdir:
+ # Add the dependency to the npm cache
+ destdir = os.path.join(d.getVar("S"), destsuffix)
+ tarball = npm_pack(env, destdir, tmpdir)
+ _npm_cache_add(tarball)
+ # Add its signature to the cached shrinkwrap
+ dep = _npmsw_dependency_dict(cached_shrinkwrap, deptree)
+ dep["version"] = _npm_version(tarball)
+ dep["integrity"] = _npm_integrity(tarball)
+ if params.get("dev", False):
+ dep["dev"] = True
+ # Display progress
+ nonlocal progress_done
+ progress_done += 1
+ progress.write("%d/%d" % (progress_done, progress_total))
+
+ dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
+ foreach_dependencies(orig_shrinkwrap, _count_dependency, dev)
+ foreach_dependencies(orig_shrinkwrap, _cache_dependency, dev)
+
+ # Configure the main package
+ with tempfile.TemporaryDirectory() as tmpdir:
+ tarball = npm_pack(env, d.getVar("S"), tmpdir)
+ npm_unpack(tarball, d.getVar("NPM_PACKAGE"), d)
+
+ # Configure the cached manifest file and cached shrinkwrap file
+ def _update_manifest(depkey):
+ for name in orig_manifest.get(depkey, {}):
+ version = cached_shrinkwrap["dependencies"][name]["version"]
+ if depkey not in cached_manifest:
+ cached_manifest[depkey] = {}
+ cached_manifest[depkey][name] = version
-npm_do_compile() {
- # Copy in any additionally fetched modules
- if [ -d ${WORKDIR}/node_modules ] ; then
- cp -a ${WORKDIR}/node_modules ${S}/
- fi
- # changing the home directory to the working directory, the .npmrc will
- # be created in this directory
- export HOME=${WORKDIR}
- if [ "${NPM_INSTALL_DEV}" = "1" ]; then
- npm config set dev true
- else
- npm config set dev false
- fi
- npm set cache ${WORKDIR}/npm_cache
- # clear cache before every build
- npm cache clear --force
- # Install pkg into ${S} without going to the registry
- if [ "${NPM_INSTALL_DEV}" = "1" ]; then
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --no-registry install
- else
- npm --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry install
- fi
+ _update_manifest("dependencies")
+
+ if dev:
+ _update_manifest("devDependencies")
+
+ with open(cached_manifest_file, "w") as f:
+ json.dump(cached_manifest, f, indent=2)
+
+ with open(cached_shrinkwrap_file, "w") as f:
+ json.dump(cached_shrinkwrap, f, indent=2)
}
-npm_do_install() {
- # changing the home directory to the working directory, the .npmrc will
- # be created in this directory
- export HOME=${WORKDIR}
- mkdir -p ${D}${libdir}/node_modules
- npm pack .
- npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry ${NPMPN}-${PV}.tgz
- mv ${D}${libdir}/node_modules ${D}${libdir}/node
- if [ -d ${D}${prefix}/etc ] ; then
- # This will be empty
- rmdir ${D}${prefix}/etc
- fi
+python npm_do_compile() {
+ """
+ Step two: install the npm package
+
+ Use the configured main package and the cached dependencies to run the
+ installation process. The installation is done in a directory which is
+ not the destination directory yet.
+
+ A combination of 'npm pack' and 'npm install' is used to ensure that the
+ installed files are actual copies instead of symbolic links (which is the
+ default npm behavior).
+ """
+ import shlex
+ import tempfile
+ from bb.fetch2.npm import NpmEnvironment
+
+ bb.utils.remove(d.getVar("NPM_BUILD"), recurse=True)
+
+ env = NpmEnvironment(d, configs=npm_global_configs(d))
+
+ dev = bb.utils.to_boolean(d.getVar("NPM_INSTALL_DEV"), False)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ args = []
+ configs = []
+
+ if dev:
+ configs.append(("also", "development"))
+ else:
+ configs.append(("only", "production"))
+
+ # Report as many logs as possible for debugging purpose
+ configs.append(("loglevel", "silly"))
+
+ # Configure the installation to be done globally in the build directory
+ configs.append(("global", "true"))
+ configs.append(("prefix", d.getVar("NPM_BUILD")))
+
+ # Add node-gyp configuration
+ configs.append(("arch", d.getVar("NPM_ARCH")))
+ configs.append(("release", "true"))
+ sysroot = d.getVar("RECIPE_SYSROOT_NATIVE")
+ nodedir = os.path.join(sysroot, d.getVar("prefix_native").strip("/"))
+ configs.append(("nodedir", nodedir))
+ bindir = os.path.join(sysroot, d.getVar("bindir_native").strip("/"))
+ pythondir = os.path.join(bindir, "python-native", "python")
+ configs.append(("python", pythondir))
+
+ # Add node-pre-gyp configuration
+ args.append(("target_arch", d.getVar("NPM_ARCH")))
+ args.append(("build-from-source", "true"))
+
+ # Pack and install the main package
+ tarball = npm_pack(env, d.getVar("NPM_PACKAGE"), tmpdir)
+ env.run("npm install %s" % shlex.quote(tarball), args=args, configs=configs)
}
-python populate_packages_prepend () {
- instdir = d.expand('${D}${NPM_INSTALLDIR}')
- extrapackages = oe.package.npm_split_package_dirs(instdir)
- pkgnames = extrapackages.keys()
- d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
- for pkgname in pkgnames:
- pkgrelpath, pdata = extrapackages[pkgname]
- pkgpath = '${NPM_INSTALLDIR}/' + pkgrelpath
- # package names can't have underscores but npm packages sometimes use them
- oe_pkg_name = pkgname.replace('_', '-')
- expanded_pkgname = d.expand(oe_pkg_name)
- d.setVar('FILES_%s' % expanded_pkgname, pkgpath)
- if pdata:
- version = pdata.get('version', None)
- if version:
- d.setVar('PKGV_%s' % expanded_pkgname, version)
- description = pdata.get('description', None)
- if description:
- d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'"))
- d.appendVar('RDEPENDS_%s' % d.getVar('PN'), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+npm_do_install() {
+ # Step three: final install
+ #
+ # The previous installation has to be filtered to remove some extra files.
+
+ rm -rf ${D}
+
+ # Copy the entire lib and bin directories
+ install -d ${D}/${nonarch_libdir}
+ cp --no-preserve=ownership --recursive ${NPM_BUILD}/lib/. ${D}/${nonarch_libdir}
+
+ if [ -d "${NPM_BUILD}/bin" ]
+ then
+ install -d ${D}/${bindir}
+ cp --no-preserve=ownership --recursive ${NPM_BUILD}/bin/. ${D}/${bindir}
+ fi
+
+ # If the package (or its dependencies) uses node-gyp to build native addons,
+ # object files, static libraries or other temporary files can be hidden in
+ # the lib directory. To reduce the package size and to avoid QA issues
+ # (staticdev with static library files) these files must be removed.
+ local GYP_REGEX=".*/build/Release/[^/]*.node"
+
+ # Remove any node-gyp directory in ${D} to remove temporary build files
+ for GYP_D_FILE in $(find ${D} -regex "${GYP_REGEX}")
+ do
+ local GYP_D_DIR=${GYP_D_FILE%/Release/*}
+
+ rm --recursive --force ${GYP_D_DIR}
+ done
+
+ # Copy only the node-gyp release files
+ for GYP_B_FILE in $(find ${NPM_BUILD} -regex "${GYP_REGEX}")
+ do
+ local GYP_D_FILE=${D}/${prefix}/${GYP_B_FILE#${NPM_BUILD}}
+
+ install -d ${GYP_D_FILE%/*}
+ install -m 755 ${GYP_B_FILE} ${GYP_D_FILE}
+ done
+
+ # Remove the shrinkwrap file which does not need to be packed
+ rm -f ${D}/${nonarch_libdir}/node_modules/*/npm-shrinkwrap.json
+ rm -f ${D}/${nonarch_libdir}/node_modules/@*/*/npm-shrinkwrap.json
+
+ # node(1) uses /usr/lib/node as its default include directory and npm(1)
+ # uses /usr/lib/node_modules as its install directory. Let's make both happy.
+ ln -fs node_modules ${D}/${nonarch_libdir}/node
}
FILES_${PN} += " \
- ${NPM_INSTALLDIR} \
+ ${bindir} \
+ ${nonarch_libdir} \
"
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
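For reference, the option assembly in npm_do_compile() above can be summarised as a small standalone helper. This is a simplified sketch, not part of the class; npm_arch, build_prefix, nodedir and pythondir are hypothetical stand-ins for the values the class derives from NPM_ARCH, NPM_BUILD and the native sysroot.

    # Simplified sketch of the npm install option assembly above (not class code).
    def build_npm_install_options(dev, npm_arch, build_prefix, nodedir, pythondir):
        configs = []
        # 'dev' selects whether devDependencies are installed as well
        configs.append(("also", "development") if dev else ("only", "production"))
        configs.append(("loglevel", "silly"))      # verbose logs for debugging
        configs.append(("global", "true"))         # install into an intermediate prefix
        configs.append(("prefix", build_prefix))   # i.e. ${NPM_BUILD}
        # node-gyp settings for cross-compiling native addons
        configs += [("arch", npm_arch), ("release", "true"),
                    ("nodedir", nodedir), ("python", pythondir)]
        # node-pre-gyp settings are passed as command-line arguments instead
        args = [("target_arch", npm_arch), ("build-from-source", "true")]
        return args, configs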
diff --git a/external/poky/meta/classes/package.bbclass b/external/poky/meta/classes/package.bbclass
index 66e423e9..099d0459 100644
--- a/external/poky/meta/classes/package.bbclass
+++ b/external/poky/meta/classes/package.bbclass
@@ -40,6 +40,7 @@
inherit packagedata
inherit chrpath
+inherit package_pkgdata
# Need the package_qa_handle_error() in insane.bbclass
inherit insane
@@ -75,7 +76,7 @@ def legitimize_package_name(s):
return ('\\u%s' % cp).encode('latin-1').decode('unicode_escape')
# Handle unicode codepoints encoded as <U0123>, as in glibc locale files.
- s = re.sub('<U([0-9A-Fa-f]{1,4})>', fixutf, s)
+ s = re.sub(r'<U([0-9A-Fa-f]{1,4})>', fixutf, s)
# Remaining package name validity fixes
return s.lower().replace('_', '-').replace('@', '+').replace(',', '+').replace('/', '-')
@@ -344,7 +345,7 @@ def parse_debugsources_from_dwarfsrcfiles_output(dwarfsrcfiles_output):
return debugfiles.keys()
-def append_source_info(file, sourcefile, d, fatal=True):
+def source_info(file, d, fatal=True):
import subprocess
cmd = ["dwarfsrcfiles", file]
@@ -363,22 +364,15 @@ def append_source_info(file, sourcefile, d, fatal=True):
bb.note(msg)
debugsources = parse_debugsources_from_dwarfsrcfiles_output(output)
- # filenames are null-separated - this is an artefact of the previous use
- # of rpm's debugedit, which was writing them out that way, and the code elsewhere
- # is still assuming that.
- debuglistoutput = '\0'.join(debugsources) + '\0'
- lf = bb.utils.lockfile(sourcefile + ".lock")
- with open(sourcefile, 'a') as sf:
- sf.write(debuglistoutput)
- bb.utils.unlockfile(lf)
+ return list(debugsources)
-def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d):
+def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d):
# Function to split a single file into two components, one is the stripped
# target system binary, the other contains any debugging information. The
# two files are linked to reference each other.
#
- # sourcefile is also generated containing a list of debugsources
+ # return a mapping of files:debugsources
import stat
import subprocess
@@ -386,6 +380,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
src = file[len(dvar):]
dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
debugfile = dvar + dest
+ sources = []
# Split the file...
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -397,7 +392,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
# We ignore kernel modules, we don't generate debug info files.
if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
- return 1
+ return (file, sources)
newmode = None
if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
@@ -407,7 +402,7 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
# We need to extract the debug src information here...
if debugsrcdir:
- append_source_info(file, sourcefile, d)
+ sources = source_info(file, d)
bb.utils.mkdirhier(os.path.dirname(debugfile))
@@ -419,17 +414,69 @@ def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir,
if newmode:
os.chmod(file, origmode)
- return 0
+ return (file, sources)
-def copydebugsources(debugsrcdir, d):
+def splitstaticdebuginfo(file, dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d):
+ # Unlike the function above, there is no way to split a static library
+ # into two components. So to get similar results we will copy the unmodified
+ # static library (containing the debug symbols) into a new directory.
+ # We will then strip (preserving symbols) the static library in the
+ # typical location.
+ #
+ # return a mapping of files:debugsources
+
+ import stat
+ import shutil
+
+ src = file[len(dvar):]
+ dest = debugstaticlibdir + os.path.dirname(src) + debugstaticdir + "/" + os.path.basename(src) + debugstaticappend
+ debugfile = dvar + dest
+ sources = []
+
+ # Copy the file...
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+ #bb.note("Copy %s -> %s" % (file, debugfile))
+
+ dvar = d.getVar('PKGD')
+
+ newmode = None
+ if not os.access(file, os.W_OK) or os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ # We need to extract the debug src information here...
+ if debugsrcdir:
+ sources = source_info(file, d)
+
+ bb.utils.mkdirhier(os.path.dirname(debugfile))
+
+ # Copy the unmodified item to the debug directory
+ shutil.copy2(file, debugfile)
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return (file, sources)
+
+def copydebugsources(debugsrcdir, sources, d):
# The debug src information written out to sourcefile is further processed
# and copied to the destination here.
import stat
import subprocess
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- if debugsrcdir and os.path.isfile(sourcefile):
+ if debugsrcdir and sources:
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ bb.utils.remove(sourcefile)
+
+ # filenames are null-separated - this is an artefact of the previous use
+ # of rpm's debugedit, which was writing them out that way, and the code elsewhere
+ # is still assuming that.
+ debuglistoutput = '\0'.join(sources) + '\0'
+ with open(sourcefile, 'a') as sf:
+ sf.write(debuglistoutput)
+
dvar = d.getVar('PKGD')
strip = d.getVar("STRIP")
objcopy = d.getVar("OBJCOPY")
@@ -471,7 +518,8 @@ def copydebugsources(debugsrcdir, d):
# cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
# Work around this by manually finding and copying any symbolic links that made it through.
- cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
+ cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s')" % \
+ (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
# The copy by cpio may have resulted in some empty directories! Remove these
@@ -821,8 +869,9 @@ python fixup_perms () {
# Now we actually load from the configuration files
for conf in get_fs_perms_list(d).split():
- if os.path.exists(conf):
- f = open(conf)
+ if not os.path.exists(conf):
+ continue
+ with open(conf) as f:
for line in f:
if line.startswith('#'):
continue
@@ -843,7 +892,6 @@ python fixup_perms () {
fs_perms_table[entry.path] = entry
if entry.path in fs_link_table:
fs_link_table.pop(entry.path)
- f.close()
# Debug -- list out in-memory table
#for dir in fs_perms_table:
@@ -902,7 +950,7 @@ python split_and_strip_files () {
dvar = d.getVar('PKGD')
pn = d.getVar('PN')
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
oldcwd = os.getcwd()
os.chdir(dvar)
@@ -911,30 +959,39 @@ python split_and_strip_files () {
if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-file-directory':
# Single debug-file-directory style debug info
debugappend = ".debug"
+ debugstaticappend = ""
debugdir = ""
+ debugstaticdir = ""
debuglibdir = "/usr/lib/debug"
+ debugstaticlibdir = "/usr/lib/debug-static"
debugsrcdir = "/usr/src/debug"
elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-without-src':
# Original OE-core, a.k.a. ".debug", style debug info, but without sources in /usr/src/debug
debugappend = ""
+ debugstaticappend = ""
debugdir = "/.debug"
+ debugstaticdir = "/.debug-static"
debuglibdir = ""
+ debugstaticlibdir = ""
debugsrcdir = ""
elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
debugappend = ""
+ debugstaticappend = ""
debugdir = "/.debug"
+ debugstaticdir = "/.debug-static"
debuglibdir = ""
+ debugstaticlibdir = ""
debugsrcdir = "/usr/src/debug"
else:
# Original OE-core, a.k.a. ".debug", style debug info
debugappend = ""
+ debugstaticappend = ""
debugdir = "/.debug"
+ debugstaticdir = "/.debug-static"
debuglibdir = ""
+ debugstaticlibdir = ""
debugsrcdir = "/usr/src/debug"
- sourcefile = d.expand("${WORKDIR}/debugsources.list")
- bb.utils.remove(sourcefile)
-
#
# First lets figure out all of the files we may have to process ... do this only once!
#
@@ -953,12 +1010,6 @@ python split_and_strip_files () {
for root, dirs, files in cpath.walk(dvar):
for f in files:
file = os.path.join(root, f)
- if file.endswith(".ko") and file.find("/lib/modules/") != -1:
- kernmods.append(file)
- continue
- if oe.package.is_static_lib(file):
- staticlibs.append(file)
- continue
# Skip debug files
if debugappend and file.endswith(debugappend):
@@ -969,6 +1020,13 @@ python split_and_strip_files () {
if file in skipfiles:
continue
+ if file.endswith(".ko") and file.find("/lib/modules/") != -1:
+ kernmods.append(file)
+ continue
+ if oe.package.is_static_lib(file):
+ staticlibs.append(file)
+ continue
+
try:
ltarget = cpath.realpath(file, dvar, False)
s = cpath.lstat(ltarget)
@@ -1004,6 +1062,12 @@ python split_and_strip_files () {
symlinks[file] = target
results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+
+ # Sort results by file path. This ensures that the files are always
+ # processed in the same order, which is important to make sure builds
+ # are reproducible when dealing with hardlinks.
+ results.sort(key=lambda x: x[0])
+
for (file, elf_file) in results:
# It's a file (or hardlink), not a link
# ...but is it ELF, and is it already stripped?
@@ -1039,11 +1103,18 @@ python split_and_strip_files () {
# First lets process debug splitting
#
if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
- oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d))
+ results = oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, d))
+
+ if debugsrcdir and not hostos.startswith("mingw"):
+ if (d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ results = oe.utils.multiprocess_launch(splitstaticdebuginfo, staticlibs, d, extraargs=(dvar, debugstaticdir, debugstaticlibdir, debugstaticappend, debugsrcdir, d))
+ else:
+ for file in staticlibs:
+ results.append( (file,source_info(file, d)) )
- if debugsrcdir and not targetos.startswith("mingw"):
- for file in staticlibs:
- append_source_info(file, sourcefile, d, fatal=False)
+ sources = set()
+ for r in results:
+ sources.update(r[1])
# Hardlink our debug symbols to the other hardlink copies
for ref in inodes:
@@ -1091,7 +1162,7 @@ python split_and_strip_files () {
# Process the debugsrcdir if requested...
# This copies and places the referenced sources for later debugging...
- copydebugsources(debugsrcdir, d)
+ copydebugsources(debugsrcdir, sources, d)
#
# End of debug splitting
#
@@ -1108,6 +1179,9 @@ python split_and_strip_files () {
sfiles.append((file, elf_file, strip))
for f in kernmods:
sfiles.append((f, 16, strip))
+ if (d.getVar('PACKAGE_STRIP_STATIC') == '1' or d.getVar('PACKAGE_DEBUG_STATIC_SPLIT') == '1'):
+ for f in staticlibs:
+ sfiles.append((f, 16, strip))
oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
@@ -1123,7 +1197,7 @@ python populate_packages () {
workdir = d.getVar('WORKDIR')
outdir = d.getVar('DEPLOY_DIR')
dvar = d.getVar('PKGD')
- packages = d.getVar('PACKAGES')
+ packages = d.getVar('PACKAGES').split()
pn = d.getVar('PN')
bb.utils.mkdirhier(outdir)
@@ -1133,32 +1207,34 @@ python populate_packages () {
split_source_package = (d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg')
- # If debug-with-srcpkg mode is enabled then the src package is added
- # into the package list and the source directory as its main content
+ # If debug-with-srcpkg mode is enabled then add the source package if it
+ # doesn't exist and add the source file contents to the source package.
if split_source_package:
src_package_name = ('%s-src' % d.getVar('PN'))
- packages += (' ' + src_package_name)
+ if not src_package_name in packages:
+ packages.append(src_package_name)
d.setVar('FILES_%s' % src_package_name, '/usr/src/debug')
# Sanity check PACKAGES for duplicates
# Sanity should be moved to sanity.bbclass once we have the infrastructure
package_dict = {}
- for i, pkg in enumerate(packages.split()):
+ for i, pkg in enumerate(packages):
if pkg in package_dict:
msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
package_qa_handle_error("packages-list", msg, d)
- # If debug-with-srcpkg mode is enabled then the src package will have
- # priority over dbg package when assigning the files.
- # This allows src package to include source files and remove them from dbg.
- elif split_source_package and pkg.endswith("-src"):
+ # Ensure the source package gets the chance to pick up the source files
+ # before the debug package by ordering it first in PACKAGES. Whether it
+ # actually picks up any source files is controlled by
+ # PACKAGE_DEBUG_SPLIT_STYLE.
+ elif pkg.endswith("-src"):
package_dict[pkg] = (10, i)
elif autodebug and pkg.endswith("-dbg"):
package_dict[pkg] = (30, i)
else:
package_dict[pkg] = (50, i)
- package_list = sorted(package_dict.keys(), key=package_dict.get)
- d.setVar('PACKAGES', ' '.join(package_list))
+ packages = sorted(package_dict.keys(), key=package_dict.get)
+ d.setVar('PACKAGES', ' '.join(packages))
pkgdest = d.getVar('PKGDEST')
seen = []
@@ -1173,10 +1249,10 @@ python populate_packages () {
dir = os.sep
for f in (files + dirs):
path = "." + os.path.join(dir, f)
- if "/.debug/" in path or path.endswith("/.debug"):
+ if "/.debug/" in path or "/.debug-static/" in path or path.endswith("/.debug"):
debug.append(path)
- for pkg in package_list:
+ for pkg in packages:
root = os.path.join(pkgdest, pkg)
bb.utils.mkdirhier(root)
@@ -1203,7 +1279,8 @@ python populate_packages () {
src = os.path.join(src, p)
dest = os.path.join(dest, p)
fstat = cpath.stat(src)
- os.mkdir(dest, fstat.st_mode)
+ os.mkdir(dest)
+ os.chmod(dest, fstat.st_mode)
os.chown(dest, fstat.st_uid, fstat.st_gid)
if p not in seen:
seen.append(p)
@@ -1247,9 +1324,10 @@ python populate_packages () {
# Handle LICENSE_EXCLUSION
package_list = []
- for pkg in packages.split():
- if d.getVar('LICENSE_EXCLUSION-' + pkg):
- msg = "%s has an incompatible license. Excluding from packaging." % pkg
+ for pkg in packages:
+ licenses = d.getVar('LICENSE_EXCLUSION-' + pkg)
+ if licenses:
+ msg = "Excluding %s from packaging as it has incompatible license(s): %s" % (pkg, licenses)
package_qa_handle_error("incompatible-license", msg, d)
else:
package_list.append(pkg)
@@ -1336,17 +1414,23 @@ EXPORT_FUNCTIONS package_name_hook
PKGDESTWORK = "${WORKDIR}/pkgdata"
+PKGDATA_VARS = "PN PE PV PR PKGE PKGV PKGR LICENSE DESCRIPTION SUMMARY RDEPENDS RPROVIDES RRECOMMENDS RSUGGESTS RREPLACES RCONFLICTS SECTION PKG ALLOW_EMPTY FILES CONFFILES FILES_INFO pkg_postinst pkg_postrm pkg_preinst pkg_prerm"
+
python emit_pkgdata() {
from glob import glob
import json
def process_postinst_on_target(pkg, mlprefix):
+ pkgval = d.getVar('PKG_%s' % pkg)
+ if pkgval is None:
+ pkgval = pkg
+
defer_fragment = """
if [ -n "$D" ]; then
$INTERCEPT_DIR/postinst_intercept delay_to_first_boot %s mlprefix=%s
exit 0
fi
-""" % (pkg, mlprefix)
+""" % (pkgval, mlprefix)
postinst = d.getVar('pkg_postinst_%s' % pkg)
postinst_ontarget = d.getVar('pkg_postinst_ontarget_%s' % pkg)
@@ -1403,10 +1487,9 @@ fi
pkgdest = d.getVar('PKGDEST')
pkgdatadir = d.getVar('PKGDESTWORK')
- data_file = pkgdatadir + d.expand("/${PN}" )
- f = open(data_file, 'w')
- f.write("PACKAGES: %s\n" % packages)
- f.close()
+ data_file = pkgdatadir + d.expand("/${PN}")
+ with open(data_file, 'w') as fd:
+ fd.write("PACKAGES: %s\n" % packages)
pn = d.getVar('PN')
global_variants = (d.getVar('MULTILIB_GLOBAL_VARIANTS') or "").split()
@@ -1438,50 +1521,28 @@ fi
if fstat.st_ino not in seen:
seen.add(fstat.st_ino)
total_size += fstat.st_size
- d.setVar('FILES_INFO', json.dumps(files))
+ d.setVar('FILES_INFO', json.dumps(files, sort_keys=True))
- subdata_file = pkgdatadir + "/runtime/%s" % pkg
- sf = open(subdata_file, 'w')
- write_if_exists(sf, pkg, 'PN')
- write_if_exists(sf, pkg, 'PE')
- write_if_exists(sf, pkg, 'PV')
- write_if_exists(sf, pkg, 'PR')
- write_if_exists(sf, pkg, 'PKGE')
- write_if_exists(sf, pkg, 'PKGV')
- write_if_exists(sf, pkg, 'PKGR')
- write_if_exists(sf, pkg, 'LICENSE')
- write_if_exists(sf, pkg, 'DESCRIPTION')
- write_if_exists(sf, pkg, 'SUMMARY')
- write_if_exists(sf, pkg, 'RDEPENDS')
- rprov = write_if_exists(sf, pkg, 'RPROVIDES')
- write_if_exists(sf, pkg, 'RRECOMMENDS')
- write_if_exists(sf, pkg, 'RSUGGESTS')
- write_if_exists(sf, pkg, 'RREPLACES')
- write_if_exists(sf, pkg, 'RCONFLICTS')
- write_if_exists(sf, pkg, 'SECTION')
- write_if_exists(sf, pkg, 'PKG')
- write_if_exists(sf, pkg, 'ALLOW_EMPTY')
- write_if_exists(sf, pkg, 'FILES')
- write_if_exists(sf, pkg, 'CONFFILES')
process_postinst_on_target(pkg, d.getVar("MLPREFIX"))
add_set_e_to_scriptlets(pkg)
- write_if_exists(sf, pkg, 'pkg_postinst')
- write_if_exists(sf, pkg, 'pkg_postrm')
- write_if_exists(sf, pkg, 'pkg_preinst')
- write_if_exists(sf, pkg, 'pkg_prerm')
- write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
- write_if_exists(sf, pkg, 'FILES_INFO')
- for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
-
- write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
- for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
- write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
-
- sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
- sf.close()
+
+ subdata_file = pkgdatadir + "/runtime/%s" % pkg
+ with open(subdata_file, 'w') as sf:
+ for var in (d.getVar('PKGDATA_VARS') or "").split():
+ val = write_if_exists(sf, pkg, var)
+
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg) or "").split():
+ write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg) or "").split():
+ write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+
+ sf.write('%s_%s: %d\n' % ('PKGSIZE', pkg, total_size))
# Symlinks needed for rprovides lookup
+ rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES')
if rprov:
for p in rprov.strip().split():
subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg)
@@ -1561,12 +1622,12 @@ python package_do_filedeps() {
if pkg not in requires_files:
requires_files[pkg] = []
- for file in provides:
+ for file in sorted(provides):
provides_files[pkg].append(file)
key = "FILERPROVIDES_" + file + "_" + pkg
d.appendVar(key, " " + " ".join(provides[file]))
- for file in requires:
+ for file in sorted(requires):
requires_files[pkg].append(file)
key = "FILERDEPENDS_" + file + "_" + pkg
d.appendVar(key, " " + " ".join(requires[file]))
@@ -1577,10 +1638,11 @@ python package_do_filedeps() {
d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
}
-SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs2"
+SHLIBSDIRS = "${WORKDIR_PKGDATA}/${MLPREFIX}shlibs2"
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs2"
python package_do_shlibs() {
+ import itertools
import re, pipes
import subprocess
@@ -1589,8 +1651,8 @@ python package_do_shlibs() {
bb.note("not generating shlibs")
return
- lib_re = re.compile("^.*\.so")
- libdir_re = re.compile(".*/%s$" % d.getVar('baselib'))
+ lib_re = re.compile(r"^.*\.so")
+ libdir_re = re.compile(r".*/%s$" % d.getVar('baselib'))
packages = d.getVar('PACKAGES')
@@ -1605,7 +1667,7 @@ python package_do_shlibs() {
else:
shlib_pkgs = packages.split()
- targetos = d.getVar('TARGET_OS')
+ hostos = d.getVar('HOST_OS')
workdir = d.getVar('WORKDIR')
@@ -1631,23 +1693,24 @@ python package_do_shlibs() {
fd.close()
rpath = tuple()
for l in lines:
- m = re.match("\s+RPATH\s+([^\s]*)", l)
+ m = re.match(r"\s+RPATH\s+([^\s]*)", l)
if m:
rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
rpath = tuple(map(os.path.normpath, rpaths))
for l in lines:
- m = re.match("\s+NEEDED\s+([^\s]*)", l)
+ m = re.match(r"\s+NEEDED\s+([^\s]*)", l)
if m:
dep = m.group(1)
if dep not in needed:
needed.add((dep, file, rpath))
- m = re.match("\s+SONAME\s+([^\s]*)", l)
+ m = re.match(r"\s+SONAME\s+([^\s]*)", l)
if m:
this_soname = m.group(1)
prov = (this_soname, ldir, pkgver)
if not prov in sonames:
# if library is private (only used by package) then do not build shlib for it
- if not private_libs or this_soname not in private_libs:
+ import fnmatch
+ if not private_libs or len([i for i in private_libs if fnmatch.fnmatch(this_soname, i)]) == 0:
sonames.add(prov)
if libdir_re.match(os.path.dirname(file)):
needs_ldconfig = True
@@ -1721,7 +1784,7 @@ python package_do_shlibs() {
out, err = p.communicate()
# process the output, grabbing all .dll names
if p.returncode == 0:
- for m in re.finditer("DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
+ for m in re.finditer(r"DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
dllname = m.group(1)
if dllname:
needed[pkg].add((dllname, file, tuple()))
@@ -1731,14 +1794,9 @@ python package_do_shlibs() {
else:
snap_symlinks = False
- use_ldconfig = bb.utils.contains('DISTRO_FEATURES', 'ldconfig', True, False, d)
-
needed = {}
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
shlib_provider = oe.package.read_shlib_providers(d)
- bb.utils.unlockfile(lf)
for pkg in shlib_pkgs:
private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
@@ -1760,9 +1818,9 @@ python package_do_shlibs() {
soname = None
if cpath.islink(file):
continue
- if targetos == "darwin" or targetos == "darwin8":
+ if hostos == "darwin" or hostos == "darwin8":
darwin_so(file, needed, sonames, renames, pkgver)
- elif targetos.startswith("mingw"):
+ elif hostos.startswith("mingw"):
mingw_dll(file, needed, sonames, renames, pkgver)
elif os.access(file, os.X_OK) or lib_re.match(file):
linuxlist.append(file)
@@ -1780,22 +1838,21 @@ python package_do_shlibs() {
bb.note("Renaming %s to %s" % (old, new))
os.rename(old, new)
pkgfiles[pkg].remove(old)
-
+
shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
if len(sonames):
- fd = open(shlibs_file, 'w')
- for s in sonames:
- if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
- (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
- if old_pkg != pkg:
- bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
- bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
- fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
- if s[0] not in shlib_provider:
- shlib_provider[s[0]] = {}
- shlib_provider[s[0]][s[1]] = (pkg, pkgver)
- fd.close()
- if needs_ldconfig and use_ldconfig:
+ with open(shlibs_file, 'w') as fd:
+ for s in sonames:
+ if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]:
+ (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]]
+ if old_pkg != pkg:
+ bb.warn('%s-%s was registered as shlib provider for %s, changing it to %s-%s because it was built later' % (old_pkg, old_pkgver, s[0], pkg, pkgver))
+ bb.debug(1, 'registering %s-%s as shlib provider for %s' % (pkg, pkgver, s[0]))
+ fd.write(s[0] + ':' + s[1] + ':' + s[2] + '\n')
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (pkg, pkgver)
+ if needs_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
@@ -1833,20 +1890,21 @@ python package_do_shlibs() {
# /opt/abc/lib/libfoo.so.1 and contains /usr/bin/abc depending on system library libfoo.so.1
# but skipping it is still better alternative than providing own
# version and then adding runtime dependency for the same system library
- if private_libs and n[0] in private_libs:
+ import fnmatch
+ if private_libs and len([i for i in private_libs if fnmatch.fnmatch(n[0], i)]) > 0:
bb.debug(2, '%s: Dependency %s covered by PRIVATE_LIBS' % (pkg, n[0]))
continue
if n[0] in shlib_provider.keys():
- shlib_provider_path = []
- for k in shlib_provider[n[0]].keys():
- shlib_provider_path.append(k)
- match = None
- for p in list(n[2]) + shlib_provider_path + libsearchpath:
- if p in shlib_provider[n[0]]:
- match = p
- break
- if match:
- (dep_pkg, ver_needed) = shlib_provider[n[0]][match]
+ shlib_provider_map = shlib_provider[n[0]]
+ matches = set()
+ for p in itertools.chain(list(n[2]), sorted(shlib_provider_map.keys()), libsearchpath):
+ if p in shlib_provider_map:
+ matches.add(p)
+ if len(matches) > 1:
+ matchpkgs = ', '.join([shlib_provider_map[match][0] for match in matches])
+ bb.error("%s: Multiple shlib providers for %s: %s (used by files: %s)" % (pkg, n[0], matchpkgs, n[1]))
+ elif len(matches) == 1:
+ (dep_pkg, ver_needed) = shlib_provider_map[matches.pop()]
bb.debug(2, '%s: Dependency %s requires package %s (used by files: %s)' % (pkg, n[0], dep_pkg, n[1]))
@@ -1865,11 +1923,10 @@ python package_do_shlibs() {
deps_file = os.path.join(pkgdest, pkg + ".shlibdeps")
if os.path.exists(deps_file):
os.remove(deps_file)
- if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
+ if deps:
+ with open(deps_file, 'w') as fd:
+ for dep in sorted(deps):
+ fd.write(dep + '\n')
}
python package_do_pkgconfig () {
@@ -1882,9 +1939,9 @@ python package_do_pkgconfig () {
shlibs_dirs = d.getVar('SHLIBSDIRS').split()
shlibswork_dir = d.getVar('SHLIBSWORKDIR')
- pc_re = re.compile('(.*)\.pc$')
- var_re = re.compile('(.*)=(.*)')
- field_re = re.compile('(.*): (.*)')
+ pc_re = re.compile(r'(.*)\.pc$')
+ var_re = re.compile(r'(.*)=(.*)')
+ field_re = re.compile(r'(.*): (.*)')
pkgconfig_provided = {}
pkgconfig_needed = {}
@@ -1899,9 +1956,8 @@ python package_do_pkgconfig () {
pkgconfig_provided[pkg].append(name)
if not os.access(file, os.R_OK):
continue
- f = open(file, 'r')
- lines = f.readlines()
- f.close()
+ with open(file, 'r') as f:
+ lines = f.readlines()
for l in lines:
m = var_re.match(l)
if m:
@@ -1919,31 +1975,24 @@ python package_do_pkgconfig () {
for pkg in packages.split():
pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
- f = open(pkgs_file, 'w')
- for p in pkgconfig_provided[pkg]:
- f.write('%s\n' % p)
- f.close()
-
- # Take shared lock since we're only reading, not writing
- lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+ with open(pkgs_file, 'w') as f:
+ for p in pkgconfig_provided[pkg]:
+ f.write('%s\n' % p)
# Go from least to most specific since the last one found wins
for dir in reversed(shlibs_dirs):
if not os.path.exists(dir):
continue
- for file in os.listdir(dir):
- m = re.match('^(.*)\.pclist$', file)
+ for file in sorted(os.listdir(dir)):
+ m = re.match(r'^(.*)\.pclist$', file)
if m:
pkg = m.group(1)
- fd = open(os.path.join(dir, file))
- lines = fd.readlines()
- fd.close()
+ with open(os.path.join(dir, file)) as fd:
+ lines = fd.readlines()
pkgconfig_provided[pkg] = []
for l in lines:
pkgconfig_provided[pkg].append(l.rstrip())
- bb.utils.unlockfile(lf)
-
for pkg in packages.split():
deps = []
for n in pkgconfig_needed[pkg]:
@@ -1957,10 +2006,9 @@ python package_do_pkgconfig () {
bb.note("couldn't find pkgconfig module '%s' in any package" % n)
deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
if len(deps):
- fd = open(deps_file, 'w')
- for dep in deps:
- fd.write(dep + '\n')
- fd.close()
+ with open(deps_file, 'w') as fd:
+ for dep in deps:
+ fd.write(dep + '\n')
}
def read_libdep_files(d):
@@ -1971,9 +2019,8 @@ def read_libdep_files(d):
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
depsfile = d.expand("${PKGDEST}/" + pkg + extension)
if os.access(depsfile, os.R_OK):
- fd = open(depsfile)
- lines = fd.readlines()
- fd.close()
+ with open(depsfile) as fd:
+ lines = fd.readlines()
for l in lines:
l.rstrip()
deps = bb.utils.explode_dep_versions2(l)
@@ -1988,7 +2035,7 @@ python read_shlibdeps () {
packages = d.getVar('PACKAGES').split()
for pkg in packages:
rdepends = bb.utils.explode_dep_versions2(d.getVar('RDEPENDS_' + pkg) or "")
- for dep in pkglibdeps[pkg]:
+ for dep in sorted(pkglibdeps[pkg]):
# Add the dep if it's not already there, or if no comparison is set
if dep not in rdepends:
rdepends[dep] = []
@@ -2021,7 +2068,7 @@ python package_depchains() {
#bb.note('depends for %s is %s' % (base, depends))
rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
- for depend in depends:
+ for depend in sorted(depends):
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
#bb.note("Skipping %s" % depend)
continue
@@ -2042,7 +2089,7 @@ python package_depchains() {
#bb.note('rdepends for %s is %s' % (base, rdepends))
rreclist = bb.utils.explode_dep_versions2(d.getVar('RRECOMMENDS_' + pkg) or "")
- for depend in rdepends:
+ for depend in sorted(rdepends):
if depend.find('virtual-locale-') != -1:
#bb.note("Skipping %s" % depend)
continue
@@ -2124,10 +2171,12 @@ python package_depchains() {
# iteration, we need to list them here:
PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm pkg_postinst_ontarget INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME ALTERNATIVE PKGE PKGV PKGR USERADD_PARAM GROUPADD_PARAM CONFFILES SYSTEMD_SERVICE LICENSE SECTION pkg_preinst pkg_prerm RREPLACES GROUPMEMS_PARAM SYSTEMD_AUTO_ENABLE SKIP_FILEDEPS PRIVATE_LIBS"
-def gen_packagevar(d):
+def gen_packagevar(d, pkgvars="PACKAGEVARS"):
ret = []
pkgs = (d.getVar("PACKAGES") or "").split()
- vars = (d.getVar("PACKAGEVARS") or "").split()
+ vars = (d.getVar(pkgvars) or "").split()
+ for v in vars:
+ ret.append(v)
for p in pkgs:
for v in vars:
ret.append(v + "_" + p)
@@ -2140,6 +2189,7 @@ def gen_packagevar(d):
PACKAGE_PREPROCESS_FUNCS ?= ""
# Functions for setting up PKGD
PACKAGEBUILDPKGD ?= " \
+ package_prepare_pkgdata \
perform_packagecopy \
${PACKAGE_PREPROCESS_FUNCS} \
split_and_strip_files \
@@ -2260,19 +2310,19 @@ python do_package_setscene () {
}
addtask do_package_setscene
-do_packagedata () {
- :
+# Copy from PKGDESTWORK to a temporary directory, as that directory can be cleaned
+# by both do_package_setscene and do_packagedata_setscene, leading to races
+python do_packagedata () {
+ src = d.expand("${PKGDESTWORK}")
+ dest = d.expand("${WORKDIR}/pkgdata-pdata-input")
+ oe.path.copyhardlinktree(src, dest)
}
addtask packagedata before do_build after do_package
SSTATETASKS += "do_packagedata"
-# PACKAGELOCK protects readers of PKGDATA_DIR against writes
-# whilst code is reading in do_package
-PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
-do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
+do_packagedata[sstate-inputdirs] = "${WORKDIR}/pkgdata-pdata-input"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
-do_packagedata[sstate-lockfile] = "${PACKAGELOCK}"
do_packagedata[stamp-extra-info] = "${MACHINE_ARCH}"
python do_packagedata_setscene () {
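The debug-splitting rework above changes splitdebuginfo() and splitstaticdebuginfo() to return their debug sources rather than appending them to a shared, lock-protected debugsources.list. A minimal sketch of how the caller now aggregates those results; the file and source names are hypothetical.

    # Minimal sketch of the aggregation in split_and_strip_files() above.
    # Each worker returns (file, debugsources); the caller merges them into a
    # single set that is later handed to copydebugsources().
    results = [
        ("/work/pkgd/usr/bin/foo", ["foo-1.0/src/foo.c", "foo-1.0/src/util.c"]),  # hypothetical
        ("/work/pkgd/usr/lib/libbar.so.1", ["bar-2.1/bar.c"]),                    # hypothetical
    ]
    sources = set()
    for _, debugsources in results:
        sources.update(debugsources)
    # copydebugsources(debugsrcdir, sources, d) then writes the null-separated
    # list to ${WORKDIR}/debugsources.list and copies the sources via cpio.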
diff --git a/external/poky/meta/classes/package_deb.bbclass b/external/poky/meta/classes/package_deb.bbclass
index 6f815916..790b26ae 100644
--- a/external/poky/meta/classes/package_deb.bbclass
+++ b/external/poky/meta/classes/package_deb.bbclass
@@ -6,6 +6,8 @@ inherit package
IMAGE_PKGTYPE ?= "deb"
+DPKG_BUILDCMD ??= "dpkg-deb"
+
DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH'), d.getVar('TUNE_FEATURES'))}"
DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
@@ -269,7 +271,8 @@ def deb_write_pkg(pkg, d):
conffiles.close()
os.chdir(basedir)
- subprocess.check_output("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH"), root, pkgoutdir),
+ subprocess.check_output("PATH=\"%s\" %s -b %s %s" % (localdata.getVar("PATH"), localdata.getVar("DPKG_BUILDCMD"),
+ root, pkgoutdir),
stderr=subprocess.STDOUT,
shell=True)
diff --git a/external/poky/meta/classes/package_ipk.bbclass b/external/poky/meta/classes/package_ipk.bbclass
index 5eb910ca..c008559e 100644
--- a/external/poky/meta/classes/package_ipk.bbclass
+++ b/external/poky/meta/classes/package_ipk.bbclass
@@ -8,13 +8,13 @@ IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
# Program to be used to build opkg packages
-OPKGBUILDCMD ??= "opkg-build -Z xz"
+OPKGBUILDCMD ??= 'opkg-build -Z xz -a "${XZ_DEFAULTS}"'
OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS") == "1"]}"
OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE') or "").split())][(d.getVar("PACKAGE_EXCLUDE") or "").strip() != ""]}"
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
python do_package_ipk () {
workdir = d.getVar('WORKDIR')
@@ -45,6 +45,7 @@ def ipk_write_pkg(pkg, d):
import subprocess
import textwrap
import collections
+ import glob
def cleanupcontrol(root):
for p in ['CONTROL', 'DEBIAN']:
@@ -101,8 +102,7 @@ def ipk_write_pkg(pkg, d):
bb.utils.mkdirhier(pkgoutdir)
os.chdir(root)
cleanupcontrol(root)
- from glob import glob
- g = glob('*')
+ g = glob.glob('*')
if not g and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV'), localdata.getVar('PKGR')))
return
@@ -154,7 +154,6 @@ def ipk_write_pkg(pkg, d):
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
else:
ctrlfile.write(c % tuple(pullData(fs, localdata)))
- # more fields
custom_fields_chunk = get_package_additional_metadata("ipk", localdata)
if custom_fields_chunk is not None:
@@ -238,6 +237,10 @@ def ipk_write_pkg(pkg, d):
cleanupcontrol(root)
bb.utils.unlockfile(lf)
+# Have to list any variables referenced as X_<pkg> that aren't in pkgdata here
+IPKEXTRAVARS = "PRIORITY MAINTAINER PACKAGE_ARCH HOMEPAGE"
+ipk_write_pkg[vardeps] += "${@gen_packagevar(d, 'IPKEXTRAVARS')}"
+
# Otherwise allarch packages may change depending on override configuration
ipk_write_pkg[vardepsexclude] = "OVERRIDES"
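The IPKEXTRAVARS hook above relies on the gen_packagevar() extension in package.bbclass, which now accepts a variable list name and also emits the un-suffixed variables. A rough illustration with hypothetical package and variable names:

    # Rough illustration of gen_packagevar(d, 'IPKEXTRAVARS') after the change
    # above; package and variable names are hypothetical.
    def expand_packagevars(pkgs, pkgvars):
        ret = list(pkgvars)              # base variables are now included as well
        for p in pkgs:
            for v in pkgvars:
                ret.append(v + "_" + p)
        return " ".join(ret)

    print(expand_packagevars(["foo", "foo-dev"], ["PRIORITY", "MAINTAINER"]))
    # -> PRIORITY MAINTAINER PRIORITY_foo MAINTAINER_foo PRIORITY_foo-dev MAINTAINER_foo-dev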
diff --git a/external/poky/meta/classes/package_pkgdata.bbclass b/external/poky/meta/classes/package_pkgdata.bbclass
new file mode 100644
index 00000000..18b7ed62
--- /dev/null
+++ b/external/poky/meta/classes/package_pkgdata.bbclass
@@ -0,0 +1,167 @@
+WORKDIR_PKGDATA = "${WORKDIR}/pkgdata-sysroot"
+
+def package_populate_pkgdata_dir(pkgdatadir, d):
+ import glob
+
+ postinsts = []
+ seendirs = set()
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+
+ bb.utils.mkdirhier(pkgdatadir)
+ for pkgarch in pkgarchs:
+ for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.packagedata" % pkgarch)):
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ dest = l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, pkgdatadir, dest, seendirs)
+ continue
+ try:
+ staging_copyfile(l, pkgdatadir, dest, postinsts, seendirs)
+ except FileExistsError:
+ continue
+
+python package_prepare_pkgdata() {
+ import copy
+ import glob
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ if mytaskname.endswith("_setscene"):
+ mytaskname = mytaskname.replace("_setscene", "")
+ workdir = d.getVar("WORKDIR")
+ pn = d.getVar("PN")
+ stagingdir = d.getVar("PKGDATA_DIR")
+ pkgdatadir = d.getVar("WORKDIR_PKGDATA")
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps:
+ package_populate_pkgdata_dir(pkgdatadir, d)
+ return
+
+ start = None
+ configuredeps = []
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+
+ # We need to figure out which sysroot files we need to expose to this task.
+ # This needs to match what would get restored from sstate, which is controlled
+ # ultimately by calls from bitbake to setscene_depvalid().
+ # That function expects a setscene dependency tree. We build a dependency tree
+ # condensed to inter-sstate task dependencies, similar to that used by setscene
+ # tasks. We can then call into setscene_depvalid() and decide
+ # which dependencies we can "see" and should expose in the recipe specific sysroot.
+ setscenedeps = copy.deepcopy(taskdepdata)
+
+ start = set([start])
+
+ sstatetasks = d.getVar("SSTATETASKS").split()
+ # Add recipe specific tasks referenced by setscene_depvalid()
+ sstatetasks.append("do_stash_locale")
+
+ # If start is an sstate task (like do_package) we need to add in its direct dependencies
+ # else the code below won't recurse into them.
+ for dep in set(start):
+ for dep2 in setscenedeps[dep][3]:
+ start.add(dep2)
+ start.remove(dep)
+
+ # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
+ for dep in taskdepdata:
+ data = setscenedeps[dep]
+ if data[1] not in sstatetasks:
+ for dep2 in setscenedeps:
+ data2 = setscenedeps[dep2]
+ if dep in data2[3]:
+ data2[3].update(setscenedeps[dep][3])
+ data2[3].remove(dep)
+ if dep in start:
+ start.update(setscenedeps[dep][3])
+ start.remove(dep)
+ del setscenedeps[dep]
+
+ # Remove circular references
+ for dep in setscenedeps:
+ if dep in setscenedeps[dep][3]:
+ setscenedeps[dep][3].remove(dep)
+
+ # Direct dependencies should be present and can be depended upon
+ for dep in set(start):
+ if setscenedeps[dep][1] == "do_packagedata":
+ if dep not in configuredeps:
+ configuredeps.append(dep)
+
+ msgbuf = []
+ # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
+ # for ones that would be restored from sstate.
+ done = list(start)
+ next = list(start)
+ while next:
+ new = []
+ for dep in next:
+ data = setscenedeps[dep]
+ for datadep in data[3]:
+ if datadep in done:
+ continue
+ taskdeps = {}
+ taskdeps[dep] = setscenedeps[dep][:2]
+ taskdeps[datadep] = setscenedeps[datadep][:2]
+ retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
+ done.append(datadep)
+ new.append(datadep)
+ if retval:
+ msgbuf.append("Skipping setscene dependency %s" % datadep)
+ continue
+ if datadep not in configuredeps and setscenedeps[datadep][1] == "do_packagedata":
+ configuredeps.append(datadep)
+ msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
+ else:
+ msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
+ next = new
+
+ # This logging is too verbose for day-to-day use, sadly
+ #bb.debug(2, "\n".join(msgbuf))
+
+ seendirs = set()
+ postinsts = []
+ multilibs = {}
+ manifests = {}
+
+ msg_adding = []
+
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ msg_adding.append(c)
+
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "packagedata", d, multilibs)
+ destsysroot = pkgdatadir
+
+ if manifest:
+ targetdir = destsysroot
+ with open(manifest, "r") as f:
+ manifests[dep] = manifest
+ for l in f:
+ l = l.strip()
+ dest = targetdir + l.replace(stagingdir, "")
+ if l.endswith("/"):
+ staging_copydir(l, targetdir, dest, seendirs)
+ continue
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ bb.note("Installed into pkgdata-sysroot: %s" % str(msg_adding))
+
+}
+package_prepare_pkgdata[cleandirs] = "${WORKDIR_PKGDATA}"
+package_prepare_pkgdata[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+
+
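package_populate_pkgdata_dir() above walks the sstate manifests in a fixed architecture order, from the most machine-specific architecture down to allarch. A small sketch of how that search order is built; the architecture values are hypothetical examples.

    # Sketch of the manifest search order used by package_populate_pkgdata_dir().
    machine_arch = "qemux86_64"                                   # hypothetical
    package_extra_archs = "all any noarch x86_64 core2-64".split()  # hypothetical
    pkgarchs = [machine_arch] + list(reversed(package_extra_archs)) + ["allarch"]
    # Manifests are matched against ${MACHINE_ARCH} first, then the reversed
    # ${PACKAGE_EXTRA_ARCHS}, and finally 'allarch'.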
diff --git a/external/poky/meta/classes/package_rpm.bbclass b/external/poky/meta/classes/package_rpm.bbclass
index 21ada348..9145717f 100644
--- a/external/poky/meta/classes/package_rpm.bbclass
+++ b/external/poky/meta/classes/package_rpm.bbclass
@@ -36,7 +36,7 @@ def write_rpm_perfiledata(srcname, d):
pkgd = d.getVar('PKGD')
def dump_filerdeps(varname, outfile, d):
- outfile.write("#!/usr/bin/env python\n\n")
+ outfile.write("#!/usr/bin/env python3\n\n")
outfile.write("# Dependency table\n")
outfile.write('deps = {\n')
for pkg in packages.split():
@@ -113,6 +113,10 @@ python write_specfile () {
source_list = os.listdir(ar_outdir)
source_number = 0
for source in source_list:
+ # do_deploy_archives may have already run (from sstate) meaning a .src.rpm may already
+ # exist in ARCHIVER_OUTDIR so skip if present.
+ if source.endswith(".src.rpm"):
+ continue
# The rpmbuild doesn't need the root permission, but it needs
# to know the file's user and group name, the only user and
# group in fakeroot is "root" when working in fakeroot.
@@ -405,7 +409,6 @@ python write_specfile () {
if not file_list and localdata.getVar('ALLOW_EMPTY', False) != "1":
bb.note("Not creating empty RPM package for %s" % splitname)
else:
- bb.note("Creating RPM package for %s" % splitname)
spec_files_top.append('%files')
if extra_pkgdata:
package_rpm_extra_pkgdata(splitname, spec_files_top, localdata)
@@ -414,7 +417,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_top.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_top.append('')
continue
@@ -506,7 +509,7 @@ python write_specfile () {
bb.note("Creating RPM package for %s" % splitname)
spec_files_bottom.extend(file_list)
else:
- bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ bb.note("Creating empty RPM package for %s" % splitname)
spec_files_bottom.append('')
del localdata
@@ -690,7 +693,7 @@ python do_package_rpm () {
cmd = cmd + " --define '_tmppath " + workdir + "'"
if d.getVarFlag('ARCHIVER_MODE', 'srpm') == '1' and bb.data.inherits_class('archiver', d):
cmd = cmd + " --define '_sourcedir " + d.getVar('ARCHIVER_OUTDIR') + "'"
- cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_OUTDIR') + "'"
+ cmdsrpm = cmd + " --define '_srcrpmdir " + d.getVar('ARCHIVER_RPMOUTDIR') + "'"
cmdsrpm = cmdsrpm + " -bs " + outspecfile
# Build the .src.rpm
d.setVar('SBUILDSPEC', cmdsrpm + "\n")
diff --git a/external/poky/meta/classes/packagegroup.bbclass b/external/poky/meta/classes/packagegroup.bbclass
index d540d421..1541c8fb 100644
--- a/external/poky/meta/classes/packagegroup.bbclass
+++ b/external/poky/meta/classes/packagegroup.bbclass
@@ -8,7 +8,7 @@ PACKAGES = "${PN}"
# By default, packagegroup packages do not depend on a certain architecture.
# Only if dependencies are modified by MACHINE_FEATURES, packages
-# need to be set to MACHINE_ARCH after inheriting packagegroup.bbclass
+# need to be set to MACHINE_ARCH before inheriting packagegroup.bbclass
PACKAGE_ARCH ?= "all"
# Fully expanded - so it applies the overrides as well
@@ -48,6 +48,8 @@ deltask do_compile
deltask do_install
deltask do_populate_sysroot
+INHIBIT_DEFAULT_DEPS = "1"
+
python () {
if bb.data.inherits_class('nativesdk', d):
return
diff --git a/external/poky/meta/classes/patch.bbclass b/external/poky/meta/classes/patch.bbclass
index cd241f1c..25ec089a 100644
--- a/external/poky/meta/classes/patch.bbclass
+++ b/external/poky/meta/classes/patch.bbclass
@@ -5,6 +5,13 @@ QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
+# There is a bug in patch 2.7.3 and earlier where index lines
+# in patches can change file modes when they shouldn't:
+# http://git.savannah.gnu.org/cgit/patch.git/patch/?id=82b800c9552a088a241457948219d25ce0a407a4
+# This leaks into debug sources in particular. Add the dependency
+# to target recipes to avoid this problem until we can rely on 2.7.4 or later.
+PATCHDEPENDENCY_append_class-target = " patch-replacement-native:do_populate_sysroot"
+
PATCH_GIT_USER_NAME ?= "OpenEmbedded"
PATCH_GIT_USER_EMAIL ?= "oe.patch@oe"
diff --git a/external/poky/meta/classes/perl-version.bbclass b/external/poky/meta/classes/perl-version.bbclass
index bafd9651..84b67b81 100644
--- a/external/poky/meta/classes/perl-version.bbclass
+++ b/external/poky/meta/classes/perl-version.bbclass
@@ -1,4 +1,4 @@
-PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
+PERL_OWN_DIR = ""
# Determine the staged version of perl from the perl configuration file
# Assign vardepvalue, because otherwise signature is changed before and after
@@ -6,7 +6,7 @@ PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
get_perl_version[vardepvalue] = "${PERL_OWN_DIR}"
def get_perl_version(d):
import re
- cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
try:
f = open(cfg, 'r')
except IOError:
@@ -22,3 +22,45 @@ def get_perl_version(d):
PERLVERSION := "${@get_perl_version(d)}"
PERLVERSION[vardepvalue] = ""
+
+
+# Determine the staged arch of perl from the perl configuration file
+# Assign vardepvalue, because otherwise signature is changed before and after
+# perl is built (from None to real version in config.sh).
+def get_perl_arch(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl5/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+ l = f.readlines();
+ f.close();
+ r = re.compile("^archname='([^']*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
+
+PERLARCH := "${@get_perl_arch(d)}"
+PERLARCH[vardepvalue] = ""
+
+# Determine the staged arch of perl-native from the perl configuration file
+# Assign vardepvalue, because otherwise signature is changed before and after
+# perl is built (from None to real version in config.sh).
+def get_perl_hostarch(d):
+ import re
+ cfg = d.expand('${STAGING_LIBDIR_NATIVE}/perl5/config.sh')
+ try:
+ f = open(cfg, 'r')
+ except IOError:
+ return None
+ l = f.readlines();
+ f.close();
+ r = re.compile("^archname='([^']*)'")
+ for s in l:
+ m = r.match(s)
+ if m:
+ return m.group(1)
+ return None
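Both get_perl_arch() and get_perl_hostarch() above extract the archname value from perl's config.sh. A tiny sketch of the parsing they perform; the archname value is a hypothetical example.

    # Sketch of the config.sh parsing done by the helpers above.
    import re

    line = "archname='x86_64-linux'\n"   # hypothetical config.sh line
    m = re.compile(r"^archname='([^']*)'").match(line)
    print(m.group(1))                    # -> x86_64-linux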
diff --git a/external/poky/meta/classes/pixbufcache.bbclass b/external/poky/meta/classes/pixbufcache.bbclass
index 3378ff2c..b07f51ed 100644
--- a/external/poky/meta/classes/pixbufcache.bbclass
+++ b/external/poky/meta/classes/pixbufcache.bbclass
@@ -3,7 +3,7 @@
# packages.
#
-DEPENDS += "qemu-native"
+DEPENDS_append_class-target = " qemu-native"
inherit qemu
PIXBUF_PACKAGES ??= "${PN}"
@@ -54,7 +54,6 @@ GDK_PIXBUF_FATAL_LOADER=1 ${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-que
DEPENDS_append_class-native = " gdk-pixbuf-native"
SYSROOT_PREPROCESS_FUNCS_append_class-native = " pixbufcache_sstate_postinst"
-# See base.bbclass for the other half of this
pixbufcache_sstate_postinst() {
mkdir -p ${SYSROOT_DESTDIR}${bindir}
dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}
diff --git a/external/poky/meta/classes/populate_sdk_base.bbclass b/external/poky/meta/classes/populate_sdk_base.bbclass
index 677ba3cf..3e5b1359 100644
--- a/external/poky/meta/classes/populate_sdk_base.bbclass
+++ b/external/poky/meta/classes/populate_sdk_base.bbclass
@@ -6,7 +6,9 @@ COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev'
COMPLEMENTARY_GLOB[staticdev-pkgs] = '*-staticdev'
COMPLEMENTARY_GLOB[doc-pkgs] = '*-doc'
COMPLEMENTARY_GLOB[dbg-pkgs] = '*-dbg'
+COMPLEMENTARY_GLOB[src-pkgs] = '*-src'
COMPLEMENTARY_GLOB[ptest-pkgs] = '*-ptest'
+COMPLEMENTARY_GLOB[bash-completion-pkgs] = '*-bash-completion'
def complementary_globs(featurevar, d):
all_globs = d.getVarFlags('COMPLEMENTARY_GLOB')
@@ -17,8 +19,9 @@ def complementary_globs(featurevar, d):
globs.append(glob)
return ' '.join(globs)
-SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
+SDKIMAGE_FEATURES ??= "dev-pkgs dbg-pkgs src-pkgs ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'doc-pkgs', '', d)}"
SDKIMAGE_INSTALL_COMPLEMENTARY = '${@complementary_globs("SDKIMAGE_FEATURES", d)}'
+SDKIMAGE_INSTALL_COMPLEMENTARY[vardeps] += "SDKIMAGE_FEATURES"
PACKAGE_ARCHS_append_task-populate-sdk = " sdk-provides-dummy-target"
SDK_PACKAGE_ARCHS += "sdk-provides-dummy-${SDKPKGSUFFIX}"
@@ -44,10 +47,27 @@ TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalon
TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
+# Default archived SDK's suffix
+SDK_ARCHIVE_TYPE ?= "tar.xz"
+SDK_XZ_COMPRESSION_LEVEL ?= "-9"
+SDK_XZ_OPTIONS ?= "${XZ_DEFAULTS} ${SDK_XZ_COMPRESSION_LEVEL}"
+
+# Select the SDK archive type according to SDK_ARCHIVE_TYPE; zip and tar.xz are currently supported
+python () {
+ if d.getVar('SDK_ARCHIVE_TYPE') == 'zip':
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'zip-native')
+ # SDK_ARCHIVE_CMD is used to generate the archived SDK ${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} from the input dir ${SDK_OUTPUT}/${SDKPATH} into the output dir ${SDKDEPLOYDIR}.
+ # It is recommended to cd into the input dir first to avoid archiving the build path.
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} .')
+ else:
+ d.setVar('SDK_ARCHIVE_DEPENDS', 'xz-native')
+ d.setVar('SDK_ARCHIVE_CMD', 'cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | xz ${SDK_XZ_OPTIONS} > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}')
+}
+
SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native xz-native cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
+SDK_DEPENDS = "virtual/fakeroot-native ${SDK_ARCHIVE_DEPENDS} cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
-SDK_DEPENDS_append_libc-glibc = " nativesdk-glibc-locale"
+SDK_DEPENDS += "nativesdk-glibc-locale"
# We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
# could be set to the MACHINE_ARCH
@@ -103,7 +123,7 @@ POPULATE_SDK_POST_TARGET_COMMAND_append = " write_sdk_test_data ; "
POPULATE_SDK_POST_TARGET_COMMAND_append_task-populate-sdk = " write_target_sdk_manifest ; "
POPULATE_SDK_POST_HOST_COMMAND_append_task-populate-sdk = " write_host_sdk_manifest; "
SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; archive_sdk; ${SDK_PACKAGING_COMMAND} "
def populate_sdk_common(d):
from oe.sdk import populate_sdk
@@ -221,11 +241,10 @@ python check_sdk_sysroots() {
SDKTAROPTS = "--owner=root --group=root"
-fakeroot tar_sdk() {
+fakeroot archive_sdk() {
# Package it up
mkdir -p ${SDKDEPLOYDIR}
- cd ${SDK_OUTPUT}/${SDKPATH}
- tar ${SDKTAROPTS} -cf - . | xz -T 0 > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ ${SDK_ARCHIVE_CMD}
}
TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
@@ -257,21 +276,22 @@ EOF
-e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \
-e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \
-e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \
- -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\&')}#g' \
+ -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE").replace('&', '\\&')}#g' \
-e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
-e '/@SDK_POST_INSTALL_COMMAND@/d' \
-e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
+ -e 's#@SDK_ARCHIVE_TYPE@#${SDK_ARCHIVE_TYPE}#g' \
${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# add execution permission
chmod +x ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# append the SDK tarball
- cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
+ cat ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE} >> ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
# delete the old tarball, we don't need it anymore
- rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
+ rm ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.${SDK_ARCHIVE_TYPE}
}
populate_sdk_log_check() {
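The archive selection added above runs in an anonymous python() block against the datastore. Stripped of the datastore plumbing, the decision it makes looks roughly like the following sketch; it is an illustration, not the class code.

    # Sketch of the SDK archive selection above, detached from the datastore.
    def select_sdk_archive(archive_type, sdk_xz_options):
        if archive_type == "zip":
            depends = "zip-native"
            cmd = ("cd ${SDK_OUTPUT}/${SDKPATH}; zip -r -y "
                   "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.zip .")
        else:
            depends = "xz-native"
            cmd = ("cd ${SDK_OUTPUT}/${SDKPATH}; tar ${SDKTAROPTS} -cf - . | "
                   "xz " + sdk_xz_options +
                   " > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz")
        return depends, cmd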
diff --git a/external/poky/meta/classes/populate_sdk_ext.bbclass b/external/poky/meta/classes/populate_sdk_ext.bbclass
index 40b0375e..fd0da16e 100644
--- a/external/poky/meta/classes/populate_sdk_ext.bbclass
+++ b/external/poky/meta/classes/populate_sdk_ext.bbclass
@@ -20,6 +20,8 @@ SDK_EXT_task-populate-sdk-ext = "-ext"
SDK_EXT_TYPE ?= "full"
SDK_INCLUDE_PKGDATA ?= "0"
SDK_INCLUDE_TOOLCHAIN ?= "${@'1' if d.getVar('SDK_EXT_TYPE') == 'full' else '0'}"
+SDK_INCLUDE_NATIVESDK ?= "0"
+SDK_INCLUDE_BUILDTOOLS ?= '1'
SDK_RECRDEP_TASKS ?= ""
@@ -93,6 +95,7 @@ python write_target_sdk_ext_manifest () {
real_target_multimach = d.getVar('REAL_MULTIMACH_TARGET_SYS')
pkgs = {}
+ os.makedirs(os.path.dirname(d.getVar('SDK_EXT_TARGET_MANIFEST')), exist_ok=True)
with open(d.getVar('SDK_EXT_TARGET_MANIFEST'), 'w') as f:
for fn in extra_info['filesizes']:
info = fn.split(':')
@@ -121,7 +124,7 @@ SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME') or d.getVar('DISTR
def clean_esdk_builddir(d, sdkbasepath):
"""Clean up traces of the fake build for create_filtered_tasklist()"""
import shutil
- cleanpaths = 'cache conf/sanity_info tmp'.split()
+ cleanpaths = ['cache', 'tmp']
for pth in cleanpaths:
fullpth = os.path.join(sdkbasepath, pth)
if os.path.isdir(fullpth):
@@ -176,7 +179,9 @@ def create_filtered_tasklist(d, sdkbasepath, tasklistfile, conf_initpath):
# will effectively do
clean_esdk_builddir(d, sdkbasepath)
finally:
- os.replace(sdkbasepath + '/conf/local.conf.bak', sdkbasepath + '/conf/local.conf')
+ localconf = sdkbasepath + '/conf/local.conf'
+ if os.path.exists(localconf + '.bak'):
+ os.replace(localconf + '.bak', localconf)
python copy_buildsystem () {
import re
@@ -378,9 +383,18 @@ python copy_buildsystem () {
f.write('require conf/locked-sigs.inc\n')
f.write('require conf/unlocked-sigs.inc\n')
- # Write a templateconf.cfg
- with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
- f.write('meta/conf\n')
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
+ # Use the templateconf.cfg file from builddir if it exists
+ if os.path.exists(builddir + '/conf/templateconf.cfg'):
+ shutil.copyfile(builddir + '/conf/templateconf.cfg', baseoutpath + '/conf/templateconf.cfg')
+ else:
+ # Write a templateconf.cfg
+ with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f:
+ f.write('meta/conf\n')
# Ensure any variables set from the external environment (by way of
# BB_ENV_EXTRAWHITE) are set in the SDK's configuration
@@ -401,9 +415,27 @@ python copy_buildsystem () {
excluded_targets = get_sdk_install_targets(d, images_only=True)
sigfile = d.getVar('WORKDIR') + '/locked-sigs.inc'
lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
+ #nativesdk-only sigfile to merge into locked-sigs.inc
+ sdk_include_nativesdk = (d.getVar("SDK_INCLUDE_NATIVESDK") == '1')
+ nativesigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ nativesigfile_pruned = d.getVar('WORKDIR') + '/locked-sigs_nativesdk_pruned.inc'
+
+ if sdk_include_nativesdk:
+ oe.copy_buildsystem.prune_lockedsigs([],
+ excluded_targets.split(),
+ nativesigfile,
+ True,
+ nativesigfile_pruned)
+
+ oe.copy_buildsystem.merge_lockedsigs([],
+ sigfile,
+ nativesigfile_pruned,
+ sigfile)
+
oe.copy_buildsystem.prune_lockedsigs([],
excluded_targets.split(),
sigfile,
+ False,
lockedsigs_pruned)
sstate_out = baseoutpath + '/sstate-cache'
@@ -414,13 +446,18 @@ python copy_buildsystem () {
sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1')
sdk_ext_type = d.getVar('SDK_EXT_TYPE')
- if sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative:
+ if (sdk_ext_type != 'minimal' or sdk_include_toolchain or derivative) and not sdk_include_nativesdk:
# Create the filtered task list used to generate the sstate cache shipped with the SDK
tasklistfn = d.getVar('WORKDIR') + '/tasklist.txt'
create_filtered_tasklist(d, baseoutpath, tasklistfn, conf_initpath)
else:
tasklistfn = None
+ if os.path.exists(builddir + '/cache/bb_unihashes.dat'):
+ bb.parse.siggen.save_unitaskhashes()
+ bb.utils.mkdirhier(os.path.join(baseoutpath, 'cache'))
+ shutil.copyfile(builddir + '/cache/bb_unihashes.dat', baseoutpath + '/cache/bb_unihashes.dat')
+
# Add packagedata if enabled
if d.getVar('SDK_INCLUDE_PKGDATA') == '1':
lockedsigs_base = d.getVar('WORKDIR') + '/locked-sigs-base.inc'
@@ -506,8 +543,12 @@ def get_sdk_required_utilities(buildtools_fn, d):
sanity_required_utilities = (d.getVar('SANITY_REQUIRED_UTILITIES') or '').split()
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}gcc'))
sanity_required_utilities.append(d.expand('${BUILD_PREFIX}g++'))
- buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
- filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ if buildtools_fn:
+ buildtools_installer = os.path.join(d.getVar('SDK_DEPLOY'), buildtools_fn)
+ filelist, _ = bb.process.run('%s -l' % buildtools_installer)
+ else:
+ buildtools_installer = None
+ filelist = ""
localdata = bb.data.createCopy(d)
localdata.setVar('SDKPATH', '.')
sdkpathnative = localdata.getVar('SDKPATHNATIVE')
@@ -550,7 +591,9 @@ install_tools() {
touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
# find latest buildtools-tarball and install it
- install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ install ${SDK_DEPLOY}/${SDK_BUILDTOOLS_INSTALLER} ${SDK_OUTPUT}/${SDKPATH}
+ fi
install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
}
@@ -574,8 +617,8 @@ sdk_ext_preinst() {
exit 1
fi
# The relocation script used by buildtools installer requires python
- if ! command -v python > /dev/null; then
- echo "ERROR: The installer requires python, please install it first"
+ if ! command -v python3 > /dev/null; then
+ echo "ERROR: The installer requires python3, please install it first"
exit 1
fi
missing_utils=""
@@ -589,11 +632,8 @@ sdk_ext_preinst() {
exit 1
fi
SDK_EXTENSIBLE="1"
- if [ "$publish" = "1" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
- if [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
- EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
- fi
+ if [ "$publish" = "1" ] && [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
+ EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
fi
}
SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
@@ -603,16 +643,18 @@ sdk_ext_postinst() {
printf "\nExtracting buildtools...\n"
cd $target_sdk_dir
env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
- printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ if [ -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
+ printf "buildtools\ny" | ./${SDK_BUILDTOOLS_INSTALLER} > buildtools.log || { printf 'ERROR: buildtools installation failed:\n' ; cat buildtools.log ; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
- # Delete the buildtools tar file since it won't be used again
- rm -f ./${SDK_BUILDTOOLS_INSTALLER}
- # We don't need the log either since it succeeded
- rm -f buildtools.log
+ # Delete the buildtools tar file since it won't be used again
+ rm -f ./${SDK_BUILDTOOLS_INSTALLER}
+ # We don't need the log either since it succeeded
+ rm -f buildtools.log
- # Make sure when the user sets up the environment, they also get
- # the buildtools-tarball tools in their path.
- echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ # Make sure when the user sets up the environment, they also get
+ # the buildtools-tarball tools in their path.
+ echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script
+ fi
# Allow bitbake environment setup to be ran as part of this sdk.
echo "export OE_SKIP_SDK_CHECK=1" >> $env_setup_script
@@ -628,13 +670,15 @@ sdk_ext_postinst() {
# Warn if trying to use external bitbake and the ext SDK together
echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
- if [ "$prepare_buildsystem" != "no" ]; then
+ if [ "$prepare_buildsystem" != "no" -a -n "${SDK_BUILDTOOLS_INSTALLER}" ]; then
printf "Preparing build system...\n"
# dash which is /bin/sh on Ubuntu will not preserve the
# current working directory when first ran, nor will it set $1 when
# sourcing a script. That is why this has to look so ugly.
LOGFILE="$target_sdk_dir/preparing_build_system.log"
- sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+ fi
+ if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then
rm $target_sdk_dir/ext-sdk-prepare.py
fi
echo done
@@ -652,15 +696,25 @@ fakeroot python do_populate_sdk_ext() {
bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH'), d.getVar('BUILD_ARCH')))
d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
- buildtools_fn = get_current_buildtools(d)
+ if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1':
+ buildtools_fn = get_current_buildtools(d)
+ else:
+ buildtools_fn = None
d.setVar('SDK_REQUIRED_UTILITIES', get_sdk_required_utilities(buildtools_fn, d))
d.setVar('SDK_BUILDTOOLS_INSTALLER', buildtools_fn)
d.setVar('SDKDEPLOYDIR', '${SDKEXTDEPLOYDIR}')
# ESDKs have a libc from the buildtools so ensure we don't ship linguas twice
d.delVar('SDKIMAGE_LINGUAS')
+ if d.getVar("SDK_INCLUDE_NATIVESDK") == '1':
+ generate_nativesdk_lockedsigs(d)
populate_sdk_common(d)
}
+def generate_nativesdk_lockedsigs(d):
+ import oe.copy_buildsystem
+ sigfile = d.getVar('WORKDIR') + '/locked-sigs_nativesdk.inc'
+ oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+
def get_ext_sdk_depends(d):
# Note: the deps varflag is a list not a string, so we need to specify expand=False
deps = d.getVarFlag('do_image_complete', 'deps', False)
@@ -696,7 +750,7 @@ def get_sdk_ext_rdepends(d):
do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
- buildtools-tarball:do_populate_sdk \
+ ${@'buildtools-tarball:do_populate_sdk' if d.getVar('SDK_INCLUDE_BUILDTOOLS') == '1' else ''} \
${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA') == '1' else ''} \
${@'meta-extsdk-toolchain:do_locked_sigs' if d.getVar('SDK_INCLUDE_TOOLCHAIN') == '1' else ''}"
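Note: the new SDK_INCLUDE_NATIVESDK and SDK_INCLUDE_BUILDTOOLS switches control whether nativesdk task signatures get merged into locked-sigs.inc and whether a buildtools installer is bundled and unpacked by the postinst. A local.conf sketch using only the variables introduced above (values are illustrative, not defaults):

    # conf/local.conf (sketch)
    SDK_EXT_TYPE = "minimal"
    SDK_INCLUDE_TOOLCHAIN = "1"
    SDK_INCLUDE_NATIVESDK = "1"      # also generate/merge nativesdk locked signatures
    SDK_INCLUDE_BUILDTOOLS = "0"     # do not bundle buildtools-tarball; host tools are used instead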
diff --git a/external/poky/meta/classes/ptest.bbclass b/external/poky/meta/classes/ptest.bbclass
index 9d8a7985..47611ede 100644
--- a/external/poky/meta/classes/ptest.bbclass
+++ b/external/poky/meta/classes/ptest.bbclass
@@ -3,13 +3,17 @@ DESCRIPTION_${PN}-ptest ?= "${DESCRIPTION} \
This package contains a test directory ${PTEST_PATH} for package test purposes."
PTEST_PATH ?= "${libdir}/${BPN}/ptest"
-FILES_${PN}-ptest = "${PTEST_PATH}"
+PTEST_BUILD_HOST_FILES ?= "Makefile"
+PTEST_BUILD_HOST_PATTERN ?= ""
+
+FILES_${PN}-ptest += "${PTEST_PATH}"
SECTION_${PN}-ptest = "devel"
ALLOW_EMPTY_${PN}-ptest = "1"
PTEST_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '1', '0', d)}"
PTEST_ENABLED_class-native = ""
PTEST_ENABLED_class-nativesdk = ""
PTEST_ENABLED_class-cross-canadian = ""
+RDEPENDS_${PN}-ptest += "${PN}"
RDEPENDS_${PN}-ptest_class-native = ""
RDEPENDS_${PN}-ptest_class-nativesdk = ""
RRECOMMENDS_${PN}-ptest += "ptest-runner"
@@ -45,6 +49,53 @@ do_install_ptest_base() {
fi
do_install_ptest
chown -R root:root ${D}${PTEST_PATH}
+
+ # Strip build host paths from any installed Makefile
+ for filename in ${PTEST_BUILD_HOST_FILES}; do
+ for installed_ptest_file in $(find ${D}${PTEST_PATH} -type f -name $filename); do
+ bbnote "Stripping host paths from: $installed_ptest_file"
+ sed -e 's#${HOSTTOOLS_DIR}/*##g' \
+ -e 's#${WORKDIR}/*=#.=#g' \
+ -e 's#${WORKDIR}/*##g' \
+ -i $installed_ptest_file
+ if [ -n "${PTEST_BUILD_HOST_PATTERN}" ]; then
+ sed -E '/${PTEST_BUILD_HOST_PATTERN}/d' \
+ -i $installed_ptest_file
+ fi
+ done
+ done
+}
+
+PTEST_BINDIR_PKGD_PATH = "${PKGD}${PTEST_PATH}/bin"
+
+# This function needs to run after apply_update_alternative_renames because the
+# aforementioned function will update the ALTERNATIVE_LINK_NAME flag. Append is
+# used here to make this function run as late as possible.
+PACKAGE_PREPROCESS_FUNCS_append = "${@bb.utils.contains('PTEST_BINDIR', '1', \
+ bb.utils.contains('PTEST_ENABLED', '1', ' ptest_update_alternatives', '', d), '', d)}"
+
+python ptest_update_alternatives() {
+ """
+ This function will generate the symlinks in the PTEST_BINDIR_PKGD_PATH
+ to match the renamed binaries by update-alternatives.
+ """
+
+ if not bb.data.inherits_class('update-alternatives', d) \
+ or not update_alternatives_enabled(d):
+ return
+
+ bb.note("Generating symlinks for ptest")
+ bin_paths = { d.getVar("bindir"), d.getVar("base_bindir"),
+ d.getVar("sbindir"), d.getVar("base_sbindir") }
+ ptest_bindir = d.getVar("PTEST_BINDIR_PKGD_PATH")
+ os.mkdir(ptest_bindir)
+ for pkg in (d.getVar('PACKAGES') or "").split():
+ alternatives = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, _ in alternatives:
+ # Some alternatives are for man pages,
+ # check if the alternative is in PATH
+ if os.path.dirname(alt_link) in bin_paths:
+ os.symlink(alt_target, os.path.join(ptest_bindir, alt_name))
}
do_configure_ptest_base[dirs] = "${B}"
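Note: the do_install_ptest_base() additions scrub build-host paths from installed Makefiles, and ptest_update_alternatives() recreates update-alternatives symlinks under ${PTEST_PATH}/bin when PTEST_BINDIR is enabled. A recipe-side sketch using the knobs introduced above (the extra file name and the regex are illustrative):

    inherit ptest
    PTEST_BINDIR = "1"                          # generate ${PTEST_PATH}/bin symlinks
    PTEST_BUILD_HOST_FILES += "CMakeCache.txt"  # scrub more than just Makefiles
    PTEST_BUILD_HOST_PATTERN = "^BUILD_DIR :="  # drop whole lines matching this regex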
diff --git a/external/poky/meta/classes/pypi.bbclass b/external/poky/meta/classes/pypi.bbclass
index e5d7ab3c..87b4c85f 100644
--- a/external/poky/meta/classes/pypi.bbclass
+++ b/external/poky/meta/classes/pypi.bbclass
@@ -22,5 +22,5 @@ SECTION = "devel/python"
SRC_URI += "${PYPI_SRC_URI}"
S = "${WORKDIR}/${PYPI_PACKAGE}-${PV}"
-UPSTREAM_CHECK_URI ?= "https://pypi.python.org/pypi/${PYPI_PACKAGE}/"
-UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)"
+UPSTREAM_CHECK_URI ?= "https://pypi.org/project/${PYPI_PACKAGE}/"
+UPSTREAM_CHECK_REGEX ?= "/${PYPI_PACKAGE}/(?P<pver>(\d+[\.\-_]*)+)/"
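Note: upstream version checks for pypi recipes now query pypi.org instead of the retired pypi.python.org endpoint. A minimal consumer sketch (recipe name and checksum are hypothetical):

    # python3-foo_1.0.bb (hypothetical)
    inherit pypi setuptools3
    PYPI_PACKAGE = "foo"
    SRC_URI[sha256sum] = "<fill in>"

With this, SRC_URI and S are derived from PYPI_PACKAGE and version checks (e.g. via devtool check-upgrade-status) hit https://pypi.org/project/foo/.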
diff --git a/external/poky/meta/classes/python-dir.bbclass b/external/poky/meta/classes/python-dir.bbclass
deleted file mode 100644
index a11dc350..00000000
--- a/external/poky/meta/classes/python-dir.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-PYTHON_BASEVERSION = "2.7"
-PYTHON_ABI = ""
-PYTHON_DIR = "python${PYTHON_BASEVERSION}"
-PYTHON_PN = "python"
-PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/external/poky/meta/classes/python3-dir.bbclass b/external/poky/meta/classes/python3-dir.bbclass
index 06bb046d..036d7140 100644
--- a/external/poky/meta/classes/python3-dir.bbclass
+++ b/external/poky/meta/classes/python3-dir.bbclass
@@ -1,5 +1,5 @@
-PYTHON_BASEVERSION = "3.5"
-PYTHON_ABI = "m"
+PYTHON_BASEVERSION = "3.8"
+PYTHON_ABI = ""
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_PN = "python3"
PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/external/poky/meta/classes/python3native.bbclass b/external/poky/meta/classes/python3native.bbclass
index da12a714..d98fb4c7 100644
--- a/external/poky/meta/classes/python3native.bbclass
+++ b/external/poky/meta/classes/python3native.bbclass
@@ -9,6 +9,16 @@ DEPENDS_append = " python3-native "
export STAGING_INCDIR
export STAGING_LIBDIR
+# Packages can use
+# find_package(PythonInterp REQUIRED)
+# find_package(PythonLibs REQUIRED)
+# which ends up using libs/includes from build host
+# Therefore pre-empt that effort
+export PYTHON_LIBRARY="${STAGING_LIBDIR}/lib${PYTHON_DIR}${PYTHON_ABI}.so"
+export PYTHON_INCLUDE_DIR="${STAGING_INCDIR}/${PYTHON_DIR}${PYTHON_ABI}"
+
+export _PYTHON_SYSCONFIGDATA_NAME="_sysconfigdata"
+
# suppress host user's site-packages dirs.
export PYTHONNOUSERSITE = "1"
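Note: the exports above pre-seed CMake's FindPythonInterp/FindPythonLibs hints so configure steps pick up the native Python rather than whatever the build host ships. With the python3-dir.bbclass values shown earlier in this patch (PYTHON_BASEVERSION = "3.8", empty PYTHON_ABI) they expand roughly to (illustrative):

    PYTHON_LIBRARY     = ${STAGING_LIBDIR}/libpython3.8.so
    PYTHON_INCLUDE_DIR = ${STAGING_INCDIR}/python3.8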
diff --git a/external/poky/meta/classes/pythonnative.bbclass b/external/poky/meta/classes/pythonnative.bbclass
deleted file mode 100644
index ae6600cd..00000000
--- a/external/poky/meta/classes/pythonnative.bbclass
+++ /dev/null
@@ -1,19 +0,0 @@
-
-inherit python-dir
-
-PYTHON="${STAGING_BINDIR_NATIVE}/python-native/python"
-# PYTHON_EXECUTABLE is used by cmake
-PYTHON_EXECUTABLE="${PYTHON}"
-EXTRANATIVEPATH += "python-native"
-DEPENDS_append = " python-native "
-
-# python-config and other scripts are using distutils modules
-# which we patch to access these variables
-export STAGING_INCDIR
-export STAGING_LIBDIR
-
-# suppress host user's site-packages dirs.
-export PYTHONNOUSERSITE = "1"
-
-# autoconf macros will use their internal default preference otherwise
-export PYTHON
diff --git a/external/poky/meta/classes/qemu.bbclass b/external/poky/meta/classes/qemu.bbclass
index f5c57801..55bdff81 100644
--- a/external/poky/meta/classes/qemu.bbclass
+++ b/external/poky/meta/classes/qemu.bbclass
@@ -16,6 +16,8 @@ def qemu_target_binary(data):
target_arch = "ppc"
elif target_arch == "powerpc64":
target_arch = "ppc64"
+ elif target_arch == "powerpc64le":
+ target_arch = "ppc64le"
return "qemu-" + target_arch
@@ -62,3 +64,4 @@ QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
+QEMU_EXTRAOPTIONS_powerpc64le = " -cpu POWER8"
diff --git a/external/poky/meta/classes/qemuboot.bbclass b/external/poky/meta/classes/qemuboot.bbclass
index 15a9e63f..99da543f 100644
--- a/external/poky/meta/classes/qemuboot.bbclass
+++ b/external/poky/meta/classes/qemuboot.bbclass
@@ -36,6 +36,9 @@
# Note, runqemu will replace @MAC@ with a predefined mac, you can set
# a custom one, but that may cause conflicts when multiple qemus are
# running on the same host.
+# Note: If more than one interface of type -device virtio-net-device gets added,
+# QB_NETWORK_DEVICE_prepend might be used, since Qemu enumerates the eth*
+# devices in reverse order to -device arguments.
#
# QB_TAP_OPT: network option for 'tap' mode, e.g.,
# "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no"
@@ -43,6 +46,15 @@
#
# QB_SLIRP_OPT: network option for SLIRP mode, e.g., -netdev user,id=net0"
#
+# QB_CMDLINE_IP_SLIRP: If QB_NETWORK_DEVICE adds more than one network interface to qemu, usually the
+#                     ip= kernel command line argument needs to be changed accordingly. Details are documented
+#                     in the kernel documentation https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt
+# Example to configure only the first interface: "ip=eth0:dhcp"
+# QB_CMDLINE_IP_TAP: This parameter is similar to the QB_CMDLINE_IP_SLIRP parameter. Since the tap interface requires
+#                   static IP configuration, the @CLIENT@ and @GATEWAY@ placeholders are replaced by the IP and the gateway
+# address of the qemu guest by runqemu.
+# Example: "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0"
+#
# QB_ROOTFS_OPT: used as rootfs, e.g.,
# "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"
# Note, runqemu will replace "@ROOTFS@" with the one which is used, such as core-image-minimal-qemuarm64.ext4.
@@ -53,6 +65,10 @@
# " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon"
# Note, runqemu will replace "@PORT@" with the port number which is used.
#
+# QB_ROOTFS_EXTRA_OPT: extra options to be appended to the rootfs device in case there is none specified by QB_ROOTFS_OPT.
+# Can be used to automatically determine the image from the other variables
+#                      but define things like 'bootindex' when booting from EFI or 'readonly' when using squashfs
+# without the need to specify a dedicated qemu configuration
# Usage:
# IMAGE_CLASSES += "qemuboot"
# See "runqemu help" for more info
@@ -63,6 +79,9 @@ QB_DEFAULT_KERNEL ?= "${KERNEL_IMAGETYPE}"
QB_DEFAULT_FSTYPE ?= "ext4"
QB_OPT_APPEND ?= "-show-cursor"
QB_NETWORK_DEVICE ?= "-device virtio-net-pci,netdev=net0,mac=@MAC@"
+QB_CMDLINE_IP_SLIRP ?= "ip=dhcp"
+QB_CMDLINE_IP_TAP ?= "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
+QB_ROOTFS_EXTRA_OPT ?= ""
# This should be kept align with ROOT_VM
QB_DRIVE_TYPE ?= "/dev/sd"
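Note: a machine/image configuration sketch exercising the new knobs documented above; the extra device string is illustrative, and runqemu is still expected to substitute @MAC@, @CLIENT@ and @GATEWAY@:

    # add a second NIC ahead of the default one and pin ip= to eth0
    QB_NETWORK_DEVICE_prepend = "-device virtio-net-device,netdev=net1,mac=@MAC@ "
    QB_CMDLINE_IP_SLIRP = "ip=eth0:dhcp"
    QB_ROOTFS_EXTRA_OPT = "readonly"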
diff --git a/external/poky/meta/classes/relocatable.bbclass b/external/poky/meta/classes/relocatable.bbclass
index 582812c1..af04be5c 100644
--- a/external/poky/meta/classes/relocatable.bbclass
+++ b/external/poky/meta/classes/relocatable.bbclass
@@ -6,13 +6,15 @@ python relocatable_binaries_preprocess() {
rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
}
-relocatable_native_pcfiles () {
- if [ -d ${SYSROOT_DESTDIR}${libdir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('libdir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${libdir}/pkgconfig/*.pc
- fi
- if [ -d ${SYSROOT_DESTDIR}${datadir}/pkgconfig ]; then
- rel=${@os.path.relpath(d.getVar('base_prefix'), d.getVar('datadir') + "/pkgconfig")}
- sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" ${SYSROOT_DESTDIR}${datadir}/pkgconfig/*.pc
- fi
+relocatable_native_pcfiles() {
+ for dir in ${libdir}/pkgconfig ${datadir}/pkgconfig; do
+ files_template=${SYSROOT_DESTDIR}$dir/*.pc
+ # Expand to any files matching $files_template
+ files=$(echo $files_template)
+ # $files_template and $files will differ if any files were found
+ if [ "$files_template" != "$files" ]; then
+ rel=$(realpath -m --relative-to=$dir ${base_prefix})
+ sed -i -e "s:${base_prefix}:\${pcfiledir}/$rel:g" $files
+ fi
+ done
}
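Note: the rewritten relocatable_native_pcfiles() computes the hop count back to ${base_prefix} with realpath and rewrites any staged .pc files so they become position-independent. Illustrative effect on a file under ${libdir}/pkgconfig (paths abbreviated):

    # before:  prefix=/.../recipe-sysroot-native/usr
    # after:   prefix=${pcfiledir}/../../../usr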
diff --git a/external/poky/meta/classes/report-error.bbclass b/external/poky/meta/classes/report-error.bbclass
index 1c55abfb..1a12db12 100644
--- a/external/poky/meta/classes/report-error.bbclass
+++ b/external/poky/meta/classes/report-error.bbclass
@@ -25,6 +25,19 @@ def errorreport_savedata(e, newdata, file):
json.dump(newdata, f, indent=4, sort_keys=True)
return datafile
+def get_conf_data(e, filename):
+ builddir = e.data.getVar('TOPDIR')
+ filepath = os.path.join(builddir, "conf", filename)
+ jsonstring = ""
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as f:
+ for line in f.readlines():
+ if line.startswith("#") or len(line.strip()) == 0:
+ continue
+ else:
+ jsonstring=jsonstring + line
+ return jsonstring
+
python errorreport_handler () {
import json
import codecs
@@ -51,6 +64,8 @@ python errorreport_handler () {
data['failures'] = []
data['component'] = " ".join(e.getPkgs())
data['branch_commit'] = str(base_detect_branch(e.data)) + ": " + str(base_detect_revision(e.data))
+ data['local_conf'] = get_conf_data(e, 'local.conf')
+ data['auto_conf'] = get_conf_data(e, 'auto.conf')
lock = bb.utils.lockfile(datafile + '.lock')
errorreport_savedata(e, data, "error-report.txt")
bb.utils.unlockfile(lock)
@@ -63,19 +78,15 @@ python errorreport_handler () {
taskdata['task'] = task
if log:
try:
- logFile = codecs.open(log, 'r', 'utf-8')
- logdata = logFile.read()
-
+ with codecs.open(log, encoding='utf-8') as logFile:
+ logdata = logFile.read()
# Replace host-specific paths so the logs are cleaner
for d in ("TOPDIR", "TMPDIR"):
s = e.data.getVar(d)
if s:
logdata = logdata.replace(s, d)
-
- logFile.close()
except:
logdata = "Unable to read log file"
-
else:
logdata = "No Log"
diff --git a/external/poky/meta/classes/reproducible_build.bbclass b/external/poky/meta/classes/reproducible_build.bbclass
index 8788ad71..8da40f65 100644
--- a/external/poky/meta/classes/reproducible_build.bbclass
+++ b/external/poky/meta/classes/reproducible_build.bbclass
@@ -39,19 +39,34 @@ inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'repr
SDE_DIR ="${WORKDIR}/source-date-epoch"
SDE_FILE = "${SDE_DIR}/__source_date_epoch.txt"
+SDE_DEPLOYDIR = "${WORKDIR}/deploy-source-date-epoch"
SSTATETASKS += "do_deploy_source_date_epoch"
do_deploy_source_date_epoch () {
- echo "Deploying SDE to ${SDE_DIR}."
+ mkdir -p ${SDE_DEPLOYDIR}
+ if [ -e ${SDE_FILE} ]; then
+ echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
+ cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
+ else
+ echo "${SDE_FILE} not found!"
+ fi
}
python do_deploy_source_date_epoch_setscene () {
sstate_setscene(d)
+ bb.utils.mkdirhier(d.getVar('SDE_DIR'))
+ sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
+ if os.path.exists(sde_file):
+ target = d.getVar('SDE_FILE')
+ bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
+ os.rename(sde_file, target)
+ else:
+ bb.debug(1, "%s not found!" % sde_file)
}
-do_deploy_source_date_epoch[dirs] = "${SDE_DIR}"
-do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DIR}"
+do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
+do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch
@@ -135,11 +150,12 @@ def fixed_source_date_epoch():
bb.debug(1, "No tarball or git repo found to determine SOURCE_DATE_EPOCH")
return 0
-python do_create_source_date_epoch_stamp() {
+python create_source_date_epoch_stamp() {
epochfile = d.getVar('SDE_FILE')
+ # If it exists we need to regenerate as the sources may have changed
if os.path.isfile(epochfile):
- bb.debug(1, "Reusing SOURCE_DATE_EPOCH from: %s" % epochfile)
- return
+ bb.debug(1, "Deleting existing SOURCE_DATE_EPOCH from: %s" % epochfile)
+ os.remove(epochfile)
sourcedir = d.getVar('S')
source_date_epoch = (
@@ -155,16 +171,32 @@ python do_create_source_date_epoch_stamp() {
f.write(str(source_date_epoch))
}
+def get_source_date_epoch_value(d):
+ cached = d.getVar('__CACHED_SOURCE_DATE_EPOCH')
+ if cached:
+ return cached
+
+ epochfile = d.getVar('SDE_FILE')
+ source_date_epoch = 0
+ if os.path.isfile(epochfile):
+ with open(epochfile, 'r') as f:
+ s = f.read()
+ try:
+ source_date_epoch = int(s)
+ except ValueError:
+ bb.warn("SOURCE_DATE_EPOCH value '%s' is invalid. Reverting to 0" % s)
+ source_date_epoch = 0
+ bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
+ else:
+ bb.debug(1, "Cannot find %s. SOURCE_DATE_EPOCH will default to %d" % (epochfile, source_date_epoch))
+
+ d.setVar('__CACHED_SOURCE_DATE_EPOCH', str(source_date_epoch))
+ return str(source_date_epoch)
+
+export SOURCE_DATE_EPOCH ?= "${@get_source_date_epoch_value(d)}"
BB_HASHBASE_WHITELIST += "SOURCE_DATE_EPOCH"
python () {
if d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1':
- d.appendVarFlag("do_unpack", "postfuncs", " do_create_source_date_epoch_stamp")
- epochfile = d.getVar('SDE_FILE')
- source_date_epoch = "0"
- if os.path.isfile(epochfile):
- with open(epochfile, 'r') as f:
- source_date_epoch = f.read()
- bb.debug(1, "SOURCE_DATE_EPOCH: %s" % source_date_epoch)
- d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
+ d.appendVarFlag("do_unpack", "postfuncs", " create_source_date_epoch_stamp")
}
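Note: SOURCE_DATE_EPOCH is now resolved lazily via get_source_date_epoch_value(), which reads ${SDE_FILE} written by the do_unpack postfunc, caches the result, and falls back to 0 when the file is missing or invalid. A quick way to inspect what a recipe will export (sketch):

    $ bitbake -e <recipe> | grep 'SOURCE_DATE_EPOCH='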
diff --git a/external/poky/meta/classes/reproducible_build_simple.bbclass b/external/poky/meta/classes/reproducible_build_simple.bbclass
index 8a60deef..39337299 100644
--- a/external/poky/meta/classes/reproducible_build_simple.bbclass
+++ b/external/poky/meta/classes/reproducible_build_simple.bbclass
@@ -7,4 +7,3 @@ export PERL_HASH_SEED = "0"
export SOURCE_DATE_EPOCH ??= "1520598896"
REPRODUCIBLE_TIMESTAMP_ROOTFS ??= "1520598896"
-
diff --git a/external/poky/meta/classes/rm_work.bbclass b/external/poky/meta/classes/rm_work.bbclass
index c478f4a1..01c2ab1c 100644
--- a/external/poky/meta/classes/rm_work.bbclass
+++ b/external/poky/meta/classes/rm_work.bbclass
@@ -47,30 +47,26 @@ do_rm_work () {
cd `dirname ${STAMP}`
for i in `basename ${STAMP}`*
do
- # By default we'll delete the stamp, unless $i is changed by the inner loop
- # (i=dummy does this)
-
case $i in
*sigdata*|*sigbasedata*)
# Save/skip anything that looks like a signature data file.
- i=dummy
;;
- *do_image_complete_setscene*)
- # Ensure we don't 'stack' setscene extensions to this stamp with the section below
- i=dummy
+ *do_image_complete_setscene*|*do_image_qa_setscene*)
+ # Ensure we don't 'stack' setscene extensions to these stamps with the sections below
;;
*do_image_complete*)
# Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
- i=dummy
+ ;;
+ *do_image_qa*)
+ # Promote do_image_qa stamps to setscene versions (ahead of *do_image* below)
+ mv $i `echo $i | sed -e "s#do_image_qa#do_image_qa_setscene#"`
;;
*do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
- i=dummy
;;
*do_addto_recipe_sysroot*)
# Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
excludes="$excludes recipe-sysroot-native"
- i=dummy
;;
*do_package|*do_package.*|*do_package_setscene.*)
# We remove do_package entirely, including any
@@ -78,30 +74,24 @@ do_rm_work () {
# such as 'packages' and 'packages-split' and these can be large. No end
# of chain tasks depend directly on do_package anymore.
rm -f $i;
- i=dummy
;;
*_setscene*)
# Skip stamps which are already setscene versions
- i=dummy
;;
+ *)
+ # For everything else: if suitable, promote the stamp to a setscene
+ # version, otherwise remove it
+ for j in ${SSTATETASKS} do_shared_workdir
+ do
+ case $i in
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ break
+ ;;
+ esac
+ done
+ rm -f $i
esac
-
- for j in ${SSTATETASKS} do_shared_workdir
- do
- case $i in
- dummy)
- break
- ;;
- *$j|*$j.*)
- # Promote the stamp to a setscene version
- mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
- i=dummy
- break
- ;;
- esac
- done
-
- rm -f $i
done
cd ${WORKDIR}
@@ -121,7 +111,7 @@ do_rm_work_all () {
}
do_rm_work_all[recrdeptask] = "do_rm_work"
do_rm_work_all[noexec] = "1"
-addtask rm_work_all after before do_build
+addtask rm_work_all before do_build
do_populate_sdk[postfuncs] += "rm_work_populatesdk"
rm_work_populatesdk () {
diff --git a/external/poky/meta/classes/rootfs-postcommands.bbclass b/external/poky/meta/classes/rootfs-postcommands.bbclass
index bde58ad6..c43b9a98 100644
--- a/external/poky/meta/classes/rootfs-postcommands.bbclass
+++ b/external/poky/meta/classes/rootfs-postcommands.bbclass
@@ -29,7 +29,7 @@ APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro
ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; "
# Write manifest
-IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}.rootfs.manifest"
+IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest"
ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
# Set default postinst log file
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
@@ -95,6 +95,11 @@ read_only_rootfs_hook () {
sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
fi
+ # Tweak the "mount -o remount,rw /" command in busybox-inittab inittab
+ if [ -f ${IMAGE_ROOTFS}/etc/inittab ]; then
+ sed -i 's|/bin/mount -o remount,rw /|/bin/mount -o remount,ro /|' ${IMAGE_ROOTFS}/etc/inittab
+ fi
+
# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
# and the keys under /var/run/ssh.
@@ -126,6 +131,12 @@ read_only_rootfs_hook () {
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
fi
fi
+
+ if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+ # Create machine-id
+ # 20:12 < mezcalero> koen: you have three options: a) run systemd-machine-id-setup at install time, b) have / read-only and an empty file there (for stateless) and c) boot with / writable
+ touch ${IMAGE_ROOTFS}${sysconfdir}/machine-id
+ fi
}
#
@@ -254,7 +265,7 @@ python write_image_manifest () {
with open(manifest_name, 'w+') as image_manifest:
image_manifest.write(format_pkg_list(pkgs, "ver"))
- if os.path.exists(manifest_name):
+ if os.path.exists(manifest_name) and link_name:
manifest_link = deploy_dir + "/" + link_name + ".manifest"
if os.path.lexists(manifest_link):
os.remove(manifest_link)
@@ -297,12 +308,16 @@ rootfs_check_host_user_contaminated () {
HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
- find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
- -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
+ find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \
+ -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated"
+
+ sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do
+ bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line`
+ done
if [ -s "$contaminated" ]; then
- echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
- cat "$contaminated" | sed "s,^, ,"
+ bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd`
+ bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group`
fi
}
@@ -322,7 +337,7 @@ python write_image_test_data() {
searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
export2json(d, testdata_name, searchString=searchString, replaceString="")
- if os.path.exists(testdata_name):
+ if os.path.exists(testdata_name) and link_name:
testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
if os.path.lexists(testdata_link):
os.remove(testdata_link)
@@ -350,7 +365,9 @@ rootfs_reproducible () {
echo $sformatted > ${IMAGE_ROOTFS}/etc/version
bbnote "rootfs_reproducible: set /etc/version to $sformatted"
- find ${IMAGE_ROOTFS}/etc/gconf -name '%gconf.xml' -print0 | xargs -0r \
- sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ if [ -d ${IMAGE_ROOTFS}${sysconfdir}/gconf ]; then
+ find ${IMAGE_ROOTFS}${sysconfdir}/gconf -name '%gconf.xml' -print0 | xargs -0r \
+ sed -i -e 's@\bmtime="[0-9][0-9]*"@mtime="'${REPRODUCIBLE_TIMESTAMP_ROOTFS}'"@g'
+ fi
fi
}
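Note: the read_only_rootfs_hook() additions flip the busybox inittab remount to read-only and pre-create /etc/machine-id for systemd images. A local.conf sketch that exercises both paths (old-style override syntax, matching this tree):

    IMAGE_FEATURES += "read-only-rootfs"
    DISTRO_FEATURES_append = " systemd"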
diff --git a/external/poky/meta/classes/rootfs_ipk.bbclass b/external/poky/meta/classes/rootfs_ipk.bbclass
index aabc370c..e73d2bfd 100644
--- a/external/poky/meta/classes/rootfs_ipk.bbclass
+++ b/external/poky/meta/classes/rootfs_ipk.bbclass
@@ -21,7 +21,7 @@ OPKG_PREPROCESS_COMMANDS = ""
OPKG_POSTPROCESS_COMMANDS = ""
-OPKGLIBDIR = "${localstatedir}/lib"
+OPKGLIBDIR ??= "${localstatedir}/lib"
MULTILIBRE_ALLOW_REP = "${OPKGLIBDIR}/opkg|/usr/lib/opkg"
diff --git a/external/poky/meta/classes/sanity.bbclass b/external/poky/meta/classes/sanity.bbclass
index 374dacf4..5ddde9cc 100644
--- a/external/poky/meta/classes/sanity.bbclass
+++ b/external/poky/meta/classes/sanity.bbclass
@@ -2,7 +2,7 @@
# Sanity check the users setup for common misconfigurations
#
-SANITY_REQUIRED_UTILITIES ?= "patch diffstat makeinfo git bzip2 tar \
+SANITY_REQUIRED_UTILITIES ?= "patch diffstat git bzip2 tar \
gzip gawk chrpath wget cpio perl file which"
def bblayers_conf_file(d):
@@ -338,7 +338,7 @@ def check_path_length(filepath, pathname, limit):
def get_filesystem_id(path):
import subprocess
try:
- return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8')
+ return subprocess.check_output(["stat", "-f", "-c", "%t", path]).decode('utf-8').strip()
except subprocess.CalledProcessError:
bb.warn("Can't get filesystem id of: %s" % path)
return None
@@ -511,18 +511,43 @@ def check_make_version(sanity_data):
return None
-# Check if we're running on WSL (Windows Subsystem for Linux). Its known not to
-# work but we should tell the user that upfront.
+# Check if we're running on WSL (Windows Subsystem for Linux).
+# WSLv1 is known not to work but WSLv2 should work properly as
+# long as the VHDX file is optimized often, let the user know
+# upfront.
+# More information on installing WSLv2 at:
+# https://docs.microsoft.com/en-us/windows/wsl/wsl2-install
def check_wsl(d):
with open("/proc/version", "r") as f:
verdata = f.readlines()
for l in verdata:
if "Microsoft" in l:
- return "OpenEmbedded doesn't work under WSL at this time, sorry"
+ return "OpenEmbedded doesn't work under WSLv1, please upgrade to WSLv2 if you want to run builds on Windows"
+ elif "microsoft" in l:
+ bb.warn("You are running bitbake under WSLv2, this works properly but you should optimize your VHDX file eventually to avoid running out of storage space")
+ return None
+
+# Require at least gcc version 5.0.
+#
+# This can be fixed on CentOS-7 with devtoolset-6+
+# https://www.softwarecollections.org/en/scls/rhscl/devtoolset-6/
+#
+# A less invasive fix is with scripts/install-buildtools (or with user
+# built buildtools-extended-tarball)
+#
+def check_gcc_version(sanity_data):
+ from distutils.version import LooseVersion
+ import subprocess
+
+ build_cc, version = oe.utils.get_host_compiler_version(sanity_data)
+ if build_cc.strip() == "gcc":
+ if LooseVersion(version) < LooseVersion("5.0"):
+ return "Your version of gcc is older than 5.0 and will break builds. Please install a newer version of gcc (you could use the project's buildtools-extended-tarball or use scripts/install-buildtools).\n"
return None
# Tar version 1.24 and onwards handle overwriting symlinks correctly
# but earlier versions do not; this needs to work properly for sstate
+# Version 1.28 is needed so opkg-build works correctly when reproducible builds are enabled
def check_tar_version(sanity_data):
from distutils.version import LooseVersion
import subprocess
@@ -531,8 +556,8 @@ def check_tar_version(sanity_data):
except subprocess.CalledProcessError as e:
return "Unable to execute tar --version, exit code %d\n%s\n" % (e.returncode, e.output)
version = result.split()[3]
- if LooseVersion(version) < LooseVersion("1.24"):
- return "Your version of tar is older than 1.24 and has bugs which will break builds. Please install a newer version of tar.\n"
+ if LooseVersion(version) < LooseVersion("1.28"):
+ return "Your version of tar is older than 1.28 and does not have the support needed to enable reproducible builds. Please install a newer version of tar (you could use the project's buildtools-tarball from our last release or use scripts/install-buildtools).\n"
return None
# We use git parameters and functionality only found in 1.7.8 or later
@@ -560,7 +585,7 @@ def check_perl_modules(sanity_data):
try:
subprocess.check_output(["perl", "-e", "use %s" % m])
except subprocess.CalledProcessError as e:
- errresult += e.output
+ errresult += bytes.decode(e.output)
ret += "%s " % m
if ret:
return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
@@ -573,7 +598,7 @@ def sanity_check_conffiles(d):
if check_conf_exists(conffile, d) and d.getVar(current_version) is not None and \
d.getVar(current_version) != d.getVar(required_version):
try:
- bb.build.exec_func(func, d, pythonexception=True)
+ bb.build.exec_func(func, d)
except NotImplementedError as e:
bb.fatal(str(e))
d.setVar("BB_INVALIDCONF", True)
@@ -622,14 +647,16 @@ def check_sanity_version_change(status, d):
# In other words, these tests run once in a given build directory and then
# never again until the sanity version or host distribution id/version changes.
- # Check the python install is complete. glib-2.0-natives requries
- # xml.parsers.expat
+ # Check the python install is complete. Examples that are often removed in
+    # minimal installations: glib-2.0-native requires xml.parsers.expat and icu
+ # requires distutils.sysconfig.
try:
import xml.parsers.expat
- except ImportError:
- status.addresult('Your python is not a full install. Please install the module xml.parsers.expat (python-xml on openSUSE and SUSE Linux).\n')
- import stat
+ import distutils.sysconfig
+ except ImportError as e:
+ status.addresult('Your Python 3 is not a full install. Please install the module %s (see the Getting Started guide for further information).\n' % e.name)
+ status.addresult(check_gcc_version(d))
status.addresult(check_make_version(d))
status.addresult(check_patch_version(d))
status.addresult(check_tar_version(d))
@@ -664,6 +691,7 @@ def check_sanity_version_change(status, d):
status.addresult('Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n')
# Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ import stat
tmpdir = d.getVar('TMPDIR')
status.addresult(check_create_long_filename(tmpdir, "TMPDIR"))
tmpdirmode = os.stat(tmpdir).st_mode
@@ -756,6 +784,12 @@ def check_sanity_everybuild(status, d):
if "." in paths or "./" in paths or "" in paths:
status.addresult("PATH contains '.', './' or '' (empty element), which will break the build, please remove this.\nParsed PATH is " + str(paths) + "\n")
+    # Check whether the 'inherit' directive is used in a conf file; there it is
+    # supposed to be the uppercase INHERIT
+ inherit = d.getVar('inherit')
+ if inherit:
+        status.addresult("Please don't use the inherit directive in your local.conf. It is only meant for classes and recipes to inherit bbclasses; in configuration files use INHERIT instead.\n")
+
# Check that the DISTRO is valid, if set
# need to take into account DISTRO renaming DISTRO
distro = d.getVar('DISTRO')
@@ -798,6 +832,11 @@ def check_sanity_everybuild(status, d):
elif d.getVar('SDK_ARCH', False) == "${BUILD_ARCH}":
status.addresult('SDKMACHINE is set, but SDK_ARCH has not been changed as a result - SDKMACHINE may have been set too late (e.g. in the distro configuration)\n')
+ # If SDK_VENDOR looks like "-my-sdk" then the triples are badly formed so fail early
+ sdkvendor = d.getVar("SDK_VENDOR")
+ if not (sdkvendor.startswith("-") and sdkvendor.count("-") == 1):
+ status.addresult("SDK_VENDOR should be of the form '-foosdk' with a single dash; found '%s'\n" % sdkvendor)
+
check_supported_distro(d)
omask = os.umask(0o022)
@@ -876,7 +915,7 @@ def check_sanity_everybuild(status, d):
with open(checkfile, "r") as f:
saved_tmpdir = f.read().strip()
if (saved_tmpdir != tmpdir):
- status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir)
+ status.addresult("Error, TMPDIR has changed location. You need to either move it back to %s or delete it and rebuild\n" % saved_tmpdir)
else:
bb.utils.mkdirhier(tmpdir)
# Remove setuid, setgid and sticky bits from TMPDIR
@@ -919,7 +958,7 @@ def check_sanity(sanity_data):
last_tmpdir = ""
last_sstate_dir = ""
last_nativelsbstr = ""
- sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
+ sanityverfile = sanity_data.expand("${TOPDIR}/cache/sanity_info")
if os.path.exists(sanityverfile):
with open(sanityverfile, 'r') as f:
for line in f:
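Note: check_gcc_version() and the stricter tar check both point at scripts/install-buildtools as the least invasive fix on older hosts. A hedged usage sketch (installation path and environment file name depend on the release):

    $ scripts/install-buildtools
    $ . buildtools/environment-setup-x86_64-pokysdk-linux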
diff --git a/external/poky/meta/classes/scons.bbclass b/external/poky/meta/classes/scons.bbclass
index b9ae19d5..6b171ca8 100644
--- a/external/poky/meta/classes/scons.bbclass
+++ b/external/poky/meta/classes/scons.bbclass
@@ -1,17 +1,31 @@
-DEPENDS += "python-scons-native"
+inherit python3native
+
+DEPENDS += "python3-scons-native"
EXTRA_OESCONS ?= ""
-do_configure[noexec] = "1"
+do_configure() {
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ if [ -n "${CONFIGURESTAMPFILE}" ]; then
+ if [ -e "${CONFIGURESTAMPFILE}" -a "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" -a "${CLEANBROKEN}" != "1" ]; then
+ ${STAGING_BINDIR_NATIVE}/scons --clean PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS}
+ fi
+
+ mkdir -p `dirname ${CONFIGURESTAMPFILE}`
+ echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
+ fi
+}
scons_do_compile() {
- ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
- die "scons build execution failed."
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ die "scons build execution failed."
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
- die "scons install execution failed."
+ unset _PYTHON_SYSCONFIGDATA_NAME
+ ${STAGING_BINDIR_NATIVE}/scons install_root=${D}${prefix} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} install || \
+ die "scons install execution failed."
}
EXPORT_FUNCTIONS do_compile do_install
diff --git a/external/poky/meta/classes/setuptools.bbclass b/external/poky/meta/classes/setuptools.bbclass
deleted file mode 100644
index a923ea3c..00000000
--- a/external/poky/meta/classes/setuptools.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-inherit distutils
-
-DEPENDS += "python-setuptools-native"
diff --git a/external/poky/meta/classes/siteinfo.bbclass b/external/poky/meta/classes/siteinfo.bbclass
index 411e7047..1a048c05 100644
--- a/external/poky/meta/classes/siteinfo.bbclass
+++ b/external/poky/meta/classes/siteinfo.bbclass
@@ -35,7 +35,6 @@ def siteinfo_data_for_machine(arch, os, d):
"lm32": "endian-big bit-32",
"m68k": "endian-big bit-32",
"microblaze": "endian-big bit-32 microblaze-common",
- "microblazeeb": "endian-big bit-32 microblaze-common",
"microblazeel": "endian-little bit-32 microblaze-common",
"mips": "endian-big bit-32 mips-common",
"mips64": "endian-big bit-64 mips-common",
@@ -48,6 +47,7 @@ def siteinfo_data_for_machine(arch, os, d):
"powerpc": "endian-big bit-32 powerpc-common",
"nios2": "endian-little bit-32 nios2-common",
"powerpc64": "endian-big bit-64 powerpc-common",
+ "powerpc64le": "endian-little bit-64 powerpc-common",
"ppc": "endian-big bit-32 powerpc-common",
"ppc64": "endian-big bit-64 powerpc-common",
"ppc64le" : "endian-little bit-64 powerpc-common",
@@ -88,8 +88,6 @@ def siteinfo_data_for_machine(arch, os, d):
"arm-linux-musleabi": "arm-linux",
"armeb-linux-gnueabi": "armeb-linux",
"armeb-linux-musleabi": "armeb-linux",
- "microblazeeb-linux" : "microblaze-linux",
- "microblazeeb-linux-musl" : "microblaze-linux",
"microblazeel-linux" : "microblaze-linux",
"microblazeel-linux-musl" : "microblaze-linux",
"mips-linux-musl": "mips-linux",
@@ -106,8 +104,10 @@ def siteinfo_data_for_machine(arch, os, d):
"powerpc-linux-muslspe": "powerpc-linux powerpc32-linux",
"powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
"powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
- "powerpc64-linux": "powerpc-linux",
- "powerpc64-linux-musl": "powerpc-linux",
+ "powerpc64-linux": "powerpc-linux powerpc64-linux",
+ "powerpc64-linux-musl": "powerpc-linux powerpc64-linux",
+ "powerpc64le-linux": "powerpc-linux powerpc64-linux",
+ "powerpc64le-linux-musl": "powerpc-linux powerpc64-linux",
"riscv32-linux": "riscv32-linux",
"riscv32-linux-musl": "riscv32-linux",
"riscv64-linux": "riscv64-linux",
diff --git a/external/poky/meta/classes/sstate.bbclass b/external/poky/meta/classes/sstate.bbclass
index edbfba5d..375196ef 100644
--- a/external/poky/meta/classes/sstate.bbclass
+++ b/external/poky/meta/classes/sstate.bbclass
@@ -3,19 +3,41 @@ SSTATE_VERSION = "3"
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
-def generate_sstatefn(spec, hash, d):
+def generate_sstatefn(spec, hash, taskname, siginfo, d):
+ if taskname is None:
+ return ""
+ extension = ".tgz"
+ # 8 chars reserved for siginfo
+ limit = 254 - 8
+ if siginfo:
+ limit = 254
+ extension = ".tgz.siginfo"
if not hash:
hash = "INVALID"
- return hash[:2] + "/" + spec + hash
+ fn = spec + hash + "_" + taskname + extension
+ # If the filename is too long, attempt to reduce it
+ if len(fn) > limit:
+ components = spec.split(":")
+ # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
+ # 7 is for the separators
+ avail = (254 - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
+ components[2] = components[2][:avail]
+ components[3] = components[3][:avail]
+ components[4] = components[4][:avail]
+ spec = ":".join(components)
+ fn = spec + hash + "_" + taskname + extension
+ if len(fn) > limit:
+            bb.fatal("Unable to reduce sstate name to less than 255 characters")
+ return hash[:2] + "/" + hash[2:4] + "/" + fn
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
-SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_TASKHASH'), d)}"
+SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
-SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"
+SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tgz*"
# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"
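Note: generate_sstatefn() now bakes the task name into the object name, shards the store two levels deep by hash prefix, and truncates the purely informational spec fields when the result would exceed the 255-character filename limit. Illustrative layout under SSTATE_DIR (recipe and hash abbreviated):

    old:  ab/sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:ab12...cd_populate_sysroot.tgz
    new:  ab/12/sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:ab12...cd_populate_sysroot.tgz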
@@ -23,6 +45,7 @@ PV[vardepvalue] = "${PV}"
# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
+SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
@@ -61,6 +84,7 @@ SSTATE_ARCHS = " \
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
SSTATECREATEFUNCS = "sstate_hardcode_path"
+SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
@@ -82,6 +106,18 @@ SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
+SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
+SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
+ the output hash for a task, which in turn is used to determine equivalency. \
+ "
+
+SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
+SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
+ hash equivalency server, such as PN, PV, taskname, etc. This information \
+ is very useful for developers looking at task data, but may leak sensitive \
+ data if the equivalence server is public. \
+ "
+
python () {
if bb.data.inherits_class('native', d):
d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
@@ -101,7 +137,7 @@ python () {
if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
- d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
+ d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
d.setVar('SSTATETASKS', " ".join(unique_tasks))
@@ -303,25 +339,29 @@ def sstate_installpkg(ss, d):
from oe.gpg_sign import get_signer
sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
- sstatefetch = d.getVar('SSTATE_PKGNAME') + '_' + ss['task'] + ".tgz"
- sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
+ sstatefetch = d.getVar('SSTATE_PKGNAME')
+ sstatepkg = d.getVar('SSTATE_PKG')
if not os.path.exists(sstatepkg):
pstaging_fetch(sstatefetch, d)
if not os.path.isfile(sstatepkg):
- bb.note("Staging package %s does not exist" % sstatepkg)
+ bb.note("Sstate package %s does not exist" % sstatepkg)
return False
sstate_clean(ss, d)
d.setVar('SSTATE_INSTDIR', sstateinst)
- d.setVar('SSTATE_PKG', sstatepkg)
if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
+ if not os.path.isfile(sstatepkg + '.sig'):
+ bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
signer = get_signer(d, 'local')
if not signer.verify(sstatepkg + '.sig'):
- bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
+ bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
+ return False
# Empty sstateinst directory, ensure its clean
if os.path.exists(sstateinst):
@@ -425,8 +465,9 @@ python sstate_hardcode_path_unpack () {
def sstate_clean_cachefile(ss, d):
import oe.path
- sstatepkgfile = d.getVar('SSTATE_PATHSPEC') + "*_" + ss['task'] + ".tgz*"
if d.getVarFlag('do_%s' % ss['task'], 'task'):
+ d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
+ sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
bb.note("Removing %s" % sstatepkgfile)
oe.path.remove(sstatepkgfile)
@@ -597,10 +638,9 @@ def sstate_package(ss, d):
tmpdir = d.getVar('TMPDIR')
sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
- sstatepkg = d.getVar('SSTATE_PKG') + '_'+ ss['task'] + ".tgz"
+ d.setVar("SSTATE_CURRTASK", ss['task'])
bb.utils.remove(sstatebuild, recurse=True)
bb.utils.mkdirhier(sstatebuild)
- bb.utils.mkdirhier(os.path.dirname(sstatepkg))
for state in ss['dirs']:
if not os.path.exists(state[1]):
continue
@@ -633,19 +673,30 @@ def sstate_package(ss, d):
os.rename(plain, pdir)
d.setVar('SSTATE_BUILDDIR', sstatebuild)
- d.setVar('SSTATE_PKG', sstatepkg)
d.setVar('SSTATE_INSTDIR', sstatebuild)
if d.getVar('SSTATE_SKIP_CREATION') == '1':
return
+ sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
+ if d.getVar('SSTATE_SIG_KEY'):
+ sstate_create_package.append('sstate_sign_package')
+
for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
- ['sstate_create_package', 'sstate_sign_package'] + \
+ sstate_create_package + \
(d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
# All hooks should run in SSTATE_BUILDDIR.
bb.build.exec_func(f, d, (sstatebuild,))
- bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+ # SSTATE_PKG may have been changed by sstate_report_unihash
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
return
@@ -670,7 +721,8 @@ def pstaging_fetch(sstatefetch, d):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
# Try a fetch from the sstate mirror, if it fails just return and
@@ -684,10 +736,11 @@ def pstaging_fetch(sstatefetch, d):
localdata.setVar('SRC_URI', srcuri)
try:
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.checkstatus()
fetcher.download()
except bb.fetch2.BBFetchException:
- break
+ pass
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
@@ -728,13 +781,20 @@ sstate_task_postfunc[dirs] = "${WORKDIR}"
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
+ # Exit early if it already exists
+ if [ -e ${SSTATE_PKG} ]; then
+ [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
+ return
+ fi
+
+ mkdir -p `dirname ${SSTATE_PKG}`
TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
- # Use pigz if available
- OPT="-czS"
- if [ -x "$(command -v pigz)" ]; then
- OPT="-I pigz -cS"
- fi
+ # Use pigz if available
+ OPT="-czS"
+ if [ -x "$(command -v pigz)" ]; then
+ OPT="-I pigz -cS"
+ fi
# Need to handle empty directories
if [ "$(ls -A)" ]; then
@@ -749,19 +809,35 @@ sstate_create_package () {
tar $OPT --file=$TFILE --files-from=/dev/null
fi
chmod 0664 $TFILE
- mv -f $TFILE ${SSTATE_PKG}
+ # Skip if it was already created by some other process
+ if [ ! -e ${SSTATE_PKG} ]; then
+ # Move into place using ln to attempt an atomic op.
+ # Abort if it already exists
+ ln $TFILE ${SSTATE_PKG} && rm $TFILE
+ else
+ rm $TFILE
+ fi
+ [ ! -w ${SSTATE_PKG} ] || touch ${SSTATE_PKG}
}
python sstate_sign_package () {
from oe.gpg_sign import get_signer
- if d.getVar('SSTATE_SIG_KEY'):
- signer = get_signer(d, 'local')
- sstate_pkg = d.getVar('SSTATE_PKG')
- if os.path.exists(sstate_pkg + '.sig'):
- os.unlink(sstate_pkg + '.sig')
- signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
- d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+
+ signer = get_signer(d, 'local')
+ sstate_pkg = d.getVar('SSTATE_PKG')
+ if os.path.exists(sstate_pkg + '.sig'):
+ os.unlink(sstate_pkg + '.sig')
+ signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+ d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
+}
+
+python sstate_report_unihash() {
+ report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
+
+ if report_unihash:
+ ss = sstate_state_fromvars(d)
+ report_unihash(os.getcwd(), ss['task'], d)
}
#
@@ -780,24 +856,23 @@ sstate_unpack_package () {
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
-def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
+def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
+ found = set()
+ missed = set()
- ret = []
- missed = []
- extension = ".tgz"
- if siginfo:
- extension = extension + ".siginfo"
+ def gethash(task):
+ return sq_data['unihash'][task]
def getpathcomponents(task, d):
# Magic data from BB_HASHFILENAME
- splithashfn = sq_hashfn[task].split(" ")
+ splithashfn = sq_data['hashfn'][task].split(" ")
spec = splithashfn[1]
if splithashfn[0] == "True":
extrapath = d.getVar("NATIVELSBSTRING") + "/"
else:
extrapath = ""
-
- tname = sq_task[task][3:]
+
+ tname = bb.runqueue.taskname_from_tid(task)[3:]
if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
spec = splithashfn[2]
@@ -806,18 +881,18 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
return spec, extrapath, tname
- for task in range(len(sq_fn)):
+ for tid in sq_data['hash']:
- spec, extrapath, tname = getpathcomponents(task, d)
+ spec, extrapath, tname = getpathcomponents(tid, d)
- sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+ sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
if os.path.exists(sstatefile):
bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
- ret.append(task)
+ found.add(tid)
continue
else:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
mirrors = d.getVar("SSTATE_MIRRORS")
@@ -835,7 +910,8 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
# if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
# we'll want to allow network access for the current set of fetches.
- if localdata.getVar('BB_NO_NETWORK') == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK') == "1":
+ if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
+ bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
localdata.delVar('BB_NO_NETWORK')
from bb.fetch2 import FetchConnectionCache
@@ -846,7 +922,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
thread_worker.connection_cache.close_connections()
def checkstatus(thread_worker, arg):
- (task, sstatefile) = arg
+ (tid, sstatefile) = arg
localdata2 = bb.data.createCopy(localdata)
srcuri = "file://" + sstatefile
@@ -858,26 +934,29 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
connection_cache=thread_worker.connection_cache)
fetcher.checkstatus()
bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
- ret.append(task)
- if task in missed:
- missed.remove(task)
+ found.add(tid)
+ if tid in missed:
+ missed.remove(tid)
except:
- missed.append(task)
+ missed.add(tid)
bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
pass
- bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
+ if len(tasklist) >= min_tasks:
+ bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)
tasklist = []
- for task in range(len(sq_fn)):
- if task in ret:
+ min_tasks = 100
+ for tid in sq_data['hash']:
+ if tid in found:
continue
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
- tasklist.append((task, sstatefile))
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d))
+ tasklist.append((tid, sstatefile))
if tasklist:
- msg = "Checking sstate mirror object availability"
- bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
+ if len(tasklist) >= min_tasks:
+ msg = "Checking sstate mirror object availability"
+ bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
import multiprocessing
nproc = min(multiprocessing.cpu_count(), len(tasklist))
@@ -891,37 +970,38 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
pool.wait_completion()
bb.event.disable_threadlock()
- bb.event.fire(bb.event.ProcessFinished(msg), d)
+ if len(tasklist) >= min_tasks:
+ bb.event.fire(bb.event.ProcessFinished(msg), d)
inheritlist = d.getVar("INHERIT")
if "toaster" in inheritlist:
evdata = {'missed': [], 'found': []};
- for task in missed:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
- for task in ret:
- spec, extrapath, tname = getpathcomponents(task, d)
- sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
- evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
+ for tid in missed:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
+ for tid in found:
+ spec, extrapath, tname = getpathcomponents(tid, d)
+ sstatefile = d.expand(extrapath + generate_sstatefn(spec, gethash(tid), tname, False, d))
+ evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
- # Print some summary statistics about the current task completion and how much sstate
- # reuse there was. Avoid divide by zero errors.
- total = len(sq_fn)
- currentcount = d.getVar("BB_SETSCENE_STAMPCURRENT_COUNT") or 0
- complete = 0
- if currentcount:
- complete = (len(ret) + currentcount) / (total + currentcount) * 100
- match = 0
- if total:
- match = len(ret) / total * 100
- bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(ret), len(missed), currentcount, match, complete))
+ if summary:
+ # Print some summary statistics about the current task completion and how much sstate
+ # reuse there was. Avoid divide by zero errors.
+ total = len(sq_data['hash'])
+ complete = 0
+ if currentcount:
+ complete = (len(found) + currentcount) / (total + currentcount) * 100
+ match = 0
+ if total:
+ match = len(found) / total * 100
+ bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(found), len(missed), currentcount, match, complete))
if hasattr(bb.parse.siggen, "checkhashes"):
- bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
+ bb.parse.siggen.checkhashes(sq_data, missed, found, d)
- return ret
+ return found
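
A worked example of the summary line above, with made-up numbers: if 100 setscene tasks were queried, 70 sstate objects were found, 30 were missed and 50 tasks were already current, the output reads "Sstate summary: Wanted 100 Found 70 Missed 30 Current 50 (70% match, 80% complete)":

# Illustrative arithmetic only; the values are invented.
found, missed, currentcount = 70, 30, 50
total = found + missed                                              # tasks queried against sstate
match = found / total * 100                                         # 70.0
complete = (found + currentcount) / (total + currentcount) * 100    # 120/150 -> 80.0
print("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)"
      % (total, found, missed, currentcount, match, complete))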
BB_SETSCENE_DEPVALID = "setscene_depvalid"
@@ -1036,17 +1116,24 @@ addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
d = e.data
- # When we write an sstate package we rewrite the SSTATE_PKG
- spkg = d.getVar('SSTATE_PKG')
- if not spkg.endswith(".tgz"):
+ writtensstate = d.getVar('SSTATE_CURRTASK')
+ if not writtensstate:
taskname = d.getVar("BB_RUNTASK")[3:]
spec = d.getVar('SSTATE_PKGSPEC')
swspec = d.getVar('SSTATE_SWSPEC')
if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
d.setVar("SSTATE_EXTRAPATH", "")
- sstatepkg = d.getVar('SSTATE_PKG')
- bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
+ d.setVar("SSTATE_CURRTASK", taskname)
+ siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
+ if not os.path.exists(siginfo):
+ bb.siggen.dump_this_task(siginfo, d)
+ else:
+ try:
+ os.utime(siginfo, None)
+ except PermissionError:
+ pass
+
}
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
@@ -1087,12 +1174,15 @@ python sstate_eventhandler2() {
with open(i, "r") as f:
lines = f.readlines()
for l in lines:
- (stamp, manifest, workdir) = l.split()
- if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
- toremove.append(l)
- if stamp not in seen:
- bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
- seen.append(stamp)
+ try:
+ (stamp, manifest, workdir) = l.split()
+ if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
+ toremove.append(l)
+ if stamp not in seen:
+ bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
+ seen.append(stamp)
+ except ValueError:
+ bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
if toremove:
msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
diff --git a/external/poky/meta/classes/staging.bbclass b/external/poky/meta/classes/staging.bbclass
index 84e13bab..5b04f88b 100644
--- a/external/poky/meta/classes/staging.bbclass
+++ b/external/poky/meta/classes/staging.bbclass
@@ -27,11 +27,12 @@ SYSROOT_DIRS_BLACKLIST = " \
${mandir} \
${docdir} \
${infodir} \
- ${datadir}/locale \
${datadir}/applications \
${datadir}/fonts \
+ ${datadir}/gtk-doc/html \
+ ${datadir}/locale \
${datadir}/pixmaps \
- ${libdir}/${PN}/ptest \
+ ${libdir}/${BPN}/ptest \
"
sysroot_stage_dir() {
@@ -74,8 +75,8 @@ python sysroot_strip () {
dstdir = d.getVar('SYSROOT_DESTDIR')
pn = d.getVar('PN')
- libdir = os.path.abspath(dstdir + os.sep + d.getVar("libdir"))
- base_libdir = os.path.abspath(dstdir + os.sep + d.getVar("base_libdir"))
+ libdir = d.getVar("libdir")
+ base_libdir = d.getVar("base_libdir")
qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
strip_cmd = d.getVar("STRIP")
@@ -167,7 +168,7 @@ def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
if not fixme:
return
cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
- for fixmevar in ['COMPONENTS_DIR', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
+ for fixmevar in ['PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO']:
fixme_path = d.getVar(fixmevar)
cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
bb.debug(2, cmd)
@@ -196,7 +197,11 @@ def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
for pkgarch in pkgarchs:
for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
if manifest.endswith("-initial.populate_sysroot"):
- # skip glibc-initial and libgcc-initial due to file overlap
+ # skip libgcc-initial due to file overlap
+ continue
+ if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
+ continue
+ if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
continue
tmanifest = targetdir + "/" + os.path.basename(manifest)
if os.path.exists(tmanifest):
@@ -256,12 +261,10 @@ python extend_recipe_sysroot() {
workdir = d.getVar("WORKDIR")
#bb.warn(str(taskdepdata))
pn = d.getVar("PN")
- mc = d.getVar("BB_CURRENT_MC")
stagingdir = d.getVar("STAGING_DIR")
sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
recipesysroot = d.getVar("RECIPE_SYSROOT")
recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")
- current_variant = d.getVar("BBEXTENDVARIANT")
# Detect bitbake -b usage
nodeps = d.getVar("BB_LIMITEDDEPS") or False
@@ -274,11 +277,13 @@ python extend_recipe_sysroot() {
start = None
configuredeps = []
+ owntaskdeps = []
for dep in taskdepdata:
data = taskdepdata[dep]
if data[1] == mytaskname and data[0] == pn:
start = dep
- break
+ elif data[0] == pn:
+ owntaskdeps.append(data[1])
if start is None:
bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
@@ -424,7 +429,7 @@ python extend_recipe_sysroot() {
# Was likely already uninstalled
continue
potential.append(l)
- # We need to ensure not other task needs this dependency. We hold the sysroot
+ # We need to ensure no other task needs this dependency. We hold the sysroot
# lock so we can search the indexes to check
if potential:
for i in glob.glob(depdir + "/index.*"):
@@ -432,6 +437,11 @@ python extend_recipe_sysroot() {
continue
with open(i, "r") as f:
for l in f:
+ if l.startswith("TaskDeps:"):
+ prevtasks = l.split()[1:]
+ if mytaskname in prevtasks:
+                            # We're a dependency of this task, so we can clear items out of the sysroot
+ break
l = l.strip()
if l in potential:
potential.remove(l)
@@ -446,12 +456,8 @@ python extend_recipe_sysroot() {
msg_exists = []
msg_adding = []
+ # Handle all removals first since files may move between recipes
for dep in configuredeps:
- if mc != 'default':
- # We should not care about other multiconfigs
- depmc = dep.split(':')[1]
- if depmc != mc:
- continue
c = setscenedeps[dep][0]
if c not in installed:
continue
@@ -461,7 +467,6 @@ python extend_recipe_sysroot() {
if os.path.exists(depdir + "/" + c):
lnk = os.readlink(depdir + "/" + c)
if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
- msg_exists.append(c)
continue
else:
bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
@@ -472,6 +477,21 @@ python extend_recipe_sysroot() {
elif os.path.lexists(depdir + "/" + c):
os.unlink(depdir + "/" + c)
+ binfiles = {}
+ # Now handle installs
+ for dep in configuredeps:
+ c = setscenedeps[dep][0]
+ if c not in installed:
+ continue
+ taskhash = setscenedeps[dep][5]
+ taskmanifest = depdir + "/" + c + "." + taskhash
+
+ if os.path.exists(depdir + "/" + c):
+ lnk = os.readlink(depdir + "/" + c)
+ if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
+ msg_exists.append(c)
+ continue
+
msg_adding.append(c)
os.symlink(c + "." + taskhash, depdir + "/" + c)
@@ -550,7 +570,16 @@ python extend_recipe_sysroot() {
if l.endswith("/"):
staging_copydir(l, targetdir, dest, seendirs)
continue
- staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+ if "/bin/" in l or "/sbin/" in l:
+ # defer /*bin/* files until last in case they need libs
+ binfiles[l] = (targetdir, dest)
+ else:
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
+
+ # Handle deferred binfiles
+ for l in binfiles:
+ (targetdir, dest) = binfiles[l]
+ staging_copyfile(l, targetdir, dest, postinsts, seendirs)
bb.note("Installed into sysroot: %s" % str(msg_adding))
bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))
@@ -566,6 +595,7 @@ python extend_recipe_sysroot() {
os.symlink(manifests[dep], depdir + "/" + c + ".complete")
with open(taskindex, "w") as f:
+ f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
for l in sorted(installed):
f.write(l + "\n")
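
With the new "TaskDeps:" header, each index file written above records which of the recipe's own tasks the dependency data covered, followed by the installed components; the earlier hunk reads that header to decide whether stale entries may be cleaned out. A purely hypothetical example of such an index file (task and component names invented):

TaskDeps: do_fetch do_unpack do_patch do_configure
attr
libcap
ncurses-native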
@@ -573,22 +603,12 @@ python extend_recipe_sysroot() {
}
extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
+do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
python do_prepare_recipe_sysroot () {
bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask do_prepare_recipe_sysroot before do_configure after do_fetch
-# Clean out the recipe specific sysroots before do_fetch
-# (use a prefunc so we can order before extend_recipe_sysroot if it gets added)
-python clean_recipe_sysroot() {
- # We remove these stamps since we're removing any content they'd have added with
- # cleandirs. This removes the sigdata too, likely not a big deal,
- oe.path.remove(d.getVar("STAMP") + "*addto_recipe_sysroot*")
- return
-}
-clean_recipe_sysroot[cleandirs] += "${RECIPE_SYSROOT} ${RECIPE_SYSROOT_NATIVE}"
-do_fetch[prefuncs] += "clean_recipe_sysroot"
-
python staging_taskhandler() {
bbtasks = e.tasklist
for task in bbtasks:
diff --git a/external/poky/meta/classes/syslinux.bbclass b/external/poky/meta/classes/syslinux.bbclass
index 031dacbf..894f6b37 100644
--- a/external/poky/meta/classes/syslinux.bbclass
+++ b/external/poky/meta/classes/syslinux.bbclass
@@ -75,11 +75,6 @@ syslinux_hddimg_install() {
syslinux ${IMGDEPLOYDIR}/${IMAGE_NAME}.hddimg
}
-syslinux_hdddirect_install() {
- DEST=$1
- syslinux $DEST
-}
-
python build_syslinux_cfg () {
import copy
import sys
diff --git a/external/poky/meta/classes/systemd-boot-cfg.bbclass b/external/poky/meta/classes/systemd-boot-cfg.bbclass
index 021c9f93..b3e0e6ad 100644
--- a/external/poky/meta/classes/systemd-boot-cfg.bbclass
+++ b/external/poky/meta/classes/systemd-boot-cfg.bbclass
@@ -2,6 +2,9 @@ SYSTEMD_BOOT_CFG ?= "${S}/loader.conf"
SYSTEMD_BOOT_ENTRIES ?= ""
SYSTEMD_BOOT_TIMEOUT ?= "10"
+# Uses MACHINE specific KERNEL_IMAGETYPE
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
# Need UUID utility code.
inherit fs-uuid
diff --git a/external/poky/meta/classes/systemd-boot.bbclass b/external/poky/meta/classes/systemd-boot.bbclass
index 3cd6811a..336c4c2f 100644
--- a/external/poky/meta/classes/systemd-boot.bbclass
+++ b/external/poky/meta/classes/systemd-boot.bbclass
@@ -11,50 +11,25 @@
do_bootimg[depends] += "${MLPREFIX}systemd-boot:do_deploy"
-EFIDIR = "/EFI/BOOT"
+require conf/image-uefi.conf
# Need UUID utility code.
inherit fs-uuid
efi_populate() {
- DEST=$1
+ efi_populate_common "$1" systemd
- EFI_IMAGE="systemd-bootia32.efi"
- DEST_EFI_IMAGE="bootia32.efi"
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- EFI_IMAGE="systemd-bootx64.efi"
- DEST_EFI_IMAGE="bootx64.efi"
- fi
-
- install -d ${DEST}${EFIDIR}
# systemd-boot requires these paths for configuration files;
# they are not customizable, so there is no point in adding new variables
install -d ${DEST}/loader
install -d ${DEST}/loader/entries
- install -m 0644 ${DEPLOY_DIR_IMAGE}/${EFI_IMAGE} ${DEST}${EFIDIR}/${DEST_EFI_IMAGE}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- printf 'fs0:%s\%s\n' "$EFIPATH" "$DEST_EFI_IMAGE" >${DEST}/startup.nsh
install -m 0644 ${SYSTEMD_BOOT_CFG} ${DEST}/loader/loader.conf
for i in ${SYSTEMD_BOOT_ENTRIES}; do
install -m 0644 ${i} ${DEST}/loader/entries
done
}
-efi_iso_populate() {
- iso_dir=$1
- efi_populate $iso_dir
- mkdir -p ${EFIIMGDIR}/${EFIDIR}
- cp $iso_dir/${EFIDIR}/* ${EFIIMGDIR}${EFIDIR}
+efi_iso_populate_append() {
cp -r $iso_dir/loader ${EFIIMGDIR}
- cp $iso_dir/${KERNEL_IMAGETYPE} ${EFIIMGDIR}
- EFIPATH=$(echo "${EFIDIR}" | sed 's/\//\\/g')
- echo "fs0:${EFIPATH}\\${DEST_EFI_IMAGE}" > ${EFIIMGDIR}/startup.nsh
- if [ -f "$iso_dir/initrd" ] ; then
- cp $iso_dir/initrd ${EFIIMGDIR}
- fi
-}
-
-efi_hddimg_populate() {
- efi_populate $1
}
inherit systemd-boot-cfg
diff --git a/external/poky/meta/classes/systemd.bbclass b/external/poky/meta/classes/systemd.bbclass
index c8f4fdec..9e8a82c9 100644
--- a/external/poky/meta/classes/systemd.bbclass
+++ b/external/poky/meta/classes/systemd.bbclass
@@ -23,38 +23,37 @@ python __anonymous() {
}
systemd_postinst() {
-OPTS=""
+if type systemctl >/dev/null 2>/dev/null; then
+ OPTS=""
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
+ if [ -n "$D" ]; then
+ OPTS="--root=$D"
+ fi
-if type systemctl >/dev/null 2>/dev/null; then
- if [ -z "$D" ]; then
- systemctl daemon-reload
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ for service in ${SYSTEMD_SERVICE_ESCAPED}; do
+ systemctl ${OPTS} enable "$service"
+ done
fi
- systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE_ESCAPED}
+ if [ -z "$D" ]; then
+ systemctl daemon-reload
+ systemctl preset ${SYSTEMD_SERVICE_ESCAPED}
- if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
- systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
+ if [ "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
+ systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
fi
}
systemd_prerm() {
-OPTS=""
-
-if [ -n "$D" ]; then
- OPTS="--root=$D"
-fi
-
if type systemctl >/dev/null 2>/dev/null; then
if [ -z "$D" ]; then
systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
- fi
- systemctl $OPTS disable ${SYSTEMD_SERVICE_ESCAPED}
+ systemctl disable ${SYSTEMD_SERVICE_ESCAPED}
+ fi
fi
}
@@ -177,12 +176,25 @@ python systemd_populate_packages() {
else:
bb.fatal("SYSTEMD_SERVICE_%s value %s does not exist" % (pkg_systemd, service))
+ def systemd_create_presets(pkg, action):
+ presetf = oe.path.join(d.getVar("PKGD"), d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg)
+ bb.utils.mkdirhier(os.path.dirname(presetf))
+ with open(presetf, 'a') as fd:
+ for service in d.getVar('SYSTEMD_SERVICE_%s' % pkg).split():
+ fd.write("%s %s\n" % (action,service))
+ d.appendVar("FILES_%s" % pkg, ' ' + oe.path.join(d.getVar("systemd_unitdir"), "system-preset/98-%s.preset" % pkg))
+
# Run all modifications once when creating package
if os.path.exists(d.getVar("D")):
for pkg in d.getVar('SYSTEMD_PACKAGES').split():
systemd_check_package(pkg)
if d.getVar('SYSTEMD_SERVICE_' + pkg):
systemd_generate_package_scripts(pkg)
+ action = get_package_var(d, 'SYSTEMD_AUTO_ENABLE', pkg)
+ if action in ("enable", "disable"):
+ systemd_create_presets(pkg, action)
+ elif action not in ("mask", "preset"):
+ bb.fatal("SYSTEMD_AUTO_ENABLE_%s '%s' is not 'enable', 'disable', 'mask' or 'preset'" % (pkg, action))
systemd_check_services()
}
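
To make the new preset handling concrete, here is a hedged, standalone sketch (package name, service name and output path are invented; the real function writes under ${PKGD}${systemd_unitdir}/system-preset/) of what systemd_create_presets() ends up producing. With SYSTEMD_AUTO_ENABLE_foo = "enable" and SYSTEMD_SERVICE_foo = "foo.service" the generated 98-foo.preset holds the single line "enable foo.service", which "systemctl preset" then applies on the target:

# Hypothetical values purely for illustration.
pkg = "foo"
action = "enable"                         # SYSTEMD_AUTO_ENABLE_foo
services = ["foo.service"]                # SYSTEMD_SERVICE_foo
with open("98-%s.preset" % pkg, "w") as fd:
    for service in services:
        fd.write("%s %s\n" % (action, service))   # -> "enable foo.service"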
@@ -198,7 +210,6 @@ python rm_systemd_unitdir (){
if (os.path.exists(systemd_libdir) and not os.listdir(systemd_libdir)):
os.rmdir(systemd_libdir)
}
-do_install[postfuncs] += "rm_systemd_unitdir "
python rm_sysvinit_initddir (){
import shutil
@@ -213,4 +224,9 @@ python rm_sysvinit_initddir (){
if (os.path.exists(systemd_system_unitdir) and os.listdir(systemd_system_unitdir)):
shutil.rmtree(sysv_initddir)
}
-do_install[postfuncs] += "rm_sysvinit_initddir "
+
+do_install[postfuncs] += "${RMINITDIR} "
+RMINITDIR_class-target = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR_class-nativesdk = " rm_sysvinit_initddir rm_systemd_unitdir "
+RMINITDIR = ""
+
diff --git a/external/poky/meta/classes/terminal.bbclass b/external/poky/meta/classes/terminal.bbclass
index 73e765d5..6059ae95 100644
--- a/external/poky/meta/classes/terminal.bbclass
+++ b/external/poky/meta/classes/terminal.bbclass
@@ -14,6 +14,7 @@ def oe_terminal_prioritized():
return " ".join(o.name for o in oe.terminal.prioritized())
def emit_terminal_func(command, envdata, d):
+ import bb.build
cmd_func = 'do_terminal'
envdata.setVar(cmd_func, 'exec ' + command)
@@ -25,8 +26,7 @@ def emit_terminal_func(command, envdata, d):
bb.utils.mkdirhier(os.path.dirname(runfile))
with open(runfile, 'w') as script:
- script.write('#!/usr/bin/env %s\n' % d.getVar('SHELL'))
- script.write('set -e\n')
+ script.write(bb.build.shell_trap_code())
bb.data.emit_func(cmd_func, script, envdata)
script.write(cmd_func)
script.write("\n")
diff --git a/external/poky/meta/classes/testimage.bbclass b/external/poky/meta/classes/testimage.bbclass
index 34792283..00f0c298 100644
--- a/external/poky/meta/classes/testimage.bbclass
+++ b/external/poky/meta/classes/testimage.bbclass
@@ -31,7 +31,20 @@ TESTIMAGE_AUTO ??= "0"
# TEST_LOG_DIR contains a command ssh log and may contain information about what command is running, its output and return codes, and for qemu a boot log up until login.
# Booting is handled by this class, and it's not a test in itself.
# TEST_QEMUBOOT_TIMEOUT can be used to set the maximum time in seconds the launch code will wait for the login prompt.
+# TEST_OVERALL_TIMEOUT can be used to set the maximum time in seconds the tests will be allowed to run (defaults to no limit).
# TEST_QEMUPARAMS can be used to pass extra parameters to qemu, e.g. "-m 1024" for setting the amount of ram to 1 GB.
+# TEST_RUNQEMUPARAMS can be used to pass extra parameters to runqemu, e.g. "gl" to enable OpenGL acceleration.
+
+# TESTIMAGE_BOOT_PATTERNS can be used to override certain patterns used to communicate with the target when booting,
# if a pattern is not specifically present in this variable, a default will be used when booting the target.
+# TESTIMAGE_BOOT_PATTERNS[<flag>] overrides the pattern used for that specific flag, where flag comes from a list of accepted flags
# e.g. normally the system boots and waits for a login prompt (login:), after that it sends the command "root\n" to log in as the root user
# if we wanted to log in as the hypothetical "webserver" user, for example, we could set the following:
+# TESTIMAGE_BOOT_PATTERNS = "send_login_user search_login_succeeded"
+# TESTIMAGE_BOOT_PATTERNS[send_login_user] = "webserver\n"
+# TESTIMAGE_BOOT_PATTERNS[search_login_succeeded] = "webserver@[a-zA-Z0-9\-]+:~#"
+# The accepted flags are the following: search_reached_prompt, send_login_user, search_login_succeeded, search_cmd_finished.
# They are prefixed with either search or send, to indicate whether the pattern is searched for in, or sent to, the target terminal.
TEST_LOG_DIR ?= "${WORKDIR}/testimage"
@@ -45,7 +58,7 @@ BASICTESTSUITE = "\
ping date df ssh scp python perl gi ptest parselogs \
logrotate connman systemd oe_syslog pam stap ldd xorg \
kernelmodule gcc buildcpio buildlzip buildgalculator \
- dnf rpm opkg apt"
+ dnf rpm opkg apt weston"
DEFAULT_TEST_SUITES = "${BASICTESTSUITE}"
@@ -63,8 +76,12 @@ DEFAULT_TEST_SUITES_remove_qemumips64 = "${MIPSREMOVE}"
TEST_SUITES ?= "${DEFAULT_TEST_SUITES}"
TEST_QEMUBOOT_TIMEOUT ?= "1000"
+TEST_OVERALL_TIMEOUT ?= ""
TEST_TARGET ?= "qemu"
TEST_QEMUPARAMS ?= ""
+TEST_RUNQEMUPARAMS ?= ""
+
+TESTIMAGE_BOOT_PATTERNS ?= ""
TESTIMAGEDEPENDS = ""
TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
@@ -148,6 +165,29 @@ def get_testimage_json_result_dir(d):
def get_testimage_result_id(configuration):
return '%s_%s_%s_%s' % (configuration['TEST_TYPE'], configuration['IMAGE_BASENAME'], configuration['MACHINE'], configuration['STARTTIME'])
+def get_testimage_boot_patterns(d):
+ from collections import defaultdict
+ boot_patterns = defaultdict(str)
+ # Only accept certain values
+ accepted_patterns = ['search_reached_prompt', 'send_login_user', 'search_login_succeeded', 'search_cmd_finished']
+    # Not all patterns need to be overridden, e.g. perhaps we only want to change the user
+ boot_patterns_flags = d.getVarFlags('TESTIMAGE_BOOT_PATTERNS') or {}
+ if boot_patterns_flags:
+ patterns_set = [p for p in boot_patterns_flags.items() if p[0] in d.getVar('TESTIMAGE_BOOT_PATTERNS').split()]
+ for flag, flagval in patterns_set:
+ if flag not in accepted_patterns:
+ bb.fatal('Testimage: The only accepted boot patterns are: search_reached_prompt,send_login_user, \
+ search_login_succeeded,search_cmd_finished\n Make sure your TESTIMAGE_BOOT_PATTERNS=%s \
+ contains an accepted flag.' % d.getVar('TESTIMAGE_BOOT_PATTERNS'))
+ return
+        # We know the boot prompt is searched for in binary format, the others might be expressions
+ if flag == 'search_reached_prompt':
+ boot_patterns[flag] = flagval.encode()
+ else:
+ boot_patterns[flag] = flagval.encode().decode('unicode-escape')
+ return boot_patterns
+
+
def testimage_main(d):
import os
import json
@@ -166,7 +206,11 @@ def testimage_main(d):
"""
Catch SIGTERM from worker in order to stop qemu.
"""
- raise RuntimeError
+ os.kill(os.getpid(), signal.SIGINT)
+
+ def handle_test_timeout(timeout):
+ bb.warn("Global test timeout reached (%s seconds), stopping the tests." %(timeout))
+ os.kill(os.getpid(), signal.SIGINT)
testimage_sanity(d)
@@ -205,10 +249,14 @@ def testimage_main(d):
if d.getVar("TEST_TARGET") == "qemu":
fstypes = [fs for fs in fstypes if fs in supported_fstypes]
if not fstypes:
- bb.fatal('Unsupported image type built. Add a comptible image to '
+ bb.fatal('Unsupported image type built. Add a compatible image to '
'IMAGE_FSTYPES. Supported types: %s' %
', '.join(supported_fstypes))
- rootfs = '%s.%s' % (image_name, fstypes[0])
+ qfstype = fstypes[0]
+ qdeffstype = d.getVar("QB_DEFAULT_FSTYPE")
+ if qdeffstype:
+ qfstype = qdeffstype
+ rootfs = '%s.%s' % (image_name, qfstype)
# Get tmpdir (not really used, just for compatibility)
tmpdir = d.getVar("TMPDIR")
@@ -233,11 +281,14 @@ def testimage_main(d):
# Get use_kvm
kvm = oe.types.qemu_use_kvm(d.getVar('QEMU_USE_KVM'), d.getVar('TARGET_ARCH'))
+ # Get OVMF
+ ovmf = d.getVar("QEMU_USE_OVMF")
+
slirp = False
if d.getVar("QEMU_USE_SLIRP"):
slirp = True
- # TODO: We use the current implementatin of qemu runner because of
+ # TODO: We use the current implementation of qemu runner because of
# time constraints, qemu runner really needs a refactor too.
target_kwargs = { 'machine' : machine,
'rootfs' : rootfs,
@@ -250,12 +301,35 @@ def testimage_main(d):
'kvm' : kvm,
'slirp' : slirp,
'dump_dir' : d.getVar("TESTIMAGE_DUMP_DIR"),
+ 'serial_ports': len(d.getVar("SERIAL_CONSOLES").split()),
+ 'ovmf' : ovmf,
}
+ if d.getVar("TESTIMAGE_BOOT_PATTERNS"):
+ target_kwargs['boot_patterns'] = get_testimage_boot_patterns(d)
+
# TODO: Currently BBPATH is needed for custom loading of targets.
# It would be better to find these modules using introspection.
target_kwargs['target_modules_path'] = d.getVar('BBPATH')
+ # hardware controlled targets might need further access
+ target_kwargs['powercontrol_cmd'] = d.getVar("TEST_POWERCONTROL_CMD") or None
+ target_kwargs['powercontrol_extra_args'] = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS") or ""
+ target_kwargs['serialcontrol_cmd'] = d.getVar("TEST_SERIALCONTROL_CMD") or None
+ target_kwargs['serialcontrol_extra_args'] = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS") or ""
+
+ def export_ssh_agent(d):
+ import os
+
+ variables = ['SSH_AGENT_PID', 'SSH_AUTH_SOCK']
+ for v in variables:
+ if v not in os.environ.keys():
+ val = d.getVar(v)
+ if val is not None:
+ os.environ[v] = val
+
+ export_ssh_agent(d)
+
# the runtime tests use the network to download projects for the build
export_proxies(d)
@@ -294,11 +368,16 @@ def testimage_main(d):
try:
# We need to check if runqemu ends unexpectedly
# or if the worker send us a SIGTERM
- tc.target.start(params=d.getVar("TEST_QEMUPARAMS"))
+ tc.target.start(params=d.getVar("TEST_QEMUPARAMS"), runqemuparams=d.getVar("TEST_RUNQEMUPARAMS"))
+ import threading
+ try:
+ threading.Timer(int(d.getVar("TEST_OVERALL_TIMEOUT")), handle_test_timeout, (int(d.getVar("TEST_OVERALL_TIMEOUT")),)).start()
+ except ValueError:
+ pass
results = tc.runTests()
- except (RuntimeError, BlockingIOError) as err:
- if isinstance(err, RuntimeError):
- bb.error('testimage received SIGTERM, shutting down...')
+ except (KeyboardInterrupt, BlockingIOError) as err:
+ if isinstance(err, KeyboardInterrupt):
+ bb.error('testimage interrupted, shutting down...')
else:
bb.error('runqemu failed, shutting down...')
if results:
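
The TEST_OVERALL_TIMEOUT handling above arms a threading.Timer that interrupts the test run with SIGINT once the limit expires (and silently does nothing when the variable is unset, since int('') raises ValueError). A standalone sketch of that pattern, with an invented timeout and a dummy workload in place of tc.runTests():

import os
import signal
import threading
import time

def handle_test_timeout(timeout):
    # Mirrors the handler above: interrupt the process so the test loop
    # unwinds through KeyboardInterrupt.
    print("Global test timeout reached (%s seconds), stopping the tests." % timeout)
    os.kill(os.getpid(), signal.SIGINT)

overall_timeout = 5                      # stand-in for TEST_OVERALL_TIMEOUT
timer = threading.Timer(overall_timeout, handle_test_timeout, (overall_timeout,))
timer.start()
try:
    time.sleep(1)                        # stand-in for the actual test run
except KeyboardInterrupt:
    print("tests interrupted")
finally:
    timer.cancel()                       # unlike the class, cancel explicitly once done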
diff --git a/external/poky/meta/classes/texinfo.bbclass b/external/poky/meta/classes/texinfo.bbclass
index 6b0def0e..f46bacab 100644
--- a/external/poky/meta/classes/texinfo.bbclass
+++ b/external/poky/meta/classes/texinfo.bbclass
@@ -6,13 +6,13 @@
# Texinfo recipe, you can remove texinfo-native from ASSUME_PROVIDED and
# makeinfo from SANITY_REQUIRED_UTILITIES.
-TEXDEP = "texinfo-native"
+TEXDEP = "${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'texinfo-replacement-native', 'texinfo-dummy-native', d)}"
TEXDEP_class-native = "texinfo-dummy-native"
TEXDEP_class-cross = "texinfo-dummy-native"
+TEXDEP_class-crosssdk = "texinfo-dummy-native"
+TEXDEP_class-cross-canadian = "texinfo-dummy-native"
DEPENDS_append = " ${TEXDEP}"
-PATH_prepend_class-native = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
-PATH_prepend_class-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
# libtool-cross doesn't inherit cross
TEXDEP_pn-libtool-cross = "texinfo-dummy-native"
-PATH_prepend_pn-libtool-cross = "${STAGING_BINDIR_NATIVE}/texinfo-dummy-native:"
+
diff --git a/external/poky/meta/classes/tinderclient.bbclass b/external/poky/meta/classes/tinderclient.bbclass
deleted file mode 100644
index 00f453ce..00000000
--- a/external/poky/meta/classes/tinderclient.bbclass
+++ /dev/null
@@ -1,368 +0,0 @@
-def tinder_http_post(server, selector, content_type, body):
- import httplib
- # now post it
- for i in range(0,5):
- try:
- h = httplib.HTTP(server)
- h.putrequest('POST', selector)
- h.putheader('content-type', content_type)
- h.putheader('content-length', str(len(body)))
- h.endheaders()
- h.send(body)
- errcode, errmsg, headers = h.getreply()
- #print(errcode, errmsg, headers)
- return (errcode,errmsg, headers, h.file)
- except:
- print("Error sending the report!")
- # try again
- pass
-
- # return some garbage
- return (-1, "unknown", "unknown", None)
-
-def tinder_form_data(bound, dict, log):
- output = []
- # for each key in the dictionary
- for name in dict:
- assert dict[name]
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="%s"' % name )
- output.append( "" )
- output.append( dict[name] )
- if log:
- output.append( "--" + bound )
- output.append( 'Content-Disposition: form-data; name="log"; filename="log.txt"' )
- output.append( '' )
- output.append( log )
- output.append( '--' + bound + '--' )
- output.append( '' )
-
- return "\r\n".join(output)
-
-def tinder_time_string():
- """
- Return the time as GMT
- """
- return ""
-
-def tinder_format_http_post(d,status,log):
- """
- Format the Tinderbox HTTP post with the data needed
- for the tinderbox to be happy.
- """
-
- import random
-
- # the variables we will need to send on this form post
- variables = {
- "tree" : d.getVar('TINDER_TREE'),
- "machine_name" : d.getVar('TINDER_MACHINE'),
- "os" : os.uname()[0],
- "os_version" : os.uname()[2],
- "compiler" : "gcc",
- "clobber" : d.getVar('TINDER_CLOBBER') or "0",
- "srcdate" : d.getVar('SRCDATE'),
- "PN" : d.getVar('PN'),
- "PV" : d.getVar('PV'),
- "PR" : d.getVar('PR'),
- "FILE" : d.getVar('FILE') or "N/A",
- "TARGETARCH" : d.getVar('TARGET_ARCH'),
- "TARGETFPU" : d.getVar('TARGET_FPU') or "Unknown",
- "TARGETOS" : d.getVar('TARGET_OS') or "Unknown",
- "MACHINE" : d.getVar('MACHINE') or "Unknown",
- "DISTRO" : d.getVar('DISTRO') or "Unknown",
- "zecke-rocks" : "sure",
- }
-
- # optionally add the status
- if status:
- variables["status"] = str(status)
-
- # try to load the machine id
- # we only need on build_status.pl but sending it
- # always does not hurt
- try:
- f = open(d.getVar('TMPDIR')+'/tinder-machine.id', 'r')
- id = f.read()
- variables['machine_id'] = id
- except:
- pass
-
- # the boundary we will need
- boundary = "----------------------------------%d" % int(random.random()*1000000000000)
-
- # now format the body
- body = tinder_form_data( boundary, variables, log )
-
- return ("multipart/form-data; boundary=%s" % boundary),body
-
-
-def tinder_build_start(d):
- """
- Inform the tinderbox that a build is starting. We do this
- by posting our name and tree to the build_start.pl script
- on the server.
- """
-
- # get the body and type
- content_type, body = tinder_format_http_post(d,None,None)
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_start.pl"
-
- #print("selector %s and url %s" % (selector, url))
-
- # now post it
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- report = h_file.read()
-
- # now let us find the machine id that was assigned to us
- search = "<machine id='"
- report = report[report.find(search)+len(search):]
- report = report[0:report.find("'")]
-
- bb.note("Machine ID assigned by tinderbox: %s" % report )
-
- # now we will need to save the machine number
- # we will override any previous numbers
- f = open(d.getVar('TMPDIR')+"/tinder-machine.id", 'w')
- f.write(report)
-
-
-def tinder_send_http(d, status, _log):
- """
- Send this log as build status
- """
-
- # get the body and type
- server = d.getVar('TINDER_HOST')
- url = d.getVar('TINDER_URL')
-
- selector = url + "/xml/build_status.pl"
-
- # now post it - in chunks of 10.000 characters
- new_log = _log
- while len(new_log) > 0:
- content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
- errcode, errmsg, headers, h_file = tinder_http_post(server,selector,content_type, body)
- #print(errcode, errmsg, headers)
- #print(h.file.read())
- new_log = new_log[18000:]
-
-
-def tinder_print_info(d):
- """
- Print the TinderBox Info
- Including informations of the BaseSystem and the Tree
- we use.
- """
-
- # get the local vars
- time = tinder_time_string()
- ops = os.uname()[0]
- version = os.uname()[2]
- url = d.getVar('TINDER_URL')
- tree = d.getVar('TINDER_TREE')
- branch = d.getVar('TINDER_BRANCH')
- srcdate = d.getVar('SRCDATE')
- machine = d.getVar('MACHINE')
- distro = d.getVar('DISTRO')
- bbfiles = d.getVar('BBFILES')
- tarch = d.getVar('TARGET_ARCH')
- fpu = d.getVar('TARGET_FPU')
- oerev = d.getVar('OE_REVISION') or "unknown"
-
- # there is a bug with tipple quoted strings
- # i will work around but will fix the original
- # bug as well
- output = []
- output.append("== Tinderbox Info" )
- output.append("Time: %(time)s" )
- output.append("OS: %(ops)s" )
- output.append("%(version)s" )
- output.append("Compiler: gcc" )
- output.append("Tinderbox Client: 0.1" )
- output.append("Tinderbox Client Last Modified: yesterday" )
- output.append("Tinderbox Protocol: 0.1" )
- output.append("URL: %(url)s" )
- output.append("Tree: %(tree)s" )
- output.append("Config:" )
- output.append("branch = '%(branch)s'" )
- output.append("TARGET_ARCH = '%(tarch)s'" )
- output.append("TARGET_FPU = '%(fpu)s'" )
- output.append("SRCDATE = '%(srcdate)s'" )
- output.append("MACHINE = '%(machine)s'" )
- output.append("DISTRO = '%(distro)s'" )
- output.append("BBFILES = '%(bbfiles)s'" )
- output.append("OEREV = '%(oerev)s'" )
- output.append("== End Tinderbox Client Info" )
-
- # now create the real output
- return "\n".join(output) % vars()
-
-
-def tinder_print_env():
- """
- Print the environment variables of this build
- """
- time_start = tinder_time_string()
- time_end = tinder_time_string()
-
- # build the environment
- env = ""
- for var in os.environ:
- env += "%s=%s\n" % (var, os.environ[var])
-
- output = []
- output.append( "---> TINDERBOX RUNNING env %(time_start)s" )
- output.append( env )
- output.append( "<--- TINDERBOX FINISHED (SUCCESS) %(time_end)s" )
-
- return "\n".join(output) % vars()
-
-def tinder_tinder_start(d, event):
- """
- PRINT the configuration of this build
- """
-
- time_start = tinder_time_string()
- config = tinder_print_info(d)
- #env = tinder_print_env()
- time_end = tinder_time_string()
- packages = " ".join( event.getPkgs() )
-
- output = []
- output.append( "---> TINDERBOX PRINTING CONFIGURATION %(time_start)s" )
- output.append( config )
- #output.append( env )
- output.append( "<--- TINDERBOX FINISHED PRINTING CONFIGURATION %(time_end)s" )
- output.append( "---> TINDERBOX BUILDING '%(packages)s'" )
- output.append( "<--- TINDERBOX STARTING BUILD NOW" )
-
- output.append( "" )
-
- return "\n".join(output) % vars()
-
-def tinder_do_tinder_report(event):
- """
- Report to the tinderbox:
- On the BuildStart we will inform the box directly
- On the other events we will write to the TINDER_LOG and
- when the Task is finished we will send the report.
-
- The above is not yet fully implemented. Currently we send
- information immediately. The caching/queuing needs to be
- implemented. Also sending more or less information is not
- implemented yet.
-
- We have two temporary files stored in the TMP directory. One file
- contains the assigned machine id for the tinderclient. This id gets
- assigned when we connect the box and start the build process the second
- file is used to workaround an EventHandler limitation. If BitBake is ran
- with the continue option we want the Build to fail even if we get the
- BuildCompleted Event. In this case we have to look up the status and
- send it instead of 100/success.
- """
- import glob
-
- # variables
- name = bb.event.getName(event)
- log = ""
- status = 1
- # Check what we need to do Build* shows we start or are done
- if name == "BuildStarted":
- tinder_build_start(event.data)
- log = tinder_tinder_start(event.data,event)
-
- try:
- # truncate the tinder log file
- f = open(event.data.getVar('TINDER_LOG'), 'w')
- f.write("")
- f.close()
- except:
- pass
-
- try:
- # write a status to the file. This is needed for the -k option
- # of BitBake
- g = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- g.write("")
- g.close()
- except IOError:
- pass
-
- # Append the Task-Log (compile,configure...) to the log file
- # we will send to the server
- if name == "TaskSucceeded" or name == "TaskFailed":
- log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T'), event.task))
-
- if len(log_file) != 0:
- to_file = event.data.getVar('TINDER_LOG')
- log += "".join(open(log_file[0], 'r').readlines())
-
- # set the right 'HEADER'/Summary for the TinderBox
- if name == "TaskStarted":
- log += "---> TINDERBOX Task %s started\n" % event.task
- elif name == "TaskSucceeded":
- log += "<--- TINDERBOX Task %s done (SUCCESS)\n" % event.task
- elif name == "TaskFailed":
- log += "<--- TINDERBOX Task %s failed (FAILURE)\n" % event.task
- elif name == "PkgStarted":
- log += "---> TINDERBOX Package %s started\n" % event.data.getVar('PF')
- elif name == "PkgSucceeded":
- log += "<--- TINDERBOX Package %s done (SUCCESS)\n" % event.data.getVar('PF')
- elif name == "PkgFailed":
- if not event.data.getVar('TINDER_AUTOBUILD') == "0":
- build.exec_task('do_clean', event.data)
- log += "<--- TINDERBOX Package %s failed (FAILURE)\n" % event.data.getVar('PF')
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
- elif name == "BuildCompleted":
- log += "Build Completed\n"
- status = 100
- # Check if we have a old status...
- try:
- h = open(event.data.getVar('TMPDIR')+'/tinder-status', 'r')
- status = int(h.read())
- except:
- pass
-
- elif name == "MultipleProviders":
- log += "---> TINDERBOX Multiple Providers\n"
- log += "multiple providers are available (%s);\n" % ", ".join(event.getCandidates())
- log += "consider defining PREFERRED_PROVIDER_%s\n" % event.getItem()
- log += "is runtime: %d\n" % event.isRuntime()
- log += "<--- TINDERBOX Multiple Providers\n"
- elif name == "NoProvider":
- log += "Error: No Provider for: %s\n" % event.getItem()
- log += "Error:Was Runtime: %d\n" % event.isRuntime()
- status = 200
- # remember the failure for the -k case
- h = open(event.data.getVar('TMPDIR')+"/tinder-status", 'w')
- h.write("200")
-
- # now post the log
- if len(log) == 0:
- return
-
- # for now we will use the http post method as it is the only one
- log_post_method = tinder_send_http
- log_post_method(event.data, status, log)
-
-
-# we want to be an event handler
-addhandler tinderclient_eventhandler
-python tinderclient_eventhandler() {
- if e.data is None or bb.event.getName(e) == "MsgNote":
- return
-
- do_tinder_report = e.data.getVar('TINDER_REPORT')
- if do_tinder_report and do_tinder_report == "1":
- tinder_do_tinder_report(e)
-
- return
-}
diff --git a/external/poky/meta/classes/toaster.bbclass b/external/poky/meta/classes/toaster.bbclass
index 6cef0b8f..9518ddf7 100644
--- a/external/poky/meta/classes/toaster.bbclass
+++ b/external/poky/meta/classes/toaster.bbclass
@@ -113,7 +113,7 @@ def _toaster_load_pkgdatafile(dirpath, filepath):
pass # ignore lines without valid key: value pairs
return pkgdata
-python toaster_package_dumpdata() {
+def _toaster_dumpdata(pkgdatadir, d):
"""
Dumps the data about the packages created by a recipe
"""
@@ -122,16 +122,24 @@ python toaster_package_dumpdata() {
if not d.getVar('PACKAGES'):
return
- pkgdatadir = d.getVar('PKGDESTWORK')
lpkgdata = {}
datadir = os.path.join(pkgdatadir, 'runtime')
# scan and send data for each generated package
- for datafile in os.listdir(datadir):
- if not datafile.endswith('.packaged'):
- lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
- # Fire an event containing the pkg data
- bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+ if os.path.exists(datadir):
+ for datafile in os.listdir(datadir):
+ if not datafile.endswith('.packaged'):
+ lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
+ # Fire an event containing the pkg data
+ bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+
+python toaster_package_dumpdata() {
+ _toaster_dumpdata(d.getVar('PKGDESTWORK'), d)
+}
+
+python toaster_packagedata_dumpdata() {
+ # This path needs to match do_packagedata[sstate-inputdirs]
+ _toaster_dumpdata(os.path.join(d.getVar('WORKDIR'), 'pkgdata-pdata-input'), d)
}
# 2. Dump output image files information
@@ -366,8 +374,8 @@ toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
addhandler toaster_artifacts
toaster_artifacts[eventmask] = "bb.runqueue.runQueueTaskSkipped bb.runqueue.runQueueTaskCompleted"
-do_packagedata_setscene[postfuncs] += "toaster_package_dumpdata "
-do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
+do_packagedata_setscene[postfuncs] += "toaster_packagedata_dumpdata "
+do_packagedata_setscene[vardepsexclude] += "toaster_packagedata_dumpdata "
do_package[postfuncs] += "toaster_package_dumpdata "
do_package[vardepsexclude] += "toaster_package_dumpdata "
diff --git a/external/poky/meta/classes/toolchain-scripts.bbclass b/external/poky/meta/classes/toolchain-scripts.bbclass
index 1a2ec4f3..db1d3215 100644
--- a/external/poky/meta/classes/toolchain-scripts.bbclass
+++ b/external/poky/meta/classes/toolchain-scripts.bbclass
@@ -90,6 +90,7 @@ toolchain_shared_env_script () {
echo 'export RANLIB=${TARGET_PREFIX}ranlib' >> $script
echo 'export OBJCOPY=${TARGET_PREFIX}objcopy' >> $script
echo 'export OBJDUMP=${TARGET_PREFIX}objdump' >> $script
+ echo 'export READELF=${TARGET_PREFIX}readelf' >> $script
echo 'export AR=${TARGET_PREFIX}ar' >> $script
echo 'export NM=${TARGET_PREFIX}nm' >> $script
echo 'export M4=m4' >> $script
diff --git a/external/poky/meta/classes/uboot-extlinux-config.bbclass b/external/poky/meta/classes/uboot-extlinux-config.bbclass
index c65c421b..f4bf94be 100644
--- a/external/poky/meta/classes/uboot-extlinux-config.bbclass
+++ b/external/poky/meta/classes/uboot-extlinux-config.bbclass
@@ -104,13 +104,16 @@ python do_create_extlinux_config() {
if default:
cfgfile.write('DEFAULT %s\n' % (default))
- for label in labels.split():
+ # Need to deconflict the labels with existing overrides
+ label_overrides = labels.split()
+ default_overrides = localdata.getVar('OVERRIDES').split(':')
+ # We're keeping all the existing overrides that aren't used as a label
+ # an override for that label will be added back in while we're processing that label
+ keep_overrides = list(filter(lambda x: x not in label_overrides, default_overrides))
- overrides = localdata.getVar('OVERRIDES')
- if not overrides:
- bb.fatal('OVERRIDES not defined')
+ for label in labels.split():
- localdata.setVar('OVERRIDES', label + ':' + overrides)
+ localdata.setVar('OVERRIDES', ':'.join(keep_overrides + [label]))
extlinux_console = localdata.getVar('UBOOT_EXTLINUX_CONSOLE')
@@ -148,5 +151,7 @@ python do_create_extlinux_config() {
except OSError:
bb.fatal('Unable to open %s' % (cfile))
}
+UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
+do_create_extlinux_config[vardeps] += "${@' '.join(['UBOOT_EXTLINUX_%s_%s' % (v, l) for v in d.getVar('UBOOT_EXTLINUX_VARS').split() for l in d.getVar('UBOOT_EXTLINUX_LABELS').split()])}"
addtask create_extlinux_config before do_install do_deploy after do_compile
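
The appended vardeps expression simply enumerates every UBOOT_EXTLINUX_<VAR>_<label> combination so that changing any per-label value retriggers do_create_extlinux_config. A small sketch of the expansion with invented labels:

# Example expansion of the vardeps expression above (labels are invented).
UBOOT_EXTLINUX_VARS = "CONSOLE MENU_DESCRIPTION ROOT KERNEL_IMAGE FDTDIR FDT KERNEL_ARGS INITRD"
UBOOT_EXTLINUX_LABELS = "default fallback"
vardeps = ' '.join('UBOOT_EXTLINUX_%s_%s' % (v, l)
                   for v in UBOOT_EXTLINUX_VARS.split()
                   for l in UBOOT_EXTLINUX_LABELS.split())
# vardeps starts with:
# "UBOOT_EXTLINUX_CONSOLE_default UBOOT_EXTLINUX_CONSOLE_fallback UBOOT_EXTLINUX_MENU_DESCRIPTION_default ..."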
diff --git a/external/poky/meta/classes/uboot-sign.bbclass b/external/poky/meta/classes/uboot-sign.bbclass
index afaf46fe..982ed46d 100644
--- a/external/poky/meta/classes/uboot-sign.bbclass
+++ b/external/poky/meta/classes/uboot-sign.bbclass
@@ -19,11 +19,15 @@
# The task sequence is set as below, using DEPLOY_IMAGE_DIR as a common place to
# handle the device tree blob:
#
-# u-boot:do_deploy_dtb
-# u-boot:do_deploy
-# virtual/kernel:do_assemble_fitimage
-# u-boot:do_concat_dtb
-# u-boot:do_install
+# * u-boot:do_install_append
+#   Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
+#   signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
+#
+# * virtual/kernel:do_assemble_fitimage
+# Sign the image
+#
+# * u-boot:do_deploy[postfuncs]
+# Deploy files like UBOOT_DTB_IMAGE, UBOOT_DTB_SYMLINK and others.
#
# For more details on signature process, please refer to U-Boot documentation.
@@ -38,58 +42,90 @@ UBOOT_NODTB_IMAGE ?= "u-boot-nodtb-${MACHINE}-${PV}-${PR}.${UBOOT_SUFFIX}"
UBOOT_NODTB_BINARY ?= "u-boot-nodtb.${UBOOT_SUFFIX}"
UBOOT_NODTB_SYMLINK ?= "u-boot-nodtb-${MACHINE}.${UBOOT_SUFFIX}"
-#
-# Following is relevant only for u-boot recipes:
-#
-
-do_deploy_dtb () {
- mkdir -p ${DEPLOYDIR}
- cd ${DEPLOYDIR}
+# Functions in this bbclass are for u-boot only
+UBOOT_PN = "${@d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'}"
- if [ -f ${B}/${UBOOT_DTB_BINARY} ]; then
- install ${B}/${UBOOT_DTB_BINARY} ${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
- rm -f ${UBOOT_DTB_BINARY} ${UBOOT_DTB_SYMLINK}
- ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_SYMLINK}
- ln -sf ${UBOOT_DTB_IMAGE} ${UBOOT_DTB_BINARY}
+concat_dtb_helper() {
+ if [ -e "${UBOOT_DTB_BINARY}" ]; then
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_BINARY}
+ ln -sf ${UBOOT_DTB_IMAGE} ${DEPLOYDIR}/${UBOOT_DTB_SYMLINK}
fi
- if [ -f ${B}/${UBOOT_NODTB_BINARY} ]; then
- install ${B}/${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
- rm -f ${UBOOT_NODTB_BINARY} ${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_SYMLINK}
- ln -sf ${UBOOT_NODTB_IMAGE} ${UBOOT_NODTB_BINARY}
+
+ if [ -f "${UBOOT_NODTB_BINARY}" ]; then
+ install ${UBOOT_NODTB_BINARY} ${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_SYMLINK}
+ ln -sf ${UBOOT_NODTB_IMAGE} ${DEPLOYDIR}/${UBOOT_NODTB_BINARY}
fi
-}
-do_concat_dtb () {
# Concatenate U-Boot w/o DTB & DTB with public key
# (cf. kernel-fitimage.bbclass for more details)
- if [ "x${UBOOT_SIGN_ENABLE}" = "x1" ]; then
- if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
- [ -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
+ deployed_uboot_dtb_binary='${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_IMAGE}'
+ if [ "x${UBOOT_SUFFIX}" = "ximg" -o "x${UBOOT_SUFFIX}" = "xrom" ] && \
+ [ -e "$deployed_uboot_dtb_binary" ]; then
+ oe_runmake EXT_DTB=$deployed_uboot_dtb_binary
+ install ${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
+ elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "$deployed_uboot_dtb_binary" ]; then
+ cd ${DEPLOYDIR}
+ cat ${UBOOT_NODTB_IMAGE} $deployed_uboot_dtb_binary | tee ${B}/${CONFIG_B_PATH}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ else
+ bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ fi
+}
+
+concat_dtb() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ mkdir -p ${DEPLOYDIR}
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ CONFIG_B_PATH="${config}"
+ cd ${B}/${config}
+ concat_dtb_helper
+ done
+ else
+ CONFIG_B_PATH=""
cd ${B}
- oe_runmake EXT_DTB=${DEPLOYDIR}/${UBOOT_DTB_IMAGE}
- install ${B}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE}
- install ${B}/${UBOOT_BINARY} ${DEPLOY_DIR_IMAGE}/${UBOOT_IMAGE}
- elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then
- cd ${DEPLOYDIR}
- cat ${UBOOT_NODTB_IMAGE} ${UBOOT_DTB_IMAGE} | tee ${B}/${UBOOT_BINARY} > ${UBOOT_IMAGE}
+ concat_dtb_helper
+ fi
+ fi
+}
+
+# Install UBOOT_DTB_BINARY to datadir, so that the kernel can use it for
+# signing, and the kernel will deploy UBOOT_DTB_BINARY after signing it.
+install_helper() {
+ if [ -f "${UBOOT_DTB_BINARY}" ]; then
+ install -d ${D}${datadir}
+ # UBOOT_DTB_BINARY is a symlink to UBOOT_DTB_IMAGE, so we
+ # need both of them.
+ install ${UBOOT_DTB_BINARY} ${D}${datadir}/${UBOOT_DTB_IMAGE}
+ ln -sf ${UBOOT_DTB_IMAGE} ${D}${datadir}/${UBOOT_DTB_BINARY}
+ else
+ bbwarn "${UBOOT_DTB_BINARY} not found"
+ fi
+}
+
+do_install_append() {
+ if [ "${UBOOT_SIGN_ENABLE}" = "1" -a "${PN}" = "${UBOOT_PN}" -a -n "${UBOOT_DTB_BINARY}" ]; then
+ if [ -n "${UBOOT_CONFIG}" ]; then
+ for config in ${UBOOT_MACHINE}; do
+ cd ${B}/${config}
+ install_helper
+ done
else
- bbwarn "Failure while adding public key to u-boot binary. Verified boot won't be available."
+ cd ${B}
+ install_helper
fi
fi
}
python () {
- uboot_pn = d.getVar('PREFERRED_PROVIDER_u-boot') or 'u-boot'
- if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == uboot_pn:
+ if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'):
kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel')
- # u-boot.dtb and u-boot-nodtb.bin are deployed _before_ do_deploy
- # Thus, do_deploy_setscene will also populate them in DEPLOY_IMAGE_DIR
- bb.build.addtask('do_deploy_dtb', 'do_deploy', 'do_compile', d)
+	# Make "bitbake u-boot -cdeploy" deploy the signed u-boot.dtb
+ d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn)
- # do_concat_dtb is scheduled _before_ do_install as it overwrite the
- # u-boot.bin in both DEPLOYDIR and DEPLOY_IMAGE_DIR.
- bb.build.addtask('do_concat_dtb', 'do_install', None, d)
- d.appendVarFlag('do_concat_dtb', 'depends', ' %s:do_assemble_fitimage' % kernel_pn)
+	# The kernel's do_deploy is a little special, so we can't use
+	# do_deploy_append; otherwise it would override
+ # kernel_do_deploy.
+ d.appendVarFlag('do_deploy', 'prefuncs', ' concat_dtb')
}
diff --git a/external/poky/meta/classes/uninative.bbclass b/external/poky/meta/classes/uninative.bbclass
index 3326c0db..316c0f06 100644
--- a/external/poky/meta/classes/uninative.bbclass
+++ b/external/poky/meta/classes/uninative.bbclass
@@ -1,4 +1,4 @@
-UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}"
+UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}${@bb.utils.contains('BUILD_ARCH', 'ppc64le', 'ld64.so.2', '', d)}"
UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
UNINATIVE_URL ?= "unset"
@@ -45,7 +45,7 @@ python uninative_event_fetchloader() {
tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR"), chksum)
tarballpath = os.path.join(tarballdir, tarball)
- if not os.path.exists(tarballpath):
+ if not os.path.exists(tarballpath + ".done"):
bb.utils.mkdirhier(tarballdir)
if d.getVar("UNINATIVE_URL") == "unset":
bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
@@ -56,12 +56,17 @@ python uninative_event_fetchloader() {
# Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work
# and we can't easily put 'chksum' into the url path from a url parameter with
# the current fetcher url handling
- ownmirror = d.getVar('SOURCE_MIRROR_URL')
- if ownmirror:
- localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum)
+ premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS"))
+ for line in premirrors:
+ try:
+ (find, replace) = line
+ except ValueError:
+ continue
+ if find.startswith("http"):
+ localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum))
srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
- bb.note("Fetching uninative binary shim from %s" % srcuri)
+ bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri)
fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
fetcher.download()
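
The PREMIRRORS rewriting above means every existing http(s) premirror is also tried for the uninative shim before UNINATIVE_URL itself. A hedged sketch of the string that gets appended, using a made-up mirror and checksum:

# Invented values purely to illustrate the appended PREMIRRORS entry.
premirrors = [("http://.*/.*", "http://mirror.example.com/sources")]
chksum = "0123abcd"                       # stand-in for the per-arch UNINATIVE_CHECKSUM
extra = ""
for find, replace in premirrors:
    if find.startswith("http"):
        extra += " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum)
# extra is appended to PREMIRRORS, so the fetcher probes the local mirror first.
print(extra)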
diff --git a/external/poky/meta/classes/update-alternatives.bbclass b/external/poky/meta/classes/update-alternatives.bbclass
index a7f1a6fd..8c2b66e7 100644
--- a/external/poky/meta/classes/update-alternatives.bbclass
+++ b/external/poky/meta/classes/update-alternatives.bbclass
@@ -89,15 +89,21 @@ def ua_extend_depends(d):
if not 'virtual/update-alternatives' in d.getVar('PROVIDES'):
d.appendVar('DEPENDS', ' virtual/${MLPREFIX}update-alternatives')
-python __anonymous() {
+def update_alternatives_enabled(d):
# Update Alternatives only works on target packages...
if bb.data.inherits_class('native', d) or \
bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or \
bb.data.inherits_class('cross-canadian', d):
- return
+ return False
# Disable when targeting mingw32 (no target support)
if d.getVar("TARGET_OS") == "mingw32":
+ return False
+
+ return True
+
+python __anonymous() {
+ if not update_alternatives_enabled(d):
return
# compute special vardeps
@@ -125,9 +131,21 @@ def gen_updatealternativesvars(d):
populate_packages[vardeps] += "${UPDALTVARS} ${@gen_updatealternativesvars(d)}"
# We need to do the rename after the image creation step, but before
-# the split and strip steps.. packagecopy seems to be the earliest reasonable
-# place.
-python perform_packagecopy_append () {
+# the split and strip steps.. PACKAGE_PREPROCESS_FUNCS is the right
+# place for that.
+PACKAGE_PREPROCESS_FUNCS += "apply_update_alternative_renames"
+python apply_update_alternative_renames () {
+ if not update_alternatives_enabled(d):
+ return
+
+ import re
+
+ def update_files(alt_target, alt_target_rename, pkg, d):
+ f = d.getVar('FILES_' + pkg)
+ if f:
+ f = re.sub(r'(^|\s)%s(\s|$)' % re.escape (alt_target), r'\1%s\2' % alt_target_rename, f)
+ d.setVar('FILES_' + pkg, f)
+
# Check for deprecated usage...
pn = d.getVar('BPN')
if d.getVar('ALTERNATIVE_LINKS') != None:
@@ -137,7 +155,7 @@ python perform_packagecopy_append () {
pkgdest = d.getVar('PKGD')
for pkg in (d.getVar('PACKAGES') or "").split():
# If the src == dest, we know we need to rename the dest by appending ${BPN}
- link_rename = {}
+ link_rename = []
for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
if not alt_link:
@@ -163,10 +181,11 @@ python perform_packagecopy_append () {
elif os.path.lexists(src):
if os.path.islink(src):
# Delay rename of links
- link_rename[alt_target] = alt_target_rename
+ link_rename.append((alt_target, alt_target_rename))
else:
bb.note('%s: Rename %s -> %s' % (pn, alt_target, alt_target_rename))
os.rename(src, dest)
+ update_files(alt_target, alt_target_rename, pkg, d)
else:
bb.warn("%s: alternative target (%s or %s) does not exist, skipping..." % (pn, alt_target, alt_target_rename))
continue
@@ -174,61 +193,85 @@ python perform_packagecopy_append () {
# Process delayed link names
# Do these after other renames so we can correct broken links
- for alt_target in link_rename:
+ for (alt_target, alt_target_rename) in link_rename:
src = '%s/%s' % (pkgdest, alt_target)
- dest = '%s/%s' % (pkgdest, link_rename[alt_target])
- link = os.readlink(src)
+ dest = '%s/%s' % (pkgdest, alt_target_rename)
link_target = oe.path.realpath(src, pkgdest, True)
if os.path.lexists(link_target):
# Ok, the link_target exists, we can rename
- bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, link_rename[alt_target]))
+ bb.note('%s: Rename (link) %s -> %s' % (pn, alt_target, alt_target_rename))
os.rename(src, dest)
else:
# Try to resolve the broken link to link.${BPN}
link_maybe = '%s.%s' % (os.readlink(src), pn)
if os.path.lexists(os.path.join(os.path.dirname(src), link_maybe)):
# Ok, the renamed link target exists.. create a new link, and remove the original
- bb.note('%s: Creating new link %s -> %s' % (pn, link_rename[alt_target], link_maybe))
+ bb.note('%s: Creating new link %s -> %s' % (pn, alt_target_rename, link_maybe))
os.symlink(link_maybe, dest)
os.unlink(src)
else:
bb.warn('%s: Unable to resolve dangling symlink: %s' % (pn, alt_target))
+ continue
+ update_files(alt_target, alt_target_rename, pkg, d)
}
+def update_alternatives_alt_targets(d, pkg):
+ """
+ Returns the update-alternatives metadata for a package.
+
+ The returned format is a list of tuples where the tuple contains:
+ alt_name: The binary name
+ alt_link: The path for the binary (Shared by different packages)
+ alt_target: The path for the renamed binary (Unique per package)
+ alt_priority: The priority of the alt_target
+
+ All the alt_targets will be installed into the sysroot. The alt_link is
+ a symlink pointing to the alt_target with the highest priority.
+ """
+
+ pn = d.getVar('BPN')
+ pkgdest = d.getVar('PKGD')
+ updates = list()
+ for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
+ alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
+ alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_TARGET', alt_name) or \
+ d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_TARGET') or \
+ alt_link
+ alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or \
+ d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name) or \
+ d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or \
+ d.getVar('ALTERNATIVE_PRIORITY')
+
+ # This shouldn't trigger, as it should have been resolved earlier!
+ if alt_link == alt_target:
+ bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
+ alt_target = '%s.%s' % (alt_target, pn)
+
+ if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
+ bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
+ continue
+
+ alt_target = os.path.normpath(alt_target)
+ updates.append( (alt_name, alt_link, alt_target, alt_priority) )
+
+ return updates
+
PACKAGESPLITFUNCS_prepend = "populate_packages_updatealternatives "
python populate_packages_updatealternatives () {
- pn = d.getVar('BPN')
+ if not update_alternatives_enabled(d):
+ return
# Do actual update alternatives processing
- pkgdest = d.getVar('PKGD')
for pkg in (d.getVar('PACKAGES') or "").split():
# Create post install/removal scripts
alt_setup_links = ""
alt_remove_links = ""
- for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
- alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
- alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
- alt_target = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg) or d.getVar('ALTERNATIVE_TARGET') or alt_link
- # Sometimes alt_target is specified as relative to the link name.
- alt_target = os.path.join(os.path.dirname(alt_link), alt_target)
-
- alt_priority = d.getVarFlag('ALTERNATIVE_PRIORITY_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_PRIORITY', alt_name)
- alt_priority = alt_priority or d.getVar('ALTERNATIVE_PRIORITY_%s' % pkg) or d.getVar('ALTERNATIVE_PRIORITY')
-
- # This shouldn't trigger, as it should have been resolved earlier!
- if alt_link == alt_target:
- bb.note('alt_link == alt_target: %s == %s -- correcting, this should not happen!' % (alt_link, alt_target))
- alt_target = '%s.%s' % (alt_target, pn)
-
- if not os.path.lexists('%s/%s' % (pkgdest, alt_target)):
- bb.warn('%s: NOT adding alternative provide %s: %s does not exist' % (pn, alt_link, alt_target))
- continue
-
- # Default to generate shell script.. eventually we may want to change this...
- alt_target = os.path.normpath(alt_target)
-
+ updates = update_alternatives_alt_targets(d, pkg)
+ for alt_name, alt_link, alt_target, alt_priority in updates:
alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority)
alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target)
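update_alternatives_alt_targets() resolves each alternative by walking a fixed fallback chain: per-package varflag, then global varflag, then per-package variable, then global variable, and finally (for the target only) the link name itself. A rough standalone sketch of that precedence is below; plain dictionaries stand in for d.getVar()/d.getVarFlag(), and the package and path names are illustrative assumptions:

    # Rough sketch of the fallback order used for ALTERNATIVE_TARGET and
    # ALTERNATIVE_PRIORITY above; plain dicts replace the BitBake datastore.
    def resolve(varflags, variables, base, pkg, alt_name, default=None):
        return (varflags.get("%s_%s" % (base, pkg), {}).get(alt_name)
                or varflags.get(base, {}).get(alt_name)
                or variables.get("%s_%s" % (base, pkg))
                or variables.get(base)
                or default)

    varflags = {"ALTERNATIVE_TARGET": {"vim": "/usr/bin/vim.vim"}}
    variables = {"ALTERNATIVE_PRIORITY": "100"}

    alt_link = "/usr/bin/vim"
    alt_target = resolve(varflags, variables, "ALTERNATIVE_TARGET", "vim", "vim",
                         default=alt_link)
    alt_priority = resolve(varflags, variables, "ALTERNATIVE_PRIORITY", "vim", "vim")
    print(alt_target, alt_priority)  # -> /usr/bin/vim.vim 100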
@@ -241,8 +284,11 @@ python populate_packages_updatealternatives () {
bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg)
bb.note('%s' % alt_setup_links)
- postinst = d.getVar('pkg_postinst_%s' % pkg) or '#!/bin/sh\n'
- postinst += alt_setup_links
+ postinst = d.getVar('pkg_postinst_%s' % pkg)
+ if postinst:
+ postinst = alt_setup_links + postinst
+ else:
+ postinst = '#!/bin/sh\n' + alt_setup_links
d.setVar('pkg_postinst_%s' % pkg, postinst)
bb.note('%s' % alt_remove_links)
@@ -252,10 +298,15 @@ python populate_packages_updatealternatives () {
}
python package_do_filedeps_append () {
+ if update_alternatives_enabled(d):
+ apply_update_alternative_provides(d)
+}
+
+def apply_update_alternative_provides(d):
pn = d.getVar('BPN')
pkgdest = d.getVar('PKGDEST')
- for pkg in packages.split():
+ for pkg in d.getVar('PACKAGES').split():
for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg) or "").split():
alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name)
alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name)
@@ -273,5 +324,4 @@ python package_do_filedeps_append () {
d.appendVar('FILERPROVIDES_%s_%s' % (trans_target, pkg), " " + alt_link)
if not trans_target in (d.getVar('FILERPROVIDESFLIST_%s' % pkg) or ""):
d.appendVar('FILERPROVIDESFLIST_%s' % pkg, " " + trans_target)
-}
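When a renamed alternative is actually moved on disk, apply_update_alternative_renames() also rewrites FILES_<pkg> so packaging still matches the renamed path. A small standalone sketch of the substitution used by update_files() above; the file names are made up for illustration:

    import re

    # Sketch of the FILES_<pkg> rewrite done by update_files() above: replace the
    # old path only where it appears as a whole, whitespace-delimited entry.
    def rename_in_files(files, alt_target, alt_target_rename):
        return re.sub(r'(^|\s)%s(\s|$)' % re.escape(alt_target),
                      r'\1%s\2' % alt_target_rename, files)

    files = "/usr/bin/vim /usr/share/vim"
    print(rename_in_files(files, "/usr/bin/vim", "/usr/bin/vim.vim"))
    # -> "/usr/bin/vim.vim /usr/share/vim"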
diff --git a/external/poky/meta/classes/update-rc.d.bbclass b/external/poky/meta/classes/update-rc.d.bbclass
index 265c4be9..1366fee6 100644
--- a/external/poky/meta/classes/update-rc.d.bbclass
+++ b/external/poky/meta/classes/update-rc.d.bbclass
@@ -20,28 +20,14 @@ def use_updatercd(d):
return '[ -n "$D" -o ! -d /run/systemd/system ]'
return 'true'
-updatercd_preinst() {
-if ${@use_updatercd(d)} && [ -z "$D" -a -f "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then
- ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || :
-fi
-if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
- if [ -n "$D" ]; then
- OPT="-f -r $D"
- else
- OPT="-f"
- fi
- update-rc.d $OPT ${INITSCRIPT_NAME} remove
-fi
-}
-
PACKAGE_WRITE_DEPS += "update-rc.d-native"
updatercd_postinst() {
if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
if [ -n "$D" ]; then
- OPT="-f -r $D"
+ OPT="-r $D"
else
- OPT="-f -s"
+ OPT="-s"
fi
update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
fi
@@ -79,7 +65,7 @@ python __anonymous() {
PACKAGESPLITFUNCS_prepend = "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'populate_packages_updatercd ', '', d)}"
PACKAGESPLITFUNCS_remove_class-nativesdk = "populate_packages_updatercd "
-populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_preinst updatercd_postinst"
+populate_packages_updatercd[vardeps] += "updatercd_prerm updatercd_postrm updatercd_postinst"
populate_packages_updatercd[vardepsexclude] += "OVERRIDES"
python populate_packages_updatercd () {
@@ -95,7 +81,7 @@ python populate_packages_updatercd () {
d.appendVar('RDEPENDS_' + pkg, ' %sinitd-functions' % (mlprefix))
def update_rcd_package(pkg):
- bb.debug(1, 'adding update-rc.d calls to preinst/postinst/prerm/postrm for %s' % pkg)
+ bb.debug(1, 'adding update-rc.d calls to postinst/prerm/postrm for %s' % pkg)
localdata = bb.data.createCopy(d)
overrides = localdata.getVar("OVERRIDES")
@@ -103,12 +89,6 @@ python populate_packages_updatercd () {
update_rcd_auto_depend(pkg)
- preinst = d.getVar('pkg_preinst_%s' % pkg)
- if not preinst:
- preinst = '#!/bin/sh\n'
- preinst += localdata.getVar('updatercd_preinst')
- d.setVar('pkg_preinst_%s' % pkg, preinst)
-
postinst = d.getVar('pkg_postinst_%s' % pkg)
if not postinst:
postinst = '#!/bin/sh\n'
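With the preinst fragment dropped, the class only splices its shell snippet into pkg_postinst (and prerm/postrm, not shown in this excerpt). A minimal sketch of that composition step, with plain strings standing in for the datastore values; the fragment text below is illustrative, not the real updatercd_postinst body:

    # Sketch of how an update-rc.d shell fragment is spliced into an existing
    # (or newly created) pkg_postinst script, as in the class above.
    def compose_postinst(existing, fragment):
        postinst = existing or '#!/bin/sh\n'
        return postinst + fragment

    fragment = 'update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}\n'  # illustrative
    print(compose_postinst(None, fragment))
    print(compose_postinst('#!/bin/sh\necho existing step\n', fragment))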
diff --git a/external/poky/meta/classes/useradd-staticids.bbclass b/external/poky/meta/classes/useradd-staticids.bbclass
index 70d59e55..3a1b5f13 100644
--- a/external/poky/meta/classes/useradd-staticids.bbclass
+++ b/external/poky/meta/classes/useradd-staticids.bbclass
@@ -76,8 +76,8 @@ def update_useradd_static_config(d):
for param in oe.useradd.split_commands(params):
try:
uaargs = parser.parse_args(oe.useradd.split_args(param))
- except:
- bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for USERADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all passwd files specified in USERADD_UID_TABLES or files/passwd
# Use the standard passwd layout:
@@ -197,8 +197,8 @@ def update_useradd_static_config(d):
try:
# If we're processing multiple lines, we could have left over values here...
gaargs = parser.parse_args(oe.useradd.split_args(param))
- except:
- bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN'), pkg, param))
+ except Exception as e:
+ bb.fatal("%s: Unable to parse arguments for GROUPADD_PARAM_%s '%s': %s" % (d.getVar('PN'), pkg, param, e))
# Read all group files specified in USERADD_GID_TABLES or files/group
# Use the standard group layout:
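The change above reports the underlying parse error instead of swallowing it with a bare except. A rough standalone illustration of the pattern, using argparse directly; the real class uses a parser built by oe.useradd, so the parser construction, option names and package name below are assumptions:

    import argparse

    # argparse normally calls sys.exit() on bad input; raising instead lets the
    # caller attach context, which is what the improved fatal message above does.
    class RaisingParser(argparse.ArgumentParser):
        def error(self, message):
            raise ValueError(message)

    parser = RaisingParser(prog="useradd", add_help=False)
    parser.add_argument("-u", "--uid")
    parser.add_argument("LOGIN")

    param = "--uid abc --bogus-option myuser"  # illustrative bad parameters
    try:
        parser.parse_args(param.split())
    except Exception as e:
        print("mypkg: Unable to parse arguments for USERADD_PARAM_mypkg '%s': %s"
              % (param, e))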
diff --git a/external/poky/meta/classes/useradd.bbclass b/external/poky/meta/classes/useradd.bbclass
index 124becd0..e5f3ba24 100644
--- a/external/poky/meta/classes/useradd.bbclass
+++ b/external/poky/meta/classes/useradd.bbclass
@@ -100,6 +100,8 @@ useradd_sysroot () {
# Pseudo may (do_prepare_recipe_sysroot) or may not (do_populate_sysroot_setscene) be running
# at this point so we're explicit about the environment so pseudo can load if
# not already present.
+ # PSEUDO_SYSROOT can contain references to the build architecture and COMPONENT_DIR
+ # so needs the STAGING_FIXME below
export PSEUDO="${FAKEROOTENV} ${PSEUDO_SYSROOT}${bindir_native}/pseudo"
# Explicitly set $D since it isn't set to anything
@@ -134,10 +136,10 @@ useradd_sysroot () {
}
# The export of PSEUDO in useradd_sysroot() above contains references to
-# ${COMPONENTS_DIR} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
+# ${PSEUDO_SYSROOT} and ${PSEUDO_LOCALSTATEDIR}. Additionally, the logging
# shell functions use ${LOGFIFO}. These need to be handled when restoring
# postinst-useradd-${PN} from the sstate cache.
-EXTRA_STAGING_FIXMES += "COMPONENTS_DIR PSEUDO_LOCALSTATEDIR LOGFIFO"
+EXTRA_STAGING_FIXMES += "PSEUDO_SYSROOT PSEUDO_LOCALSTATEDIR LOGFIFO"
python useradd_sysroot_sstate () {
scriptfile = None
@@ -182,6 +184,7 @@ def update_useradd_after_parse(d):
bb.fatal("%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE', False))
for pkg in useradd_packages.split():
+ d.appendVarFlag("do_populate_sysroot", "vardeps", "USERADD_PARAM_%s GROUPADD_PARAM_%s GROUPMEMS_PARAM_%s" % (pkg, pkg, pkg))
if not d.getVar('USERADD_PARAM_%s' % pkg) and not d.getVar('GROUPADD_PARAM_%s' % pkg) and not d.getVar('GROUPMEMS_PARAM_%s' % pkg):
bb.fatal("%s inherits useradd but doesn't set USERADD_PARAM, GROUPADD_PARAM or GROUPMEMS_PARAM for package %s" % (d.getVar('FILE', False), pkg))
diff --git a/external/poky/meta/classes/utils.bbclass b/external/poky/meta/classes/utils.bbclass
index 0016e5c4..cd3d0570 100644
--- a/external/poky/meta/classes/utils.bbclass
+++ b/external/poky/meta/classes/utils.bbclass
@@ -264,7 +264,7 @@ create_wrapper () {
realpath=\`readlink -fn \$0\`
realdir=\`dirname \$realpath\`
export $exportstring
-exec -a \`dirname \$realpath\`/$cmdname \`dirname \$realpath\`/$cmdname.real "\$@"
+exec -a "\$0" \$realdir/$cmdname.real "\$@"
END
chmod +x $cmd
}
diff --git a/external/poky/meta/classes/vala.bbclass b/external/poky/meta/classes/vala.bbclass
index 615eb379..bcaf68c5 100644
--- a/external/poky/meta/classes/vala.bbclass
+++ b/external/poky/meta/classes/vala.bbclass
@@ -8,7 +8,7 @@ DEPENDS_append = " vala-native ${VALADEPENDS}"
# Our patched version of Vala looks in STAGING_DATADIR for .vapi files
export STAGING_DATADIR
# Upstream Vala >= 0.11 looks in XDG_DATA_DIRS for .vapi files
-export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+export XDG_DATA_DIRS = "${STAGING_DATADIR}:${STAGING_LIBDIR}"
# Package additional files
FILES_${PN}-dev += "\
diff --git a/external/poky/meta/classes/waf.bbclass b/external/poky/meta/classes/waf.bbclass
index 19e93761..90024400 100644
--- a/external/poky/meta/classes/waf.bbclass
+++ b/external/poky/meta/classes/waf.bbclass
@@ -1,8 +1,36 @@
# avoids build breaks when using no-static-libs.inc
DISABLE_STATIC = ""
+B = "${WORKDIR}/build"
+
EXTRA_OECONF_append = " ${PACKAGECONFIG_CONFARGS}"
+def waflock_hash(d):
+ # Calculates the hash used for the waf lock file. This should include
+ # all of the user controllable inputs passed to waf configure. Note
+ # that the full paths for ${B} and ${S} are used; this is OK and desired
+ # because a change to either of these should create a unique lock file
+ # to prevent collisions.
+ import hashlib
+ h = hashlib.sha512()
+ def update(name):
+ val = d.getVar(name)
+ if val is not None:
+ h.update(val.encode('utf-8'))
+ update('S')
+ update('B')
+ update('prefix')
+ update('EXTRA_OECONF')
+ return h.hexdigest()
+
+# Use WAFLOCK to specify a separate lock file. The build is already
+# sufficiently isolated by setting the output directory, this ensures that
+# bitbake won't step on toes of any other configured context in the source
+# directory (e.g. if the source is coming from externalsrc and was previously
+# configured elsewhere).
+export WAFLOCK = ".lock-waf_oe_${@waflock_hash(d)}_build"
+BB_HASHBASE_WHITELIST += "WAFLOCK"
+
python waf_preconfigure() {
import subprocess
from distutils.version import StrictVersion
@@ -22,16 +50,16 @@ python waf_preconfigure() {
do_configure[prefuncs] += "waf_preconfigure"
waf_do_configure() {
- ${S}/waf configure --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF}
+ (cd ${S} && ./waf configure -o ${B} --prefix=${prefix} ${WAF_EXTRA_CONF} ${EXTRA_OECONF})
}
do_compile[progress] = "outof:^\[\s*(\d+)/\s*(\d+)\]\s+"
waf_do_compile() {
- ${S}/waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)}
+ (cd ${S} && ./waf build ${@oe.utils.parallel_make_argument(d, '-j%d', limit=64)})
}
waf_do_install() {
- ${S}/waf install --destdir=${D}
+ (cd ${S} && ./waf install --destdir=${D})
}
EXPORT_FUNCTIONS do_configure do_compile do_install
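The lock-file name only needs to change when a configure input changes, which is why waflock_hash() feeds ${S}, ${B}, ${prefix} and EXTRA_OECONF into a sha512. A standalone sketch of the same idea, with a plain dict in place of the datastore and made-up values:

    import hashlib

    # Standalone version of the waflock_hash() idea above: hash every
    # user-controllable configure input so each configuration gets its own lock file.
    def waflock_hash(config):
        h = hashlib.sha512()
        for name in ('S', 'B', 'prefix', 'EXTRA_OECONF'):
            val = config.get(name)
            if val is not None:
                h.update(val.encode('utf-8'))
        return h.hexdigest()

    cfg = {'S': '/work/src', 'B': '/work/build', 'prefix': '/usr', 'EXTRA_OECONF': ''}
    print(".lock-waf_oe_%s_build" % waflock_hash(cfg))
    # Changing any input (e.g. B) yields a different lock file name:
    print(".lock-waf_oe_%s_build" % waflock_hash({**cfg, 'B': '/work/build-native'}))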
diff --git a/external/poky/meta/classes/xmlcatalog.bbclass b/external/poky/meta/classes/xmlcatalog.bbclass
new file mode 100644
index 00000000..ae4811fd
--- /dev/null
+++ b/external/poky/meta/classes/xmlcatalog.bbclass
@@ -0,0 +1,26 @@
+DEPENDS = "libxml2-native"
+
+# A whitespace-separated list of XML catalogs to be registered, for example
+# "${sysconfdir}/xml/docbook-xml.xml".
+XMLCATALOGS ?= ""
+
+SYSROOT_PREPROCESS_FUNCS_append = " xmlcatalog_sstate_postinst"
+
+xmlcatalog_complete() {
+ ROOTCATALOG="${STAGING_ETCDIR_NATIVE}/xml/catalog"
+ if [ ! -f $ROOTCATALOG ]; then
+ mkdir --parents $(dirname $ROOTCATALOG)
+ xmlcatalog --noout --create $ROOTCATALOG
+ fi
+ for CATALOG in ${XMLCATALOGS}; do
+ xmlcatalog --noout --add nextCatalog unused file://$CATALOG $ROOTCATALOG
+ done
+}
+
+xmlcatalog_sstate_postinst() {
+ mkdir -p ${SYSROOT_DESTDIR}${bindir}
+ dest=${SYSROOT_DESTDIR}${bindir}/postinst-${PN}-xmlcatalog
+ echo '#!/bin/sh' > $dest
+ echo '${xmlcatalog_complete}' >> $dest
+ chmod 0755 $dest
+}
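xmlcatalog_complete() drives the libxml2 xmlcatalog tool: create the root catalog if it is missing, then add one nextCatalog entry per catalog listed in XMLCATALOGS. A rough Python rendering of those two commands via subprocess; the shell version in the class is what actually runs, and the paths and catalog list below are illustrative assumptions:

    import os
    import subprocess

    # Rough Python rendering of xmlcatalog_complete() above; requires the
    # xmlcatalog tool from libxml2 on PATH. Paths are illustrative.
    def register_catalogs(root_catalog, catalogs):
        if not os.path.isfile(root_catalog):
            os.makedirs(os.path.dirname(root_catalog), exist_ok=True)
            subprocess.run(["xmlcatalog", "--noout", "--create", root_catalog],
                           check=True)
        for catalog in catalogs:
            subprocess.run(["xmlcatalog", "--noout", "--add", "nextCatalog",
                            "unused", "file://" + catalog, root_catalog],
                           check=True)

    register_catalogs("/tmp/sysroot/etc/xml/catalog",
                      ["/tmp/sysroot/etc/xml/docbook-xml.xml"])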