Diffstat (limited to 'external/poky/scripts')
-rwxr-xr-x  external/poky/scripts/autobuilder-worker-prereq-tests | 12
-rwxr-xr-x  external/poky/scripts/bitbake-prserv-tool | 3
-rwxr-xr-x  external/poky/scripts/bitbake-whatchanged | 12
-rwxr-xr-x  external/poky/scripts/buildhistory-collect-srcrevs | 13
-rwxr-xr-x  external/poky/scripts/buildhistory-diff | 3
-rwxr-xr-x  external/poky/scripts/buildstats-diff | 24
-rwxr-xr-x  external/poky/scripts/combo-layer | 14
-rwxr-xr-x  external/poky/scripts/combo-layer-hook-default.sh | 3
-rwxr-xr-x  external/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh | 15
-rwxr-xr-x  external/poky/scripts/contrib/bb-perf/bb-matrix.sh | 15
-rwxr-xr-x  external/poky/scripts/contrib/bb-perf/buildstats-plot.sh | 93
-rwxr-xr-x  external/poky/scripts/contrib/bb-perf/buildstats.sh | 154
-rwxr-xr-x  external/poky/scripts/contrib/bbvars.py | 14
-rwxr-xr-x  external/poky/scripts/contrib/build-perf-test-wrapper.sh | 10
-rwxr-xr-x  external/poky/scripts/contrib/ddimage | 92
-rwxr-xr-x  external/poky/scripts/contrib/devtool-stress.py | 13
-rwxr-xr-x  external/poky/scripts/contrib/dialog-power-control | 2
-rwxr-xr-x  external/poky/scripts/contrib/documentation-audit.sh | 3
-rwxr-xr-x  external/poky/scripts/contrib/graph-tool | 13
-rwxr-xr-x  external/poky/scripts/contrib/list-packageconfig-flags.py | 17
-rwxr-xr-x  external/poky/scripts/contrib/oe-build-perf-report-email.py | 10
-rwxr-xr-x  external/poky/scripts/contrib/patchreview.py | 3
-rwxr-xr-x  external/poky/scripts/contrib/patchtest.sh | 18
-rwxr-xr-x  external/poky/scripts/contrib/serdevtry | 3
-rwxr-xr-x  external/poky/scripts/contrib/test_build_time.sh | 16
-rwxr-xr-x  external/poky/scripts/contrib/test_build_time_worker.sh | 4
-rwxr-xr-x  external/poky/scripts/contrib/uncovered | 15
-rwxr-xr-x  external/poky/scripts/contrib/verify-homepage.py | 4
-rwxr-xr-x  external/poky/scripts/cp-noerror | 2
-rwxr-xr-x  external/poky/scripts/create-pull-request | 37
-rwxr-xr-x  external/poky/scripts/crosstap | 13
-rwxr-xr-x  external/poky/scripts/devtool | 12
-rwxr-xr-x  external/poky/scripts/distro/build-recipe-list.py | 129
-rwxr-xr-x  external/poky/scripts/distro/distrocompare.sh | 123
-rwxr-xr-x  external/poky/scripts/gen-lockedsig-cache | 56
-rwxr-xr-x  external/poky/scripts/gen-site-config | 12
-rwxr-xr-x  external/poky/scripts/install-buildtools | 343
-rw-r--r--  external/poky/scripts/lib/argparse_oe.py | 4
-rw-r--r--  external/poky/scripts/lib/build_perf/__init__.py | 9
-rw-r--r--  external/poky/scripts/lib/build_perf/html.py | 9
-rw-r--r--  external/poky/scripts/lib/build_perf/report.py | 9
-rw-r--r--  external/poky/scripts/lib/buildstats.py | 15
-rw-r--r--  external/poky/scripts/lib/checklayer/__init__.py | 30
-rw-r--r--  external/poky/scripts/lib/checklayer/case.py | 4
-rw-r--r--  external/poky/scripts/lib/checklayer/cases/bsp.py | 4
-rw-r--r--  external/poky/scripts/lib/checklayer/cases/common.py | 6
-rw-r--r--  external/poky/scripts/lib/checklayer/cases/distro.py | 4
-rw-r--r--  external/poky/scripts/lib/checklayer/context.py | 4
-rw-r--r--  external/poky/scripts/lib/devtool/__init__.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/build.py | 25
-rw-r--r--  external/poky/scripts/lib/devtool/build_image.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/build_sdk.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/deploy.py | 40
-rw-r--r--  external/poky/scripts/lib/devtool/export.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/import.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/menuconfig.py | 79
-rw-r--r--  external/poky/scripts/lib/devtool/package.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/runqemu.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/sdk.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/search.py | 12
-rw-r--r--  external/poky/scripts/lib/devtool/standard.py | 252
-rw-r--r--  external/poky/scripts/lib/devtool/upgrade.py | 70
-rw-r--r--  external/poky/scripts/lib/devtool/utilcmds.py | 12
-rw-r--r--  external/poky/scripts/lib/recipetool/append.py | 12
-rw-r--r--  external/poky/scripts/lib/recipetool/create.py | 42
-rw-r--r--  external/poky/scripts/lib/recipetool/create_buildsys.py | 29
-rw-r--r--  external/poky/scripts/lib/recipetool/create_buildsys_python.py | 43
-rw-r--r--  external/poky/scripts/lib/recipetool/create_kernel.py | 12
-rw-r--r--  external/poky/scripts/lib/recipetool/create_kmod.py | 12
-rw-r--r--  external/poky/scripts/lib/recipetool/create_npm.py | 526
-rw-r--r--  external/poky/scripts/lib/recipetool/edit.py | 14
-rw-r--r--  external/poky/scripts/lib/recipetool/newappend.py | 12
-rw-r--r--  external/poky/scripts/lib/recipetool/setvar.py | 12
-rw-r--r--  external/poky/scripts/lib/resulttool/log.py | 84
-rwxr-xr-x  external/poky/scripts/lib/resulttool/manualexecution.py | 41
-rw-r--r--  external/poky/scripts/lib/resulttool/merge.py | 30
-rw-r--r--  external/poky/scripts/lib/resulttool/regression.py | 10
-rw-r--r--  external/poky/scripts/lib/resulttool/report.py | 234
-rw-r--r--  external/poky/scripts/lib/resulttool/resultutils.py | 85
-rw-r--r--  external/poky/scripts/lib/resulttool/store.py | 24
-rw-r--r--  external/poky/scripts/lib/resulttool/template/test_report_full_text.txt | 47
-rw-r--r--  external/poky/scripts/lib/scriptpath.py | 12
-rw-r--r--  external/poky/scripts/lib/scriptutils.py | 79
-rw-r--r--  external/poky/scripts/lib/wic/__init__.py | 14
-rw-r--r--  external/poky/scripts/lib/wic/canned-wks/qemuriscv.wks | 3
-rw-r--r--  external/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 2
-rw-r--r--  external/poky/scripts/lib/wic/engine.py | 129
-rw-r--r--  external/poky/scripts/lib/wic/filemap.py | 93
-rw-r--r--  external/poky/scripts/lib/wic/help.py | 123
-rw-r--r--  external/poky/scripts/lib/wic/ksparser.py | 42
-rw-r--r--  external/poky/scripts/lib/wic/misc.py | 16
-rw-r--r--  external/poky/scripts/lib/wic/partition.py | 47
-rw-r--r--  external/poky/scripts/lib/wic/pluginbase.py | 25
-rw-r--r--  external/poky/scripts/lib/wic/plugins/imager/direct.py | 101
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py | 213
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 88
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/bootimg-partition.py | 15
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py | 26
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 55
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/rawcopy.py | 20
-rw-r--r--  external/poky/scripts/lib/wic/plugins/source/rootfs.py | 23
-rwxr-xr-x  external/poky/scripts/lnr | 3
-rw-r--r--  external/poky/scripts/multilib_header_wrapper.h | 18
-rwxr-xr-x  external/poky/scripts/native-intercept/chgrp | 5
-rwxr-xr-x  external/poky/scripts/native-intercept/chown | 3
-rwxr-xr-x  external/poky/scripts/oe-build-perf-report | 30
-rwxr-xr-x  external/poky/scripts/oe-build-perf-test | 13
-rwxr-xr-x  external/poky/scripts/oe-buildenv-internal | 31
-rwxr-xr-x  external/poky/scripts/oe-check-sstate | 12
-rwxr-xr-x  external/poky/scripts/oe-depends-dot | 12
-rwxr-xr-x  external/poky/scripts/oe-find-native-sysroot | 12
-rwxr-xr-x  external/poky/scripts/oe-git-archive | 10
-rwxr-xr-x  external/poky/scripts/oe-git-proxy | 24
-rwxr-xr-x  external/poky/scripts/oe-gnome-terminal-phonehome | 2
-rwxr-xr-x  external/poky/scripts/oe-pkgdata-browser | 253
-rw-r--r--  external/poky/scripts/oe-pkgdata-browser.glade | 337
-rwxr-xr-x  external/poky/scripts/oe-pkgdata-util | 30
-rwxr-xr-x  external/poky/scripts/oe-publish-sdk | 16
-rwxr-xr-x  external/poky/scripts/oe-pylint | 13
-rwxr-xr-x  external/poky/scripts/oe-run-native | 16
-rwxr-xr-x  external/poky/scripts/oe-selftest | 14
-rwxr-xr-x  external/poky/scripts/oe-setup-builddir | 13
-rwxr-xr-x  external/poky/scripts/oe-test | 4
-rwxr-xr-x  external/poky/scripts/oe-trim-schemas | 3
-rwxr-xr-x  external/poky/scripts/oepydevshell-internal.py | 7
-rwxr-xr-x  external/poky/scripts/opkg-query-helper.py | 15
-rw-r--r--  external/poky/scripts/postinst-intercepts/delay_to_first_boot | 4
-rwxr-xr-x  external/poky/scripts/postinst-intercepts/postinst_intercept | 2
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_desktop_database | 8
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_font_cache | 5
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_gio_module_cache | 3
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_gtk_icon_cache (renamed from external/poky/scripts/postinst-intercepts/update_icon_cache) | 6
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_gtk_immodules_cache | 3
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_mime_database | 9
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_pixbuf_cache | 3
-rw-r--r--  external/poky/scripts/postinst-intercepts/update_udev_hwdb | 19
-rwxr-xr-x  external/poky/scripts/pybootchartgui/pybootchartgui.py | 2
-rw-r--r--  external/poky/scripts/pybootchartgui/pybootchartgui/draw.py | 1385
-rw-r--r--  external/poky/scripts/pybootchartgui/pybootchartgui/gui.py | 208
-rw-r--r--  external/poky/scripts/pybootchartgui/pybootchartgui/parsing.py | 18
-rwxr-xr-x  external/poky/scripts/pythondeps | 2
-rwxr-xr-x  external/poky/scripts/recipetool | 12
-rwxr-xr-x  external/poky/scripts/relocate_sdk.py | 13
-rwxr-xr-x  external/poky/scripts/resulttool | 9
-rwxr-xr-x  external/poky/scripts/rpm2cpio.sh | 4
-rwxr-xr-x  external/poky/scripts/runqemu | 509
-rwxr-xr-x  external/poky/scripts/runqemu-addptable2image | 14
-rwxr-xr-x  external/poky/scripts/runqemu-export-rootfs | 12
-rwxr-xr-x  external/poky/scripts/runqemu-extract-sdk | 14
-rwxr-xr-x  external/poky/scripts/runqemu-gen-tapdevs | 12
-rwxr-xr-x  external/poky/scripts/runqemu-ifdown | 12
-rwxr-xr-x  external/poky/scripts/runqemu-ifup | 12
-rwxr-xr-x  external/poky/scripts/send-error-report | 3
-rwxr-xr-x  external/poky/scripts/send-pull-request | 15
-rwxr-xr-x  external/poky/scripts/sstate-cache-management.sh | 13
-rwxr-xr-x  external/poky/scripts/sstate-diff-machines.sh | 4
-rwxr-xr-x  external/poky/scripts/sstate-sysroot-cruft.sh | 4
-rwxr-xr-x  external/poky/scripts/sysroot-relativelinks.py | 4
-rwxr-xr-x  external/poky/scripts/task-time | 3
-rwxr-xr-x  external/poky/scripts/test-reexec | 15
-rwxr-xr-x  external/poky/scripts/test-remote-image | 14
-rwxr-xr-x  external/poky/scripts/tiny/dirsize.py | 16
-rwxr-xr-x  external/poky/scripts/tiny/ksize.py | 28
-rwxr-xr-x  external/poky/scripts/tiny/ksum.py | 60
-rwxr-xr-x  external/poky/scripts/verify-bashisms | 3
-rwxr-xr-x  external/poky/scripts/wic | 48
-rwxr-xr-x  external/poky/scripts/yocto-check-layer | 19
-rwxr-xr-x  external/poky/scripts/yocto-check-layer-wrapper | 4
168 files changed, 4499 insertions, 3354 deletions
diff --git a/external/poky/scripts/autobuilder-worker-prereq-tests b/external/poky/scripts/autobuilder-worker-prereq-tests
index bb46c691..5d7e6e26 100755
--- a/external/poky/scripts/autobuilder-worker-prereq-tests
+++ b/external/poky/scripts/autobuilder-worker-prereq-tests
@@ -3,7 +3,7 @@
# Script which can be run on new autobuilder workers to check all needed configuration is present.
# Designed to be run in a repo where bitbake/oe-core are already present.
#
-
+# SPDX-License-Identifier: GPL-2.0-only
#
# Todo
# Add testtools/subunit import test
@@ -15,6 +15,12 @@
# test buildistory git repo works?
#
+if [ ! -x $HOME/yocto-autobuilder-helper/scripts/checkvnc ]; then
+ echo "$HOME/yocto-autobuilder-helper should be created."
+ exit 1
+fi
+$HOME/yocto-autobuilder-helper/scripts/checkvnc
+
. ./oe-init-build-env > /dev/null
if [ "$?" != "0" ]; then
exit 1
@@ -53,12 +59,12 @@ if [ ! -e bzImage-qemux86-64.bin ]; then
fi
popd
bitbake qemu-helper-native
-runqemu qemux86-64
+DISPLAY=:1 runqemu serialstdio qemux86-64
if [ "$?" != "0" ]; then
echo "Unable to use runqemu"
exit 1
fi
-runqemu qemux86-64 kvm
+DISPLAY=:1 runqemu serialstdio qemux86-64 kvm
if [ "$?" != "0" ]; then
echo "Unable to use runqemu with kvm"
exit 1
diff --git a/external/poky/scripts/bitbake-prserv-tool b/external/poky/scripts/bitbake-prserv-tool
index fa31b525..e55d98c7 100755
--- a/external/poky/scripts/bitbake-prserv-tool
+++ b/external/poky/scripts/bitbake-prserv-tool
@@ -1,4 +1,7 @@
#!/usr/bin/env bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
help ()
{
diff --git a/external/poky/scripts/bitbake-whatchanged b/external/poky/scripts/bitbake-whatchanged
index 0207777e..3095dafa 100755
--- a/external/poky/scripts/bitbake-whatchanged
+++ b/external/poky/scripts/bitbake-whatchanged
@@ -4,18 +4,8 @@
# Copyright (c) 2013 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
diff --git a/external/poky/scripts/buildhistory-collect-srcrevs b/external/poky/scripts/buildhistory-collect-srcrevs
index d375b045..340bee78 100755
--- a/external/poky/scripts/buildhistory-collect-srcrevs
+++ b/external/poky/scripts/buildhistory-collect-srcrevs
@@ -5,18 +5,8 @@
# Copyright 2013 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import os
@@ -69,6 +59,7 @@ def main():
all_srcrevs = collections.defaultdict(list)
for root, dirs, files in os.walk(options.buildhistory_dir):
+ dirs.sort()
if '.git' in dirs:
dirs.remove('.git')
for fn in files:
diff --git a/external/poky/scripts/buildhistory-diff b/external/poky/scripts/buildhistory-diff
index 70805b06..833f7c33 100755
--- a/external/poky/scripts/buildhistory-diff
+++ b/external/poky/scripts/buildhistory-diff
@@ -4,6 +4,9 @@
#
# Copyright (C) 2013 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
import os
diff --git a/external/poky/scripts/buildstats-diff b/external/poky/scripts/buildstats-diff
index a128dd32..2f6498ab 100755
--- a/external/poky/scripts/buildstats-diff
+++ b/external/poky/scripts/buildstats-diff
@@ -4,15 +4,9 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import glob
import logging
@@ -120,7 +114,7 @@ def print_ver_diff(bs1, bs2):
print(fmt_str.format(name, field1, field2, maxlen=maxlen))
-def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',)):
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',), only_tasks=[]):
"""Diff task execution times"""
def val_to_str(val, human_readable=False):
"""Convert raw value to printable string"""
@@ -157,8 +151,9 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
"""Get cumulative sum of all tasks"""
total = 0.0
for recipe_data in buildstats.values():
- for bs_task in recipe_data.tasks.values():
- total += getattr(bs_task, val_type)
+ for name, bs_task in recipe_data.tasks.items():
+ if not only_tasks or name in only_tasks:
+ total += getattr(bs_task, val_type)
return total
if min_val:
@@ -169,7 +164,7 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
# Prepare the data
- tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff)
+ tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff, only_tasks)
# Sort our list
for field in reversed(sort_by):
@@ -254,6 +249,8 @@ Script for comparing buildstats of two separate builds."""
parser.add_argument('--multi', action='store_true',
help="Read all buildstats from the given paths and "
"average over them")
+ parser.add_argument('--only-task', dest='only_tasks', metavar='TASK', action='append', default=[],
+ help="Only include TASK in report. May be specified multiple times")
parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
@@ -272,7 +269,6 @@ Script for comparing buildstats of two separate builds."""
return args
-
def main(argv=None):
"""Script entry point"""
args = parse_args(argv)
@@ -296,7 +292,7 @@ def main(argv=None):
print_ver_diff(bs1, bs2)
else:
print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
- args.min_absdiff, sort_by)
+ args.min_absdiff, sort_by, args.only_tasks)
except ScriptError as err:
log.error(str(err))
return 1
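
Note: the --only-task argument added above is append-style, so it can be given several times to narrow the report. A minimal usage sketch (the buildstats paths are placeholders, and task names are assumed to carry the usual do_ prefix found in buildstats directories):

  # Compare only compile and configure times between two buildstats trees
  scripts/buildstats-diff --only-task do_compile --only-task do_configure \
      tmp/buildstats/build-old tmp/buildstats/build-new
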
diff --git a/external/poky/scripts/combo-layer b/external/poky/scripts/combo-layer
index dc40e724..a634dd69 100755
--- a/external/poky/scripts/combo-layer
+++ b/external/poky/scripts/combo-layer
@@ -7,18 +7,8 @@
# Paul Eggleton <paul.eggleton@intel.com>
# Richard Purdie <richard.purdie@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import fnmatch
import os, sys
@@ -90,7 +80,7 @@ class Configuration(object):
logger.debug("Loading config file %s" % self.conffile)
self.parser = configparser.ConfigParser()
with open(self.conffile) as f:
- self.parser.readfp(f)
+ self.parser.read_file(f)
# initialize default values
self.commit_msg_template = "Automatic commit to update last_revision"
diff --git a/external/poky/scripts/combo-layer-hook-default.sh b/external/poky/scripts/combo-layer-hook-default.sh
index 1e3a3b9b..11547a98 100755
--- a/external/poky/scripts/combo-layer-hook-default.sh
+++ b/external/poky/scripts/combo-layer-hook-default.sh
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Hook to add source component/revision info to commit message
# Parameter:
# $1 patch-file
diff --git a/external/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh b/external/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh
index 136a2557..e7bd129e 100755
--- a/external/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/external/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
diff --git a/external/poky/scripts/contrib/bb-perf/bb-matrix.sh b/external/poky/scripts/contrib/bb-perf/bb-matrix.sh
index 10645658..b1fff0f3 100755
--- a/external/poky/scripts/contrib/bb-perf/bb-matrix.sh
+++ b/external/poky/scripts/contrib/bb-perf/bb-matrix.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script runs BB_CMD (typically building core-image-sato) for all
diff --git a/external/poky/scripts/contrib/bb-perf/buildstats-plot.sh b/external/poky/scripts/contrib/bb-perf/buildstats-plot.sh
index 7e8ae041..45c27d0b 100755
--- a/external/poky/scripts/contrib/bb-perf/buildstats-plot.sh
+++ b/external/poky/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -1,21 +1,8 @@
#!/usr/bin/env bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
#
@@ -52,7 +39,10 @@ set -o errexit
BS_DIR="tmp/buildstats"
N=10
+RECIPE=""
+TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="utime"
+ACCUMULATE=""
SUM=""
OUTDATA_FILE="$PWD/buildstats-plot.out"
@@ -64,11 +54,15 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
(default: "$BS_DIR")
-n N Top N recipes to display. Ignored if -S is present
(default: "$N")
+ -r recipe The recipe mask to be searched
+ -t tasks The tasks to be computed
+ (default: "$TASKS")
-s stats The stats to be matched. If more that one stat, units
should be the same because data is plot as histogram.
(see buildstats.sh -h for all options) or any other defined
(build)stat separated by colons, i.e. stime:utime
(default: "$STATS")
+ -a Accumulate all stats values for found recipes
-S Sum values for a particular stat for found recipes
-o Output data file.
(default: "$OUTDATA_FILE")
@@ -77,32 +71,41 @@ EOM
}
# Parse and validate arguments
-while getopts "b:n:s:o:Sh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- n)
- N="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- S)
- SUM="y"
- ;;
- o)
- OUTDATA_FILE="$OPTARG"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:n:r:t:s:o:aSh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ n)
+ N="$OPTARG"
+ ;;
+ r)
+ RECIPE="-r $OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="-a"
+ ;;
+ S)
+ SUM="y"
+ ;;
+ o)
+ OUTDATA_FILE="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Get number of stats
@@ -114,10 +117,10 @@ CD=$(dirname $0)
# Parse buildstats recipes to produce a single table
OUTBUILDSTATS="$PWD/buildstats.log"
-$CD/buildstats.sh -H -s "$STATS" -H > $OUTBUILDSTATS
+$CD/buildstats.sh -b "$BS_DIR" -s "$STATS" -t "$TASKS" $RECIPE $ACCUMULATE -H > $OUTBUILDSTATS
# Get headers
-HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e 's/\(.*\)/"\1"/' -e '1s/ /\\\\\\\\ /g' -e 's/_/\\\\\\\\_/g' -e '1s/:/" "/gp')
echo -e "set boxwidth 0.9 relative"
echo -e "set style data histograms"
@@ -126,7 +129,7 @@ echo -e "set xtics rotate by 45 right"
# Get output data
if [ -z "$SUM" ]; then
- cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+ cat $OUTBUILDSTATS | sed -e '1d' -e 's/_/\\\\_/g' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
# include task at recipe column
sed -i -e "1i\
${HEADERS}" $OUTDATA_FILE
@@ -138,8 +141,8 @@ else
declare -a sumargs
j=0
for i in `seq $nstats`; do
- sumargs[j]=sum; j=$(( $j + 1 ))
- sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
+ sumargs[j]=sum; j=$(( $j + 1 ))
+ sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
done
# Do the processing with datamash
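
Note: a hypothetical run of the reworked plotting wrapper with the new -r, -t and -a flags; the buildstats path and recipe mask are placeholders, and piping the emitted commands into gnuplot is an assumption based on the script printing "set ..." directives:

  # Accumulate stime+utime per recipe matching "gcc" and plot the result
  scripts/contrib/bb-perf/buildstats-plot.sh -b tmp/buildstats -r gcc \
      -t "compile:configure" -s stime:utime -a | gnuplot -persist
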
diff --git a/external/poky/scripts/contrib/bb-perf/buildstats.sh b/external/poky/scripts/contrib/bb-perf/buildstats.sh
index 8d7e2488..e45cfc14 100755
--- a/external/poky/scripts/contrib/bb-perf/buildstats.sh
+++ b/external/poky/scripts/contrib/bb-perf/buildstats.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# Given 'buildstats' data (generate by bitbake when setting
@@ -49,8 +36,10 @@ Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rus
Child rusage ru_nivcsw"
BS_DIR="tmp/buildstats"
+RECIPE=""
TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="$TIME"
+ACCUMULATE=""
HEADER="" # No header by default
function usage {
@@ -59,6 +48,7 @@ cat <<EOM
Usage: $CMD [-b buildstats_dir] [-t do_task]
-b buildstats The path where the folder resides
(default: "$BS_DIR")
+ -r recipe The recipe to be computed
-t tasks The tasks to be computed
(default: "$TASKS")
-s stats The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
@@ -69,87 +59,109 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
IO=$IO
RUSAGE=$RUSAGE
CHILD_RUSAGE=$CHILD_RUSAGE
+ -a Accumulate all stats values for found recipes
-h Display this help message
EOM
}
# Parse and validate arguments
-while getopts "b:t:s:Hh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- t)
- TASKS="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- H)
- HEADER="y"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:r:t:s:aHh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ r)
+ RECIPE="$OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="y"
+ ;;
+ H)
+ HEADER="y"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Ensure the buildstats folder exists
if [ ! -d "$BS_DIR" ]; then
- echo "ERROR: $BS_DIR does not exist"
- usage
- exit 1
+ echo "ERROR: $BS_DIR does not exist"
+ usage
+ exit 1
fi
stats=""
IFS=":"
for stat in ${STATS}; do
- case $stat in
- TIME)
- stats="${stats}:${TIME}"
- ;;
- IO)
- stats="${stats}:${IO}"
- ;;
- RUSAGE)
- stats="${stats}:${RUSAGE}"
- ;;
- CHILD_RUSAGE)
- stats="${stats}:${CHILD_RUSAGE}"
- ;;
- *)
- stats="${STATS}"
- esac
+ case $stat in
+ TIME)
+ stats="${stats}:${TIME}"
+ ;;
+ IO)
+ stats="${stats}:${IO}"
+ ;;
+ RUSAGE)
+ stats="${stats}:${RUSAGE}"
+ ;;
+ CHILD_RUSAGE)
+ stats="${stats}:${CHILD_RUSAGE}"
+ ;;
+ *)
+ stats="${STATS}"
+ ;;
+ esac
done
# remove possible colon at the beginning
stats="$(echo "$stats" | sed -e 's/^://1')"
# Provide a header if required by the user
-[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
+if [ -n "$HEADER" ] ; then
+ if [ -n "$ACCUMULATE" ]; then
+ echo "task:recipe:accumulated(${stats//:/;})"
+ else
+ echo "task:recipe:$stats"
+ fi
+fi
for task in ${TASKS}; do
task="do_${task}"
- for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+ for file in $(find ${BS_DIR} -type f -path *${RECIPE}*/${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
recipe="$(basename $(dirname $file))"
- times=""
- for stat in ${stats}; do
- [ -z "$stat" ] && { echo "empty stats"; }
- time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
- # in case the stat is not present, set the value as NA
- [ -z "$time" ] && { time="NA"; }
- # Append it to times
- if [ -z "$times" ]; then
- times="${time}"
- else
- times="${times} ${time}"
- fi
- done
+ times=""
+ for stat in ${stats}; do
+ [ -z "$stat" ] && { echo "empty stats"; }
+ time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+ # in case the stat is not present, set the value as NA
+ [ -z "$time" ] && { time="NA"; }
+ # Append it to times
+ if [ -z "$times" ]; then
+ times="${time}"
+ else
+ times="${times} ${time}"
+ fi
+ done
+ if [ -n "$ACCUMULATE" ]; then
+ IFS=' '; valuesarray=(${times}); IFS=':'
+ times=0
+ for value in "${valuesarray[@]}"; do
+ [ "$value" == "NA" ] && { echo "ERROR: stat is not present."; usage; exit 1; }
+ times=$(( $times + $value ))
+ done
+ fi
echo "${task} ${recipe} ${times}"
done
done
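
Note: a rough illustration of the new -r and -a flags in buildstats.sh (buildstats path and recipe name are placeholders); -r restricts the search to one recipe and -a collapses the selected stats into a single accumulated column:

  # One summed TIME value per task for the busybox recipe, with a header row
  scripts/contrib/bb-perf/buildstats.sh -b tmp/buildstats -r busybox \
      -t "compile:configure:install" -s TIME -a -H
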
diff --git a/external/poky/scripts/contrib/bbvars.py b/external/poky/scripts/contrib/bbvars.py
index 286b5a94..09013360 100755
--- a/external/poky/scripts/contrib/bbvars.py
+++ b/external/poky/scripts/contrib/bbvars.py
@@ -1,18 +1,6 @@
#!/usr/bin/env python3
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
diff --git a/external/poky/scripts/contrib/build-perf-test-wrapper.sh b/external/poky/scripts/contrib/build-perf-test-wrapper.sh
index 7cbb5d79..fa71d4a2 100755
--- a/external/poky/scripts/contrib/build-perf-test-wrapper.sh
+++ b/external/poky/scripts/contrib/build-perf-test-wrapper.sh
@@ -4,15 +4,7 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
+# SPDX-License-Identifier: GPL-2.0-only
#
# This script is a simple wrapper around the actual build performance tester
# script. This script initializes the build environment, runs
diff --git a/external/poky/scripts/contrib/ddimage b/external/poky/scripts/contrib/ddimage
index ab929957..7f2ad112 100755
--- a/external/poky/scripts/contrib/ddimage
+++ b/external/poky/scripts/contrib/ddimage
@@ -1,8 +1,7 @@
#!/bin/sh
-
-# Default to avoiding the first two disks on typical Linux and Mac OS installs
-# Better safe than sorry :-)
-BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# 1MB blocksize
BLOCKSIZE=1048576
@@ -29,7 +28,6 @@ image_details() {
}
device_details() {
- DEV=$1
BLOCK_SIZE=512
echo "Device details"
@@ -42,11 +40,17 @@ device_details() {
fi
# Default / Linux information collection
- echo " device: $DEVICE"
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+ DEV=`basename $ACTUAL_DEVICE`
+ if [ "$ACTUAL_DEVICE" != "$DEVICE" ] ; then
+ echo " device: $DEVICE -> $ACTUAL_DEVICE"
+ else
+ echo " device: $DEVICE"
+ fi
if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
else
- echo " vendor: UNKOWN"
+ echo " vendor: UNKNOWN"
fi
if [ -f "/sys/class/block/$DEV/device/model" ]; then
echo " model: $(cat /sys/class/block/$DEV/device/model)"
@@ -61,6 +65,49 @@ device_details() {
echo ""
}
+check_mount_device() {
+ if cat /proc/self/mounts | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1$" ; then
+ return 0
+ fi
+ return 1
+}
+
+is_mounted() {
+ if [ "$(uname)" = "Darwin" ]; then
+ if df | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1(s[0-9]+)?$" ; then
+ return 0
+ fi
+ else
+ if check_mount_device $1 ; then
+ return 0
+ fi
+ DEV=`basename $1`
+ if [ -d /sys/class/block/$DEV/ ] ; then
+ PARENT_BLKDEV=`basename $(readlink -f "/sys/class/block/$DEV/..")`
+ if [ "$PARENT_BLKDEV" != "block" ] ; then
+ if check_mount_device $PARENT_BLKDEV ; then
+ return 0
+ fi
+ fi
+ for CHILD_BLKDEV in `find /sys/class/block/$DEV/ -mindepth 1 -maxdepth 1 -name "$DEV*" -type d`
+ do
+ if check_mount_device /dev/`basename $CHILD_BLKDEV` ; then
+ return 0
+ fi
+ done
+ fi
+ fi
+ return 1
+}
+
+is_inuse() {
+ HOLDERS_DIR="/sys/class/block/`basename $1`/holders"
+ if [ -d $HOLDERS_DIR ] && [ `ls -A $HOLDERS_DIR` ] ; then
+ return 0
+ fi
+ return 1
+}
+
if [ $# -ne 2 ]; then
usage
exit 1
@@ -75,22 +122,37 @@ if [ ! -e "$IMAGE" ]; then
exit 1
fi
+if [ ! -e "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist"
+ usage
+ exit 1
+fi
-for i in ${BLACKLIST_DEVICES}; do
- if [ "$i" = "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE is blacklisted"
- exit 1
- fi
-done
+if [ "$(uname)" = "Darwin" ]; then
+ # readlink doesn't support -f on MacOS, just assume it isn't a symlink
+ ACTUAL_DEVICE=$DEVICE
+else
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+fi
+if is_mounted $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently mounted - check if this is the right device, and unmount it first if so"
+ device_details
+ exit 1
+fi
+if is_inuse $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently in use (possibly part of LVM) - check if this is the right device!"
+ device_details
+ exit 1
+fi
if [ ! -w "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE does not exist or is not writable"
+ echo "ERROR: Device $DEVICE is not writable - possibly use sudo?"
usage
exit 1
fi
image_details $IMAGE
-device_details $(basename $DEVICE)
+device_details
printf "Write $IMAGE to $DEVICE [y/N]? "
read RESPONSE
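
Note: the mount and holders checks that ddimage now performs before writing can be reproduced by hand when a device is rejected; a minimal sketch for a Linux host, with /dev/sdX as a placeholder target:

  # Is the device or one of its partitions mounted?
  awk '{ print $1 }' /proc/self/mounts | grep '^/dev/sdX'
  # Is the device held by another subsystem (e.g. LVM or dm-crypt)?
  ls -A /sys/class/block/sdX/holders
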
diff --git a/external/poky/scripts/contrib/devtool-stress.py b/external/poky/scripts/contrib/devtool-stress.py
index d555c51a..81046ecf 100755
--- a/external/poky/scripts/contrib/devtool-stress.py
+++ b/external/poky/scripts/contrib/devtool-stress.py
@@ -6,18 +6,7 @@
#
# Copyright 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/external/poky/scripts/contrib/dialog-power-control b/external/poky/scripts/contrib/dialog-power-control
index 7550ea53..ad6070c3 100755
--- a/external/poky/scripts/contrib/dialog-power-control
+++ b/external/poky/scripts/contrib/dialog-power-control
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Simple script to show a manual power prompt for when you want to use
# automated hardware testing with testimage.bbclass but you don't have a
# web-enabled power strip or similar to do the power on/off/cycle.
diff --git a/external/poky/scripts/contrib/documentation-audit.sh b/external/poky/scripts/contrib/documentation-audit.sh
index 2144aac9..1191f57a 100755
--- a/external/poky/scripts/contrib/documentation-audit.sh
+++ b/external/poky/scripts/contrib/documentation-audit.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Perform an audit of which packages provide documentation and which
# are missing -doc packages.
#
@@ -7,7 +9,6 @@
# this script after source'ing the build environment script, so you're
# running it from build/ directory.
#
-# Maintainer: Scott Garman <scott.a.garman@intel.com>
REPORT_DOC_SIMPLE="documentation_exists.txt"
REPORT_DOC_DETAIL="documentation_exists_detail.txt"
diff --git a/external/poky/scripts/contrib/graph-tool b/external/poky/scripts/contrib/graph-tool
index 1df5b8c3..6d2e68b8 100755
--- a/external/poky/scripts/contrib/graph-tool
+++ b/external/poky/scripts/contrib/graph-tool
@@ -7,18 +7,7 @@
#
# Copyright 2013 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/external/poky/scripts/contrib/list-packageconfig-flags.py b/external/poky/scripts/contrib/list-packageconfig-flags.py
index 7ce71862..d6de4dc8 100755
--- a/external/poky/scripts/contrib/list-packageconfig-flags.py
+++ b/external/poky/scripts/contrib/list-packageconfig-flags.py
@@ -1,21 +1,10 @@
#!/usr/bin/env python3
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation.
-#
# Copyright (C) 2013 Wind River Systems, Inc.
# Copyright (C) 2014 Intel Corporation
#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
# - list available recipes which have PACKAGECONFIG flags
# - list available PACKAGECONFIG flags and all affected recipes
# - list all recipes and PACKAGECONFIG information
@@ -76,7 +65,7 @@ def collect_pkgs(data_dict):
for fn in data_dict:
pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
pkgconfigflags.pop('doc', None)
- pkgname = data_dict[fn].getVar("P")
+ pkgname = data_dict[fn].getVar("PN")
pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
return pkg_dict
diff --git a/external/poky/scripts/contrib/oe-build-perf-report-email.py b/external/poky/scripts/contrib/oe-build-perf-report-email.py
index 913847bb..de3862c8 100755
--- a/external/poky/scripts/contrib/oe-build-perf-report-email.py
+++ b/external/poky/scripts/contrib/oe-build-perf-report-email.py
@@ -4,15 +4,9 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import base64
import logging
diff --git a/external/poky/scripts/contrib/patchreview.py b/external/poky/scripts/contrib/patchreview.py
index 07216650..62c509f5 100755
--- a/external/poky/scripts/contrib/patchreview.py
+++ b/external/poky/scripts/contrib/patchreview.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# TODO
# - option to just list all broken files
diff --git a/external/poky/scripts/contrib/patchtest.sh b/external/poky/scripts/contrib/patchtest.sh
index 7fe56666..b1e1ea33 100755
--- a/external/poky/scripts/contrib/patchtest.sh
+++ b/external/poky/scripts/contrib/patchtest.sh
@@ -1,26 +1,12 @@
#!/bin/bash
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# patchtest: Run patchtest on commits starting at master
#
# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
+
set -o errexit
# Default values
diff --git a/external/poky/scripts/contrib/serdevtry b/external/poky/scripts/contrib/serdevtry
index 74bd7b71..9144730e 100755
--- a/external/poky/scripts/contrib/serdevtry
+++ b/external/poky/scripts/contrib/serdevtry
@@ -2,7 +2,8 @@
# Copyright (C) 2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
if [ "$1" = "" -o "$1" = "--help" ] ; then
echo "Usage: $0 <serial terminal command>"
diff --git a/external/poky/scripts/contrib/test_build_time.sh b/external/poky/scripts/contrib/test_build_time.sh
index 9e5725ae..23f238ad 100755
--- a/external/poky/scripts/contrib/test_build_time.sh
+++ b/external/poky/scripts/contrib/test_build_time.sh
@@ -3,22 +3,8 @@
# Build performance regression test script
#
# Copyright 2011 Intel Corporation
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to be used in conjunction with "git bisect run"
diff --git a/external/poky/scripts/contrib/test_build_time_worker.sh b/external/poky/scripts/contrib/test_build_time_worker.sh
index 8e20a9ea..478e8b0d 100755
--- a/external/poky/scripts/contrib/test_build_time_worker.sh
+++ b/external/poky/scripts/contrib/test_build_time_worker.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This is an example script to be used in conjunction with test_build_time.sh
if [ "$TEST_BUILDDIR" = "" ] ; then
diff --git a/external/poky/scripts/contrib/uncovered b/external/poky/scripts/contrib/uncovered
index a8399ad1..f16128cb 100755
--- a/external/poky/scripts/contrib/uncovered
+++ b/external/poky/scripts/contrib/uncovered
@@ -1,23 +1,10 @@
#!/bin/bash -eur
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Find python modules uncovered by oe-seltest
#
# Copyright (c) 2016, Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
#
diff --git a/external/poky/scripts/contrib/verify-homepage.py b/external/poky/scripts/contrib/verify-homepage.py
index cc6e797d..7bffa78e 100755
--- a/external/poky/scripts/contrib/verify-homepage.py
+++ b/external/poky/scripts/contrib/verify-homepage.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This script can be used to verify HOMEPAGE values for all recipes in
# the current configuration.
# The result is influenced by network environment, since the timeout of connect url is 5 seconds as default.
diff --git a/external/poky/scripts/cp-noerror b/external/poky/scripts/cp-noerror
index 35eb211b..ab617c5d 100755
--- a/external/poky/scripts/cp-noerror
+++ b/external/poky/scripts/cp-noerror
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
# don't error.
# Also don't error if $1 disappears.
diff --git a/external/poky/scripts/create-pull-request b/external/poky/scripts/create-pull-request
index 280880b3..8eefcf63 100755
--- a/external/poky/scripts/create-pull-request
+++ b/external/poky/scripts/create-pull-request
@@ -1,21 +1,8 @@
#!/bin/sh
#
# Copyright (c) 2010-2013, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -136,20 +123,12 @@ fi
# Rewrite private URLs to public URLs
# Determine the repository name for use in the WEB_URL later
-case "$REMOTE_URL" in
-*@*)
- USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
- PROTO_RE="[a-z][a-z+]*://"
- GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
- REMOTE_URL=${REMOTE_URL%.git}
- REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
- REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
- ;;
-*)
- echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
- echo " The pull and browse URLs will likely be incorrect"
- ;;
-esac
+USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+PROTO_RE="[a-z][a-z+]*://"
+GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
+REMOTE_URL=${REMOTE_URL%.git}
+REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -278,7 +257,7 @@ fi
# Replace the SUBJECT token with it.
if [ -n "$SUBJECT" ]; then
- sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+ sed -i -e "s\`\*\*\* SUBJECT HERE \*\*\*\`$SUBJECT\`" "$CL"
fi
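
Note: the relaxed rewrite above now handles remotes both with and without a user@ part; a hypothetical before/after using example hosts (the actual WEB_URL derivation depends on the remote in use):

  # ssh shorthand rewritten to the public form used in the cover letter
  #   git@git.example.com:user/repo.git      ->  git://git.example.com/user/repo
  # already-qualified URL without a user part
  #   https://git.example.com/user/repo.git  ->  git://git.example.com/user/repo
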
diff --git a/external/poky/scripts/crosstap b/external/poky/scripts/crosstap
index e33fa4ad..40856bc2 100755
--- a/external/poky/scripts/crosstap
+++ b/external/poky/scripts/crosstap
@@ -17,20 +17,9 @@
# related to kernel.
#
# Copyright (c) 2018, Cisco Systems.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import re
diff --git a/external/poky/scripts/devtool b/external/poky/scripts/devtool
index 0e578c0d..8a4f41bc 100755
--- a/external/poky/scripts/devtool
+++ b/external/poky/scripts/devtool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/distro/build-recipe-list.py b/external/poky/scripts/distro/build-recipe-list.py
deleted file mode 100755
index 21627648..00000000
--- a/external/poky/scripts/distro/build-recipe-list.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (c) 2017, Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-
-import os
-import shutil
-import csv
-import sys
-import argparse
-
-__version__ = "0.1.0"
-
-# set of BPNs
-recipenames = set()
-# map of recipe -> data
-allrecipes = {}
-
-def make_bpn(recipe):
- prefixes = ("nativesdk-",)
- suffixes = ("-native", "-cross", "-initial", "-intermediate", "-crosssdk", "-cross-canadian")
- for ix in prefixes + suffixes:
- if ix in recipe:
- recipe = recipe.replace(ix, "")
- return recipe
-
-def gather_recipes(rows):
- for row in rows:
- recipe = row[0]
- bpn = make_bpn(recipe)
- if bpn not in recipenames:
- recipenames.add(bpn)
- if recipe not in allrecipes:
- allrecipes[recipe] = row
-
-def generate_recipe_list():
- # machine list
- machine_list = ( "qemuarm64", "qemuarm", "qemumips64", "qemumips", "qemuppc", "qemux86-64", "qemux86" )
- # set filename format
- fnformat = 'distrodata.%s.csv'
-
- # store all data files in distrodata
- datadir = 'distrodata'
-
- # create the directory if it does not exists
- if not os.path.exists(datadir):
- os.mkdir(datadir)
-
- # doing bitbake distrodata
- for machine in machine_list:
- os.system('MACHINE='+ machine + ' bitbake -k universe -c distrodata')
- shutil.copy('tmp/log/distrodata.csv', 'distrodata/' + fnformat % machine)
-
- for machine in machine_list:
- with open('distrodata/' + fnformat % machine) as f:
- reader = csv.reader(f)
- rows = reader.__iter__()
- gather_recipes(rows)
-
- with open('recipe-list.txt', 'w') as f:
- for recipe in sorted(recipenames):
- f.write("%s\n" % recipe)
- print("file : recipe-list.txt is created with %d entries." % len(recipenames))
-
- with open('all-recipe-list.txt', 'w') as f:
- for recipe, row in sorted(allrecipes.items()):
- f.write("%s\n" % ','.join(row))
-
-
-def diff_for_new_recipes(recipe1, recipe2):
- prev_recipe_path = recipe1 + '/'
- curr_recipe_path = recipe2 + '/'
- if not os.path.isfile(prev_recipe_path + 'recipe-list.txt') or not os.path.isfile(curr_recipe_path + 'recipe-list.txt'):
- print("recipe files do not exists. please verify that the file exists.")
- exit(1)
-
- import csv
-
- prev = []
- new = []
-
- with open(prev_recipe_path + 'recipe-list.txt') as f:
- prev = f.readlines()
-
- with open(curr_recipe_path + 'recipe-list.txt') as f:
- new = f.readlines()
-
- updates = []
- for pn in new:
- if not pn in prev:
- updates.append(pn.rstrip())
-
- allrecipe = []
- with open(recipe1 + '_' + recipe2 + '_new_recipe_list.txt','w') as dr:
- with open(curr_recipe_path + 'all-recipe-list.txt') as f:
- reader = csv.reader(f, delimiter=',')
- for row in reader:
- if row[0] in updates:
- dr.write("%s,%s,%s" % (row[0], row[3], row[5]))
- if len(row[9:]) > 0:
- dr.write(",%s" % ','.join(row[9:]))
- dr.write("\n")
-
-def main(argv):
- if argv[0] == "generate_recipe_list":
- generate_recipe_list()
- elif argv[0] == "compare_recipe":
- diff_for_new_recipes(argv[1], argv[2])
- else:
- print("no such option. choose either 'generate_recipe_list' or 'compare_recipe'")
-
- exit(0)
-
-if __name__ == "__main__":
- try:
- sys.exit(main(sys.argv[1:]))
- except Exception as e:
- print("Exception :", e)
- sys.exit(1)
-
diff --git a/external/poky/scripts/distro/distrocompare.sh b/external/poky/scripts/distro/distrocompare.sh
deleted file mode 100755
index 908760c2..00000000
--- a/external/poky/scripts/distro/distrocompare.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# distrocompare.sh : provides capability to get a list of new packages
-# based on two distinct branches. This script takes
-# 2 parameters; either a commit-ish or a branch name
-#
-# To run : distrocompare.sh <older hash> <newer hash>
-# E.g. distrocompare.sh morty 92aa0e7
-# E.g. distrocompare.sh morty pyro
-#
-
-# get input as version
-previous_version=$1
-current_version=$2
-
-# set previous and current version
-if [ -z "$2" ]; then
- previous_version=$1
- current_version="current"
-fi
-
-# get script location. That's where the source supposedly located as well.
-scriptdir="$( realpath $(dirname "${BASH_SOURCE[0]}" ))"
-sourcedir="$( realpath $scriptdir/../.. )"
-
-# create working directory
-workdir=$(mktemp -d)
-
-# prepare to rollback to the branch if not similar
-branch=`cd $sourcedir; git branch | grep \* | cut -d ' ' -f2`
-
-# set current workdir to store final result
-currentworkdir=`pwd`
-
-# persists the file after local repo change
-cp $scriptdir/build-recipe-list.py $workdir
-
-#==================================================================
-
-function bake_distrodata {
- # get to source directory of the git
- cd $sourcedir
-
- # change the branch / commit. Do not change if input is current
- if [ "$1" != "current" ]; then
- output=$(git checkout $1 2>&1)
-
- # exit if git fails
- if [[ $output == *"error"* ]]; then
- echo "git error : $output"
- echo "exiting ... "
- rm -rf $workdir
- exit
- fi
- fi
-
- # make tmp as workdir
- cd $workdir
-
- # source oe-init to generate a new build folder
- source $sourcedir/oe-init-build-env $1
-
- # if file already exists with distrodata, do not append
- if ! grep -q "distrodata" "conf/local.conf"; then
- # add inherit distrodata to local.conf to enable distrodata feature
- echo 'INHERIT += "distrodata"' >> conf/local.conf
- fi
-
- # use from tmp
- $workdir/build-recipe-list.py generate_recipe_list
-}
-
-bake_distrodata $previous_version
-bake_distrodata $current_version
-
-#==================================================================
-
-cd $workdir
-
-# compare the 2 generated recipe-list.txt
-$workdir/build-recipe-list.py compare_recipe $previous_version $current_version
-
-# copy final result to current working directory
-cp $workdir/*_new_recipe_list.txt $currentworkdir
-
-if [ $? -ne 0 ]; then
- rm -rf $workdir/$previous_version
- rm -rf $workdir/$current_version
- rm $workdir/build-recipe-list.py
- # preserve the result in /tmp/distrodata if fail to copy the result over
- exit
-fi
-
-# cleanup
-rm -rf $workdir
-
-# perform rollback branch
-cd $sourcedir
-currentbranch=`git branch | grep \* | cut -d ' ' -f2`
-if [ "$currentbranch" != "$branch" ]; then
- git checkout $branch
-fi
-
-cd $currentworkdir
-
-#==================================================================
diff --git a/external/poky/scripts/gen-lockedsig-cache b/external/poky/scripts/gen-lockedsig-cache
index 6765891d..cd8f9a43 100755
--- a/external/poky/scripts/gen-lockedsig-cache
+++ b/external/poky/scripts/gen-lockedsig-cache
@@ -1,10 +1,13 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
-import glob
import shutil
import errno
+import time
def mkdir(d):
try:
@@ -13,6 +16,38 @@ def mkdir(d):
if e.errno != errno.EEXIST:
raise e
+# extract the hash: the field after the last colon, up to the first underscore
+def extract_sha(filename):
+ return filename.split(':')[7].split('_')[0]
+
+# get all files in a directory, extract hash and make
+# a map from hash to list of file with that hash
+def map_sha_to_files(dir_, prefix, sha_map):
+ sstate_prefix_path = dir_ + '/' + prefix + '/'
+ if not os.path.exists(sstate_prefix_path):
+ return
+ sstate_files = os.listdir(sstate_prefix_path)
+ for f in sstate_files:
+ try:
+ sha = extract_sha(f)
+ if sha not in sha_map:
+ sha_map[sha] = []
+ sha_map[sha].append(sstate_prefix_path + f)
+ except IndexError:
+ continue
+
+# given a prefix build a map of hash to list of files
+def build_sha_cache(prefix):
+ sha_map = {}
+
+ sstate_dir = sys.argv[2]
+ map_sha_to_files(sstate_dir, prefix, sha_map)
+
+ native_sstate_dir = sys.argv[2] + '/' + sys.argv[4]
+ map_sha_to_files(native_sstate_dir, prefix, sha_map)
+
+ return sha_map
+
if len(sys.argv) < 5:
print("Incorrect number of arguments specified")
print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
@@ -38,12 +73,23 @@ with open(sys.argv[1]) as f:
sigs.append(sig)
print('Gathering file list')
+start_time = time.perf_counter()
files = set()
+sstate_content_cache = {}
for s in sigs:
- p = sys.argv[2] + "/" + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
- p = sys.argv[2] + "/%s/" % sys.argv[4] + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
+ prefix = s[:2]
+ prefix2 = s[2:4]
+ if prefix not in sstate_content_cache:
+ sstate_content_cache[prefix] = {}
+ if prefix2 not in sstate_content_cache[prefix]:
+ sstate_content_cache[prefix][prefix2] = build_sha_cache(prefix + "/" + prefix2)
+
+ if s in sstate_content_cache[prefix][prefix2]:
+ for f in sstate_content_cache[prefix][prefix2][s]:
+ files.add(f)
+
+elapsed = time.perf_counter() - start_time
+print("Gathering file list took %.1fs" % elapsed)
print('Processing files')
for f in files:
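Note: the change above replaces two glob() calls per signature with a per-prefix index. Each <aa>/<bb> sstate subdirectory is listed once, its filenames are keyed by the embedded hash, and every signature sharing that prefix reuses the cached map. A rough sketch of the indexing idea, assuming sstate filenames whose eighth colon-separated field begins with the hash (this is a sketch, not the script itself):

    # Rough sketch of hash-indexed lookup over one sstate prefix directory
    # (assumed layout: <sstate-dir>/<aa>/<bb>/sstate:...:<hash>_task.tgz).
    import os
    from collections import defaultdict

    def index_by_hash(subdir):
        """Map hash -> list of files for one two-level prefix directory."""
        index = defaultdict(list)
        if not os.path.isdir(subdir):
            return index
        for name in os.listdir(subdir):
            fields = name.split(':')
            if len(fields) > 7:
                sha = fields[7].split('_')[0]
                index[sha].append(os.path.join(subdir, name))
        return index

    # The directory is listed once and the map reused for every signature that
    # shares the prefix, instead of issuing one glob per signature.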
diff --git a/external/poky/scripts/gen-site-config b/external/poky/scripts/gen-site-config
index 7da7a0bd..727b809c 100755
--- a/external/poky/scripts/gen-site-config
+++ b/external/poky/scripts/gen-site-config
@@ -1,18 +1,8 @@
#! /bin/sh
# Copyright (c) 2005-2008 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
cat << EOF
AC_PREREQ(2.57)
diff --git a/external/poky/scripts/install-buildtools b/external/poky/scripts/install-buildtools
new file mode 100755
index 00000000..fbf6a4bc
--- /dev/null
+++ b/external/poky/scripts/install-buildtools
@@ -0,0 +1,343 @@
+#!/usr/bin/env python3
+
+# Buildtools and buildtools extended installer helper script
+#
+# Copyright (C) 2017-2020 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# NOTE: --with-extended-buildtools is on by default
+#
+# Example usage (extended buildtools from milestone):
+# (1) using --url and --filename
+# $ install-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/milestones/yocto-3.1_M3/buildtools \
+# --filename x86_64-buildtools-extended-nativesdk-standalone-3.0+snapshot-20200315.sh
+# (2) using --base-url, --release, --installer-version and --build-date
+# $ install-buildtools \
+# --base-url http://downloads.yoctoproject.org/releases/yocto \
+# --release yocto-3.1_M3 \
+#      --installer-version 3.0+snapshot \
+#      --build-date 20200315
+#
+# Example usage (standard buildtools from release):
+# (3) using --url and --filename
+# $ install-buildtools --without-extended-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/yocto-3.0.2/buildtools \
+# --filename x86_64-buildtools-nativesdk-standalone-3.0.2.sh
+# (4) using --base-url, --release and --installer-version
+# $ install-buildtools --without-extended-buildtools \
+# --base-url http://downloads.yoctoproject.org/releases/yocto \
+# --release yocto-3.0.2 \
+# --installer-version 3.0.2
+#
+
+import argparse
+import logging
+import os
+import platform
+import re
+import shutil
+import shlex
+import stat
+import subprocess
+import sys
+import tempfile
+from urllib.parse import quote
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+
+
+PROGNAME = 'install-buildtools'
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+DEFAULT_INSTALL_DIR = os.path.join(os.path.split(scripts_path)[0],'buildtools')
+DEFAULT_BASE_URL = 'http://downloads.yoctoproject.org/releases/yocto'
+DEFAULT_RELEASE = 'yocto-3.2_M1'
+DEFAULT_INSTALLER_VERSION = '3.1+snapshot'
+DEFAULT_BUILDDATE = '20200617'
+
+# Python version sanity check
+if not (sys.version_info.major == 3 and sys.version_info.minor >= 4):
+ logger.error("This script requires Python 3.4 or greater")
+ logger.error("You have Python %s.%s" %
+ (sys.version_info.major, sys.version_info.minor))
+ sys.exit(1)
+
+# The following three functions are copied directly from
+# bitbake/lib/bb/utils.py, in order to allow this script
+# to run on versions of python earlier than what bitbake
+# supports (e.g. less than Python 3.5 for YP 3.1 release)
+
+def _hasher(method, filename):
+ import mmap
+
+ with open(filename, "rb") as f:
+ try:
+ with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
+ for chunk in iter(lambda: mm.read(8192), b''):
+ method.update(chunk)
+ except ValueError:
+ # You can't mmap() an empty file so silence this exception
+ pass
+ return method.hexdigest()
+
+
+def md5_file(filename):
+ """
+ Return the hex string representation of the MD5 checksum of filename.
+ """
+ import hashlib
+ return _hasher(hashlib.md5(), filename)
+
+def sha256_file(filename):
+ """
+ Return the hex string representation of the 256-bit SHA checksum of
+ filename.
+ """
+ import hashlib
+ return _hasher(hashlib.sha256(), filename)
+
+
+def main():
+ global DEFAULT_INSTALL_DIR
+ global DEFAULT_BASE_URL
+ global DEFAULT_RELEASE
+ global DEFAULT_INSTALLER_VERSION
+ global DEFAULT_BUILDDATE
+ filename = ""
+ release = ""
+ buildtools_url = ""
+ install_dir = ""
+ arch = platform.machine()
+
+ parser = argparse.ArgumentParser(
+ description="Buildtools installation helper",
+ add_help=False)
+ parser.add_argument('-u', '--url',
+ help='URL from where to fetch buildtools SDK installer, not '
+ 'including filename (optional)\n'
+ 'Requires --filename.',
+ action='store')
+ parser.add_argument('-f', '--filename',
+ help='filename for the buildtools SDK installer to be installed '
+ '(optional)\nRequires --url',
+ action='store')
+ parser.add_argument('-d', '--directory',
+ default=DEFAULT_INSTALL_DIR,
+ help='directory where buildtools SDK will be installed (optional)',
+ action='store')
+ parser.add_argument('-r', '--release',
+ default=DEFAULT_RELEASE,
+ help='Yocto Project release string for SDK which will be '
+ 'installed (optional)',
+ action='store')
+ parser.add_argument('-V', '--installer-version',
+ default=DEFAULT_INSTALLER_VERSION,
+ help='version string for the SDK to be installed (optional)',
+ action='store')
+ parser.add_argument('-b', '--base-url',
+ default=DEFAULT_BASE_URL,
+ help='base URL from which to fetch SDK (optional)', action='store')
+ parser.add_argument('-t', '--build-date',
+ default=DEFAULT_BUILDDATE,
+ help='Build date of pre-release SDK (optional)', action='store')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--with-extended-buildtools', action='store_true',
+ dest='with_extended_buildtools',
+ default=True,
+ help='enable extended buildtools tarball (on by default)')
+ group.add_argument('--without-extended-buildtools', action='store_false',
+ dest='with_extended_buildtools',
+ help='disable extended buildtools (traditional buildtools tarball)')
+ parser.add_argument('-c', '--check', help='enable md5 checksum checking',
+ default=True,
+ action='store_true')
+ parser.add_argument('-D', '--debug', help='enable debug output',
+ action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors',
+ action='store_true')
+
+ parser.add_argument('-h', '--help', action='help',
+ default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if args.url and args.filename:
+ logger.debug("--url and --filename detected. Ignoring --base-url "
+ "--release --installer-version arguments.")
+ filename = args.filename
+ buildtools_url = "%s/%s" % (args.url, filename)
+ else:
+ if args.base_url:
+ base_url = args.base_url
+ else:
+ base_url = DEFAULT_BASE_URL
+ if args.release:
+ # check if this is a pre-release "milestone" SDK
+ m = re.search(r"^(?P<distro>[a-zA-Z\-]+)(?P<version>[0-9.]+)(?P<milestone>_M[1-9])$",
+ args.release)
+ logger.debug("milestone regex: %s" % m)
+ if m and m.group('milestone'):
+ logger.debug("release[distro]: %s" % m.group('distro'))
+ logger.debug("release[version]: %s" % m.group('version'))
+ logger.debug("release[milestone]: %s" % m.group('milestone'))
+ if not args.build_date:
+ logger.error("Milestone installers require --build-date")
+ else:
+ if args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/milestones/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+ # regular release SDK
+ else:
+ if args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ # Fetch installer
+ logger.info("Fetching buildtools installer")
+ tmpbuildtools = os.path.join(tmpsdk_dir, filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools, buildtools_url), shell=True)
+ if ret != 0:
+ logger.error("Could not download file from %s" % buildtools_url)
+ return ret
+
+ # Verify checksum
+ if args.check:
+ logger.info("Fetching buildtools installer checksum")
+ checksum_type = ""
+ for checksum_type in ["md5sum", "sha256sum"]:
+ check_url = "{}.{}".format(buildtools_url, checksum_type)
+ checksum_filename = "{}.{}".format(filename, checksum_type)
+ tmpbuildtools_checksum = os.path.join(tmpsdk_dir, checksum_filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools_checksum, check_url), shell=True)
+ if ret == 0:
+ break
+ else:
+ if ret != 0:
+ logger.error("Could not download file from %s" % check_url)
+ return ret
+ regex = re.compile(r"^(?P<checksum>[0-9a-f]+)\s+(?P<path>.*/)?(?P<filename>.*)$")
+ with open(tmpbuildtools_checksum, 'rb') as f:
+ original = f.read()
+ m = re.search(regex, original.decode("utf-8"))
+ logger.debug("checksum regex match: %s" % m)
+ logger.debug("checksum: %s" % m.group('checksum'))
+ logger.debug("path: %s" % m.group('path'))
+ logger.debug("filename: %s" % m.group('filename'))
+ if filename != m.group('filename'):
+ logger.error("Filename does not match name in checksum")
+ return 1
+ checksum = m.group('checksum')
+ if checksum_type == "md5sum":
+ checksum_value = md5_file(tmpbuildtools)
+ else:
+ checksum_value = sha256_file(tmpbuildtools)
+ if checksum == checksum_value:
+ logger.info("Checksum success")
+ else:
+ logger.error("Checksum %s expected. Actual checksum is %s." %
+ (checksum, checksum_value))
+ return 1
+
+ # Make installer executable
+ logger.info("Making installer executable")
+ st = os.stat(tmpbuildtools)
+ os.chmod(tmpbuildtools, st.st_mode | stat.S_IEXEC)
+ logger.debug(os.stat(tmpbuildtools))
+ if args.directory:
+ install_dir = args.directory
+ ret = subprocess.call("%s -d %s -y" %
+ (tmpbuildtools, install_dir), shell=True)
+ else:
+ install_dir = "/opt/poky/%s" % args.installer_version
+ ret = subprocess.call("%s -y" % tmpbuildtools, shell=True)
+ if ret != 0:
+ logger.error("Could not run buildtools installer")
+ return ret
+
+ # Setup the environment
+ logger.info("Setting up the environment")
+ regex = re.compile(r'^(?P<export>export )?(?P<env_var>[A-Z_]+)=(?P<env_val>.+)$')
+ with open("%s/environment-setup-%s-pokysdk-linux" %
+ (install_dir, arch), 'rb') as f:
+ for line in f:
+ match = regex.search(line.decode('utf-8'))
+ logger.debug("export regex: %s" % match)
+ if match:
+ env_var = match.group('env_var')
+ logger.debug("env_var: %s" % env_var)
+ env_val = match.group('env_val')
+ logger.debug("env_val: %s" % env_val)
+ os.environ[env_var] = env_val
+
+ # Test installation
+ logger.info("Testing installation")
+ tool = ""
+ m = re.search("extended", tmpbuildtools)
+ logger.debug("extended regex: %s" % m)
+ if args.with_extended_buildtools and not m:
+ logger.info("Ignoring --with-extended-buildtools as filename "
+ "does not contain 'extended'")
+ if args.with_extended_buildtools and m:
+ tool = 'gcc'
+ else:
+ tool = 'tar'
+ logger.debug("install_dir: %s" % install_dir)
+ cmd = shlex.split("/usr/bin/which %s" % tool)
+ logger.debug("cmd: %s" % cmd)
+ logger.debug("tool: %s" % tool)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, errors = proc.communicate()
+ logger.debug("proc.args: %s" % proc.args)
+ logger.debug("proc.communicate(): output %s" % output)
+ logger.debug("proc.communicate(): errors %s" % errors)
+ which_tool = output.decode('utf-8')
+ logger.debug("which %s: %s" % (tool, which_tool))
+ ret = proc.returncode
+ if not which_tool.startswith(install_dir):
+ logger.error("Something went wrong: %s not found in %s" %
+ (tool, install_dir))
+ if ret != 0:
+ logger.error("Something went wrong: installation failed")
+ else:
+ logger.info("Installation successful. Remember to source the "
+ "environment setup script now and in any new session.")
+ return ret
+
+ finally:
+ # cleanup tmp directory
+ shutil.rmtree(tmpsdk_dir)
+
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
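Note: the checksum step above downloads <installer>.md5sum or <installer>.sha256sum, parses the "HASH  filename" line with a regex, confirms the filename matches, and compares against a locally computed digest. A compact sketch of that verification in isolation, assuming a standard md5sum/sha256sum output line (illustrative only):

    # Compact sketch of the checksum verification flow (assumed 'HASH  filename'
    # line format, as produced by md5sum/sha256sum).
    import hashlib
    import re

    def verify(installer_path, checksum_line, checksum_type="sha256sum"):
        m = re.match(r"^(?P<checksum>[0-9a-f]+)\s+(?P<path>.*/)?(?P<filename>.*)$",
                     checksum_line.strip())
        if not m:
            raise ValueError("unparseable checksum line")
        algo = hashlib.md5() if checksum_type == "md5sum" else hashlib.sha256()
        with open(installer_path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):
                algo.update(chunk)
        return algo.hexdigest() == m.group("checksum")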
diff --git a/external/poky/scripts/lib/argparse_oe.py b/external/poky/scripts/lib/argparse_oe.py
index 9bdfc1ce..94a4ac50 100644
--- a/external/poky/scripts/lib/argparse_oe.py
+++ b/external/poky/scripts/lib/argparse_oe.py
@@ -1,3 +1,7 @@
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import argparse
from collections import defaultdict, OrderedDict
diff --git a/external/poky/scripts/lib/build_perf/__init__.py b/external/poky/scripts/lib/build_perf/__init__.py
index 1f8b7290..dcbb7804 100644
--- a/external/poky/scripts/lib/build_perf/__init__.py
+++ b/external/poky/scripts/lib/build_perf/__init__.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test library functions"""
diff --git a/external/poky/scripts/lib/build_perf/html.py b/external/poky/scripts/lib/build_perf/html.py
index 578bb162..d1273c9c 100644
--- a/external/poky/scripts/lib/build_perf/html.py
+++ b/external/poky/scripts/lib/build_perf/html.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for HTML reporting"""
from jinja2 import Environment, PackageLoader
diff --git a/external/poky/scripts/lib/build_perf/report.py b/external/poky/scripts/lib/build_perf/report.py
index d99a3679..4e8e2a8a 100644
--- a/external/poky/scripts/lib/build_perf/report.py
+++ b/external/poky/scripts/lib/build_perf/report.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Handling of build perf test reports"""
from collections import OrderedDict, Mapping, namedtuple
diff --git a/external/poky/scripts/lib/buildstats.py b/external/poky/scripts/lib/buildstats.py
index f7db3eaf..c69b5bf4 100644
--- a/external/poky/scripts/lib/buildstats.py
+++ b/external/poky/scripts/lib/buildstats.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Functionality for analyzing buildstats"""
import json
@@ -268,13 +261,17 @@ class BuildStats(dict):
self[pkg].aggregate(data)
-def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
+def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None, only_tasks=[]):
"""Compare the tasks of two buildstats"""
tasks_diff = []
pkgs = set(bs1.keys()).union(set(bs2.keys()))
for pkg in pkgs:
tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
+ if only_tasks:
+ tasks1 = {k: v for k, v in tasks1.items() if k in only_tasks}
+ tasks2 = {k: v for k, v in tasks2.items() if k in only_tasks}
+
if not tasks1:
pkg_op = '+'
elif not tasks2:
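Note: the new only_tasks argument restricts the comparison to a whitelist of task names before each package is classified as added, removed or changed. The filtering step in isolation, using hypothetical task dictionaries keyed by task name:

    # Sketch of the only_tasks filtering added to diff_buildstats(), with
    # hypothetical task dictionaries.
    def filter_tasks(tasks, only_tasks):
        # An empty whitelist means "keep everything", matching the default.
        if not only_tasks:
            return tasks
        return {name: data for name, data in tasks.items() if name in only_tasks}

    tasks1 = {'do_compile': 12.0, 'do_package': 3.5}
    print(filter_tasks(tasks1, ['do_compile']))   # {'do_compile': 12.0}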
diff --git a/external/poky/scripts/lib/checklayer/__init__.py b/external/poky/scripts/lib/checklayer/__init__.py
index 670f0eea..fe545607 100644
--- a/external/poky/scripts/lib/checklayer/__init__.py
+++ b/external/poky/scripts/lib/checklayer/__init__.py
@@ -1,7 +1,9 @@
# Yocto Project layer check tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import re
@@ -57,9 +59,14 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
depends = ldata.getVar('LAYERDEPENDS_%s' % name)
compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
+ try:
+ depDict = bb.utils.explode_dep_versions2(depends or "")
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))
+
collections[name]['priority'] = priority
collections[name]['pattern'] = pattern
- collections[name]['depends'] = depends
+ collections[name]['depends'] = ' '.join(depDict.keys())
collections[name]['compat'] = compat
return collections
@@ -141,6 +148,9 @@ def detect_layers(layer_directories, no_auto):
def _find_layer_depends(depend, layers):
for layer in layers:
+ if 'collections' not in layer:
+ continue
+
for collection in layer['collections']:
if depend == collection:
return layer
@@ -219,6 +229,20 @@ def add_layers(bblayersconf, layers, logger):
f.write("\nBBLAYERS += \"%s\"\n" % path)
return True
+def check_bblayers(bblayersconf, layer_path, logger):
+ '''
+ If layer_path found in BBLAYERS return True
+ '''
+ import bb.parse
+ import bb.data
+
+ ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True)
+ for bblayer in (ldata.getVar('BBLAYERS') or '').split():
+ if os.path.normpath(bblayer) == os.path.normpath(layer_path):
+ return True
+
+ return False
+
def check_command(error_msg, cmd, cwd=None):
'''
Run a command under a shell, capture stdout and stderr in a single stream,
@@ -243,7 +267,7 @@ def get_signatures(builddir, failsafe=False, machine=None):
sigs = {}
tune2tasks = {}
- cmd = ''
+ cmd = 'BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
if machine:
cmd += 'MACHINE=%s ' % machine
cmd += 'bitbake '
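Note: the new check_bblayers() helper decides whether a layer is already listed by normalising both each BBLAYERS entry and the candidate path before comparing. The equivalent comparison in isolation, with a hypothetical BBLAYERS value instead of a parsed bblayers.conf:

    # Isolated sketch of the path comparison used by check_bblayers(), with a
    # hypothetical BBLAYERS value.
    import os

    def layer_in_bblayers(bblayers_value, layer_path):
        return any(os.path.normpath(p) == os.path.normpath(layer_path)
                   for p in bblayers_value.split())

    print(layer_in_bblayers('/srv/poky/meta /srv/poky/meta-poky/', '/srv/poky/meta-poky'))
    # -> True (trailing slashes and '..' segments are normalised away)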
diff --git a/external/poky/scripts/lib/checklayer/case.py b/external/poky/scripts/lib/checklayer/case.py
index 9dd00412..fa9dee38 100644
--- a/external/poky/scripts/lib/checklayer/case.py
+++ b/external/poky/scripts/lib/checklayer/case.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
from oeqa.core.case import OETestCase
diff --git a/external/poky/scripts/lib/checklayer/cases/bsp.py b/external/poky/scripts/lib/checklayer/cases/bsp.py
index b6b611be..7fd56f5d 100644
--- a/external/poky/scripts/lib/checklayer/cases/bsp.py
+++ b/external/poky/scripts/lib/checklayer/cases/bsp.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
diff --git a/external/poky/scripts/lib/checklayer/cases/common.py b/external/poky/scripts/lib/checklayer/cases/common.py
index 1bef61b0..b82304e3 100644
--- a/external/poky/scripts/lib/checklayer/cases/common.py
+++ b/external/poky/scripts/lib/checklayer/cases/common.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import glob
import os
@@ -10,7 +12,7 @@ from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
# The top-level README file may have a suffix (like README.rst or README.txt).
- readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*'))
+ readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
msg="Layer doesn't contains README file.")
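Note: the new pattern accepts any capitalisation of the README name rather than only the upper-case form. A quick check of the character-class pattern against a few hypothetical filenames:

    # Quick check of the case-insensitive README pattern.
    import fnmatch
    names = ['README', 'Readme.rst', 'readme.txt', 'NOTES']
    print([n for n in names if fnmatch.fnmatch(n, '[Rr][Ee][Aa][Dd][Mm][Ee]*')])
    # -> ['README', 'Readme.rst', 'readme.txt']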
diff --git a/external/poky/scripts/lib/checklayer/cases/distro.py b/external/poky/scripts/lib/checklayer/cases/distro.py
index df1b3035..f0bee549 100644
--- a/external/poky/scripts/lib/checklayer/cases/distro.py
+++ b/external/poky/scripts/lib/checklayer/cases/distro.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
diff --git a/external/poky/scripts/lib/checklayer/context.py b/external/poky/scripts/lib/checklayer/context.py
index 1bec2c41..4de8f668 100644
--- a/external/poky/scripts/lib/checklayer/context.py
+++ b/external/poky/scripts/lib/checklayer/context.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/external/poky/scripts/lib/devtool/__init__.py b/external/poky/scripts/lib/devtool/__init__.py
index 8fc7fffc..d39c474f 100644
--- a/external/poky/scripts/lib/devtool/__init__.py
+++ b/external/poky/scripts/lib/devtool/__init__.py
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugins module"""
import os
diff --git a/external/poky/scripts/lib/devtool/build.py b/external/poky/scripts/lib/devtool/build.py
index ba9593f1..935ffab4 100644
--- a/external/poky/scripts/lib/devtool/build.py
+++ b/external/poky/scripts/lib/devtool/build.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool build plugin"""
import os
@@ -21,7 +11,8 @@ import bb
import logging
import argparse
import tempfile
-from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
logger = logging.getLogger('devtool')
@@ -53,12 +44,22 @@ def _get_build_tasks(config):
def build(args, config, basepath, workspace):
"""Entry point for the devtool 'build' subcommand"""
workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ deploytask = 'do_deploy' in rd.getVar('__BBTASKS')
+ finally:
+ tinfoil.shutdown()
if args.clean:
# use clean instead of cleansstate to avoid messing things up in eSDK
build_tasks = ['do_clean']
else:
build_tasks = _get_build_tasks(config)
+ if deploytask:
+ build_tasks.append('do_deploy')
bbappend = workspace[workspacepn]['bbappend']
if args.disable_parallel_make:
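Note: devtool build now parses the recipe through tinfoil, checks whether do_deploy appears in __BBTASKS, and appends it to the task list when present. The selection logic in isolation, with a hypothetical task list standing in for the recipe datastore:

    # Isolated sketch of the task-list selection in devtool build, with a
    # hypothetical __BBTASKS value.
    def build_tasks(default_tasks, bbtasks, clean=False):
        if clean:
            return ['do_clean']        # cleansstate is avoided to keep eSDK state intact
        tasks = list(default_tasks)
        if 'do_deploy' in bbtasks:     # only recipes that define do_deploy get it
            tasks.append('do_deploy')
        return tasks

    print(build_tasks(['do_populate_sysroot'], ['do_compile', 'do_deploy']))
    # -> ['do_populate_sysroot', 'do_deploy']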
diff --git a/external/poky/scripts/lib/devtool/build_image.py b/external/poky/scripts/lib/devtool/build_image.py
index e5810389..9388abba 100644
--- a/external/poky/scripts/lib/devtool/build_image.py
+++ b/external/poky/scripts/lib/devtool/build_image.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the build-image subcommand."""
diff --git a/external/poky/scripts/lib/devtool/build_sdk.py b/external/poky/scripts/lib/devtool/build_sdk.py
index b89d65b0..6fe02fff 100644
--- a/external/poky/scripts/lib/devtool/build_sdk.py
+++ b/external/poky/scripts/lib/devtool/build_sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
diff --git a/external/poky/scripts/lib/devtool/deploy.py b/external/poky/scripts/lib/devtool/deploy.py
index 886004b5..6a997735 100644
--- a/external/poky/scripts/lib/devtool/deploy.py
+++ b/external/poky/scripts/lib/devtool/deploy.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the deploy subcommands"""
import logging
@@ -211,12 +201,20 @@ def deploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
scp_port = ''
ssh_port = ''
if args.port:
scp_port = "-P %s" % args.port
ssh_port = "-p %s" % args.port
+ if args.key:
+ extraoptions += ' -i %s' % args.key
+
# In order to delete previously deployed files and have the manifest file on
# the target, we write out a shell script and then copy it to the target
# so we can then run it (piping tar output to it).
@@ -238,7 +236,7 @@ def deploy(args, config, basepath, workspace):
for fpath, fsize in filelist:
f.write('%s %d\n' % (fpath, fsize))
# Copy them to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -246,7 +244,7 @@ def deploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s %s \'sh %s %s %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
if ret != 0:
raise DevtoolError('Deploy failed - rerun with -s to get a complete '
'error message')
@@ -276,6 +274,11 @@ def undeploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
scp_port = ''
ssh_port = ''
if args.port:
@@ -292,7 +295,7 @@ def undeploy(args, config, basepath, workspace):
with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
f.write(shellscript)
# Copy it to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -300,7 +303,7 @@ def undeploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = subprocess.call('ssh %s %s %s \'sh %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
if ret != 0:
raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
'error message')
@@ -324,7 +327,10 @@ def register_commands(subparsers, context):
parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_deploy.add_argument('-I', '--key',
+                               help='Specify ssh private key for connection to the target')
strip_opts = parser_deploy.add_mutually_exclusive_group(required=False)
strip_opts.add_argument('-S', '--strip',
@@ -346,5 +352,9 @@ def register_commands(subparsers, context):
parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_undeploy.add_argument('-I', '--key',
+                               help='Specify ssh private key for connection to the target')
+
parser_undeploy.set_defaults(func=undeploy)
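Note: with --ssh-exec and --key, the scp invocation gains -S <exec> and -i <key>, and the remote commands are run through the chosen executable instead of plain ssh. A sketch of how the command strings are assembled, with placeholder arguments in place of the parsed devtool options:

    # Sketch of the scp/ssh command assembly with the new options; placeholder
    # values stand in for the parsed devtool arguments.
    def build_commands(target, ssh_exec=None, port=None, key=None):
        ssh_cmd = ssh_exec or 'ssh'                  # --ssh-exec replaces plain ssh
        scp_opts, ssh_opts = [], []
        if ssh_exec:
            scp_opts.append('-S %s' % ssh_exec)      # scp selects the ssh program via -S
        if port:
            scp_opts.append('-P %s' % port)
            ssh_opts.append('-p %s' % port)
        if key:
            scp_opts.append('-i %s' % key)           # --key adds an identity file
            ssh_opts.append('-i %s' % key)
        scp = 'scp %s files/* %s:/tmp' % (' '.join(scp_opts), target)
        ssh = '%s %s %s sh /tmp/deploy-script' % (ssh_cmd, ' '.join(ssh_opts), target)
        return scp, ssh

    print(build_commands('root@192.168.7.2', port='2222', key='~/.ssh/id_rsa'))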
diff --git a/external/poky/scripts/lib/devtool/export.py b/external/poky/scripts/lib/devtool/export.py
index 35349e2c..01174eda 100644
--- a/external/poky/scripts/lib/devtool/export.py
+++ b/external/poky/scripts/lib/devtool/export.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool export plugin"""
import os
diff --git a/external/poky/scripts/lib/devtool/import.py b/external/poky/scripts/lib/devtool/import.py
index 4264b7d8..68298516 100644
--- a/external/poky/scripts/lib/devtool/import.py
+++ b/external/poky/scripts/lib/devtool/import.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool import plugin"""
import os
diff --git a/external/poky/scripts/lib/devtool/menuconfig.py b/external/poky/scripts/lib/devtool/menuconfig.py
new file mode 100644
index 00000000..95384c53
--- /dev/null
+++ b/external/poky/scripts/lib/devtool/menuconfig.py
@@ -0,0 +1,79 @@
+# OpenEmbedded Development tool - menuconfig command plugin
+#
+# Copyright (C) 2018 Xilinx
+# Written by: Chandana Kalluri <ckalluri@xilinx.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool menuconfig plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+import glob
+from devtool import setup_tinfoil, parse_recipe, DevtoolError, standard, exec_build_env_command
+from devtool import check_workspace_recipe
+logger = logging.getLogger('devtool')
+
+def menuconfig(args, config, basepath, workspace):
+ """Entry point for the devtool 'menuconfig' subcommand"""
+
+ rd = ""
+ kconfigpath = ""
+ pn_src = ""
+ localfilesdir = ""
+ workspace_dir = ""
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.component, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+
+ check_workspace_recipe(workspace, args.component)
+ pn = rd.getVar('PN', True)
+
+ if not rd.getVarFlag('do_menuconfig','task'):
+        raise DevtoolError("This recipe does not support the menuconfig option")
+
+ workspace_dir = os.path.join(config.workspace_path,'sources')
+ kconfigpath = rd.getVar('B')
+ pn_src = os.path.join(workspace_dir,pn)
+
+ # add check to see if oe_local_files exists or not
+ localfilesdir = os.path.join(pn_src,'oe-local-files')
+ if not os.path.exists(localfilesdir):
+ bb.utils.mkdirhier(localfilesdir)
+ # Add gitignore to ensure source tree is clean
+ gitignorefile = os.path.join(localfilesdir,'.gitignore')
+ with open(gitignorefile, 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n')
+ f.write('*\n')
+
+ finally:
+ tinfoil.shutdown()
+
+ logger.info('Launching menuconfig')
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c menuconfig %s' % pn, watch=True)
+ fragment = os.path.join(localfilesdir, 'devtool-fragment.cfg')
+ res = standard._create_kconfig_diff(pn_src,rd,fragment)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """register devtool subcommands from this plugin"""
+ parser_menuconfig = subparsers.add_parser('menuconfig',help='Alter build-time configuration for a recipe', description='Launches the make menuconfig command (for recipes where do_menuconfig is available), allowing users to make changes to the build-time configuration. Creates a config fragment corresponding to changes made.', group='advanced')
+    parser_menuconfig.add_argument('component', help='component to alter config')
+ parser_menuconfig.set_defaults(func=menuconfig,fixed_setup=context.fixed_setup)
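Note: in use, the plugin is invoked as "devtool menuconfig <recipe>" for a recipe already in the workspace; it refuses recipes without a do_menuconfig task, runs bitbake -c menuconfig, and writes the resulting fragment to oe-local-files/devtool-fragment.cfg. The guard it applies before creating the local-files directory, shown standalone with a hypothetical source-tree path:

    # Standalone sketch of the oe-local-files/.gitignore guard used above
    # (hypothetical source-tree path).
    import os

    def ensure_local_files_dir(pn_src):
        localfilesdir = os.path.join(pn_src, 'oe-local-files')
        if not os.path.exists(localfilesdir):
            os.makedirs(localfilesdir)
            with open(os.path.join(localfilesdir, '.gitignore'), 'w') as f:
                f.write('# Ignore local files, by default. '
                        'Remove this file if you want to commit the directory to Git\n*\n')
        return localfilesdir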
diff --git a/external/poky/scripts/lib/devtool/package.py b/external/poky/scripts/lib/devtool/package.py
index af9e8f15..c2367342 100644
--- a/external/poky/scripts/lib/devtool/package.py
+++ b/external/poky/scripts/lib/devtool/package.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the package subcommands"""
import os
diff --git a/external/poky/scripts/lib/devtool/runqemu.py b/external/poky/scripts/lib/devtool/runqemu.py
index e26cf28c..ead978aa 100644
--- a/external/poky/scripts/lib/devtool/runqemu.py
+++ b/external/poky/scripts/lib/devtool/runqemu.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool runqemu plugin"""
diff --git a/external/poky/scripts/lib/devtool/sdk.py b/external/poky/scripts/lib/devtool/sdk.py
index 46167537..3aa42a14 100644
--- a/external/poky/scripts/lib/devtool/sdk.py
+++ b/external/poky/scripts/lib/devtool/sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
diff --git a/external/poky/scripts/lib/devtool/search.py b/external/poky/scripts/lib/devtool/search.py
index b4f209b7..d24040df 100644
--- a/external/poky/scripts/lib/devtool/search.py
+++ b/external/poky/scripts/lib/devtool/search.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool search plugin"""
diff --git a/external/poky/scripts/lib/devtool/standard.py b/external/poky/scripts/lib/devtool/standard.py
index ea09bbff..bab644b8 100644
--- a/external/poky/scripts/lib/devtool/standard.py
+++ b/external/poky/scripts/lib/devtool/standard.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool standard plugins"""
import os
@@ -155,8 +145,8 @@ def add(args, config, basepath, workspace):
extracmdopts += ' --src-subdir "%s"' % args.src_subdir
if args.autorev:
extracmdopts += ' -a'
- if args.fetch_dev:
- extracmdopts += ' --fetch-dev'
+ if args.npm_dev:
+ extracmdopts += ' --npm-dev'
if args.mirrors:
extracmdopts += ' --mirrors'
if args.srcrev:
@@ -270,14 +260,10 @@ def add(args, config, basepath, workspace):
f.write('}\n')
if bb.data.inherits_class('npm', rd):
- f.write('do_install_append() {\n')
- f.write(' # Remove files added to source dir by devtool/externalsrc\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
- f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
- f.write(' done\n')
+ f.write('python do_configure_append() {\n')
+ f.write(' pkgdir = d.getVar("NPM_PACKAGE")\n')
+ f.write(' lockfile = os.path.join(pkgdir, "singletask.lock")\n')
+ f.write(' bb.utils.remove(lockfile)\n')
f.write('}\n')
# Check if the new layer provides recipes whose priorities have been
@@ -471,11 +457,37 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
+def symlink_oelocal_files_srctree(rd,srctree):
+ import oe.patch
+ if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
+ # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+ # (otherwise the recipe won't build as expected)
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ addfiles = []
+ for root, _, files in os.walk(local_files_dir):
+ relpth = os.path.relpath(root, local_files_dir)
+ if relpth != '.':
+ bb.utils.mkdirhier(os.path.join(srctree, relpth))
+ for fn in files:
+ if fn == '.gitignore':
+ continue
+ destpth = os.path.join(srctree, relpth, fn)
+ if os.path.exists(destpth):
+ os.unlink(destpth)
+ os.symlink('oe-local-files/%s' % fn, destpth)
+ addfiles.append(os.path.join(relpth, fn))
+ if addfiles:
+ bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
+ bb.process.run('git %s commit -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+
def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
import oe.recipeutils
import oe.patch
+ import oe.path
pn = d.getVar('PN')
@@ -572,7 +584,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
with open(preservestampfile, 'w') as f:
f.write(d.getVar('STAMP'))
try:
- if bb.data.inherits_class('kernel-yocto', d):
+ if is_kernel_yocto:
# We need to generate the kernel config
task = 'do_configure'
else:
@@ -599,6 +611,23 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
raise DevtoolError('Something went wrong with source extraction - the devtool-source class was not active or did not function correctly:\n%s' % str(e))
srcsubdir_rel = os.path.relpath(srcsubdir, os.path.join(tempdir, 'workdir'))
+ # Check if work-shared is empty, if yes
+ # find source and copy to work-shared
+ if is_kernel_yocto:
+ workshareddir = d.getVar('STAGING_KERNEL_DIR')
+ staging_kerVer = get_staging_kver(workshareddir)
+ kernelVersion = d.getVar('LINUX_VERSION')
+
+ # handle dangling symbolic link in work-shared:
+ if os.path.islink(workshareddir):
+ os.unlink(workshareddir)
+
+ if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
+ shutil.rmtree(workshareddir)
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ elif not os.path.exists(workshareddir):
+ oe.path.copyhardlinktree(srcsubdir,workshareddir)
+
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
@@ -627,29 +656,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
-
- if os.path.abspath(d.getVar('S')) == os.path.abspath(d.getVar('WORKDIR')):
- # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
- # (otherwise the recipe won't build as expected)
- local_files_dir = os.path.join(srctree, 'oe-local-files')
- addfiles = []
- for root, _, files in os.walk(local_files_dir):
- relpth = os.path.relpath(root, local_files_dir)
- if relpth != '.':
- bb.utils.mkdirhier(os.path.join(srctree, relpth))
- for fn in files:
- if fn == '.gitignore':
- continue
- destpth = os.path.join(srctree, relpth, fn)
- if os.path.exists(destpth):
- os.unlink(destpth)
- os.symlink('oe-local-files/%s' % fn, destpth)
- addfiles.append(os.path.join(relpth, fn))
- if addfiles:
- bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
- bb.process.run('git %s commit -a -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+ symlink_oelocal_files_srctree(d,srctree)
if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
@@ -717,11 +724,31 @@ def _check_preserve(config, recipename):
tf.write(line)
os.rename(newfile, origfile)
+def get_staging_kver(srcdir):
+ # Kernel version from work-shared
+ kerver = []
+ staging_kerVer=""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ with open(os.path.join(srcdir,"Makefile")) as f:
+ version = [next(f) for x in range(5)][1:4]
+ for word in version:
+ kerver.append(word.split('= ')[1].split('\n')[0])
+ staging_kerVer = ".".join(kerver)
+ return staging_kerVer
+
+def get_staging_kbranch(srcdir):
+ staging_kbranch = ""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ (branch, _) = bb.process.run('git branch | grep \* | cut -d \' \' -f2', cwd=srcdir)
+ staging_kbranch = "".join(branch.split('\n')[0])
+ return staging_kbranch
+
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
import oe.recipeutils
import oe.patch
+ import oe.path
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" %
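Note: the new get_staging_kver() helper recovers the staged kernel version by reading the VERSION, PATCHLEVEL and SUBLEVEL assignments from the first lines of the kernel Makefile in work-shared. The same parsing in isolation, run against a hypothetical in-memory Makefile header rather than STAGING_KERNEL_DIR:

    # Isolated sketch of the Makefile-header parsing done by get_staging_kver(),
    # using an in-memory example instead of STAGING_KERNEL_DIR/Makefile.
    import io

    makefile = io.StringIO(
        "# SPDX-License-Identifier: GPL-2.0\n"
        "VERSION = 5\n"
        "PATCHLEVEL = 4\n"
        "SUBLEVEL = 20\n"
        "EXTRAVERSION =\n")

    lines = [next(makefile) for _ in range(5)][1:4]   # VERSION/PATCHLEVEL/SUBLEVEL
    kver = ".".join(line.split('= ')[1].strip() for line in lines)
    print(kver)   # 5.4.20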
@@ -763,6 +790,59 @@ def modify(args, config, basepath, workspace):
initial_rev = None
commits = []
check_commits = False
+
+ if bb.data.inherits_class('kernel-yocto', rd):
+ # Current set kernel version
+ kernelVersion = rd.getVar('LINUX_VERSION')
+ srcdir = rd.getVar('STAGING_KERNEL_DIR')
+ kbranch = rd.getVar('KBRANCH')
+
+ staging_kerVer = get_staging_kver(srcdir)
+ staging_kbranch = get_staging_kbranch(srcdir)
+ if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
+ oe.path.copyhardlinktree(srcdir,srctree)
+ workdir = rd.getVar('WORKDIR')
+ srcsubdir = rd.getVar('S')
+ localfilesdir = os.path.join(srctree,'oe-local-files')
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
+ local_files = oe.recipeutils.get_recipe_local_files(rd)
+
+ for key in local_files.copy():
+ if key.endswith('scc'):
+ sccfile = open(local_files[key], 'r')
+ for l in sccfile:
+ line = l.split()
+ if line and line[0] in ('kconf', 'patch'):
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if not cfg in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
+ sccfile.close()
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if os.path.exists(os.path.join(workdir, fname)) and (srcabspath == workdir or not os.path.join(workdir, fname).startswith(srcabspath + os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
+ with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file ''if you want to commit the directory to Git\n*\n')
+
+ symlink_oelocal_files_srctree(rd,srctree)
+
+ task = 'do_configure'
+ res = tinfoil.build_targets(pn, task, handle_events=True)
+
+ # Copy .config to workspace
+ kconfpath = rd.getVar('B')
+ logger.info('Copying kernel config to workspace')
+ shutil.copy2(os.path.join(kconfpath, '.config'),srctree)
+
+ # Set no_extract to True; we still need to get initial_rev
+ # by parsing the git repo
+ args.no_extract = True
+
if not args.no_extract:
initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
if not initial_rev:
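[Editor's note] The new kernel-yocto branch of modify() only reuses the shared staging source when it matches what the recipe is configured to build; a compact restatement of that check (rd being the parsed recipe datastore, the staging_* values coming from the helpers added above):

    import os

    def can_reuse_staging_kernel(rd, srcdir, staging_kver, staging_kbranch):
        # Reuse STAGING_KERNEL_DIR only if it is populated and matches the
        # recipe's LINUX_VERSION and KBRANCH; otherwise devtool falls back
        # to the normal source-extraction path.
        kernel_version = rd.getVar('LINUX_VERSION') or ''
        kbranch = rd.getVar('KBRANCH') or ''
        populated = os.path.exists(srcdir) and bool(os.listdir(srcdir))
        return populated and kernel_version in staging_kver and staging_kbranch == kbranch
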
@@ -854,6 +934,13 @@ def modify(args, config, basepath, workspace):
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
+ if rd.getVarFlag('do_menuconfig','task'):
+ f.write('\ndo_configure_append() {\n'
+ ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ ' fi\n'
+ '}\n')
if initial_rev:
f.write('\n# initial_rev: %s\n' % initial_rev)
for commit in commits:
@@ -1328,6 +1415,20 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
os.unlink(os.path.join(local_files_dir, fragment_fn))
+ # Special handling for cml1, ccmake, etc. bbclasses that generate
+ # configuration fragment files that are consumed as source files
+ for frag_class, frag_name in [("cml1", "fragment.cfg"), ("ccmake", "site-file.cmake")]:
+ if bb.data.inherits_class(frag_class, rd):
+ srcpath = os.path.join(rd.getVar('WORKDIR'), frag_name)
+ if os.path.exists(srcpath):
+ if frag_name not in new_set:
+ new_set.append(frag_name)
+ # copy fragment into destdir
+ shutil.copy2(srcpath, destdir)
+ # copy fragment into local files if exists
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(srcpath, local_files_dir)
+
if new_set is not None:
for fname in new_set:
if fname in existing_files:
@@ -1516,17 +1617,17 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
patches_dir, changed_revs)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
- new_p = {}
- upd_p = {k:v for k,v in upd_p.items() if k in filter_patches}
+ new_p = OrderedDict()
+ upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
remove_files = [f for f in remove_files if f in filter_patches]
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
if appendlayerdir:
- files = dict((os.path.join(local_files_dir, key), val) for
+ files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
- files.update(dict((os.path.join(patches_dir, key), val) for
+ files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
if files or remove_files:
removevalues = None
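[Editor's note] Switching the patch maps from plain dicts to OrderedDict keeps the patches in their original SRC_URI order when filter_patches narrows them down (insertion order was not guaranteed for plain dicts before Python 3.7). A small illustration with hypothetical patch names:

    from collections import OrderedDict

    upd_p = OrderedDict([('0001-first.patch', '/patches/0001-first.patch'),
                         ('0002-second.patch', '/patches/0002-second.patch'),
                         ('0003-third.patch', '/patches/0003-third.patch')])
    filter_patches = ['0003-third.patch', '0001-first.patch']

    # Filtering preserves the original (recipe) ordering, not the filter ordering
    filtered = OrderedDict((k, v) for k, v in upd_p.items() if k in filter_patches)
    print(list(filtered))   # ['0001-first.patch', '0003-third.patch']
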
@@ -1749,7 +1850,7 @@ def status(args, config, basepath, workspace):
return 0
-def _reset(recipes, no_clean, config, basepath, workspace):
+def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
"""Reset one or more recipes"""
import oe.path
@@ -1827,10 +1928,15 @@ def _reset(recipes, no_clean, config, basepath, workspace):
srctreebase = workspace[pn]['srctreebase']
if os.path.isdir(srctreebase):
if os.listdir(srctreebase):
- # We don't want to risk wiping out any work in progress
- logger.info('Leaving source tree %s as-is; if you no '
- 'longer need it then please delete it manually'
- % srctreebase)
+ if remove_work:
+ logger.info('-r argument used on %s, removing source tree.'
+ ' You will lose any unsaved work' % pn)
+ shutil.rmtree(srctreebase)
+ else:
+ # We don't want to risk wiping out any work in progress
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
else:
# This is unlikely, but if it's empty we can just remove it
os.rmdir(srctreebase)
@@ -1840,6 +1946,10 @@ def _reset(recipes, no_clean, config, basepath, workspace):
def reset(args, config, basepath, workspace):
"""Entry point for the devtool 'reset' subcommand"""
import bb
+ import shutil
+
+ recipes = ""
+
if args.recipename:
if args.all:
raise DevtoolError("Recipe cannot be specified if -a/--all is used")
@@ -1854,7 +1964,7 @@ def reset(args, config, basepath, workspace):
else:
recipes = args.recipename
- _reset(recipes, args.no_clean, config, basepath, workspace)
+ _reset(recipes, args.no_clean, args.remove_work, config, basepath, workspace)
return 0
@@ -1862,13 +1972,27 @@ def reset(args, config, basepath, workspace):
def _get_layer(layername, d):
"""Determine the base layer path for the specified layer name/path"""
layerdirs = d.getVar('BBLAYERS').split()
- layers = {os.path.basename(p): p for p in layerdirs}
+ layers = {} # {basename: layer_paths}
+ for p in layerdirs:
+ bn = os.path.basename(p)
+ if bn not in layers:
+ layers[bn] = [p]
+ else:
+ layers[bn].append(p)
# Provide some shortcuts
if layername.lower() in ['oe-core', 'openembedded-core']:
- layerdir = layers.get('meta', None)
+ layername = 'meta'
+ layer_paths = layers.get(layername, None)
+ if not layer_paths:
+ return os.path.abspath(layername)
+ elif len(layer_paths) == 1:
+ return os.path.abspath(layer_paths[0])
else:
- layerdir = layers.get(layername, None)
- return os.path.abspath(layerdir or layername)
+ # multiple layers having the same base name
+ logger.warning("Multiple layers have the same base name '%s', use the first one '%s'." % (layername, layer_paths[0]))
+ logger.warning("Consider using path instead of base name to specify layer:\n\t\t%s" % '\n\t\t'.join(layer_paths))
+ return os.path.abspath(layer_paths[0])
+
def finish(args, config, basepath, workspace):
"""Entry point for the devtool 'finish' subcommand"""
@@ -1891,7 +2015,8 @@ def finish(args, config, basepath, workspace):
else:
raise DevtoolError('Source tree is not clean:\n\n%s\nEnsure you have committed your changes or use -f/--force if you are sure there\'s nothing that needs to be committed' % dirty)
- no_clean = False
+ no_clean = args.no_clean
+ remove_work = args.remove_work
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
@@ -2043,7 +2168,7 @@ def finish(args, config, basepath, workspace):
if args.dry_run:
logger.info('Resetting recipe (dry-run)')
else:
- _reset([args.recipename], no_clean=no_clean, config=config, basepath=basepath, workspace=workspace)
+ _reset([args.recipename], no_clean=no_clean, remove_work=remove_work, config=config, basepath=basepath, workspace=workspace)
return 0
@@ -2070,7 +2195,7 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
- parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--npm-dev', help='For npm, also fetch devDependencies', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
group = parser_add.add_mutually_exclusive_group()
@@ -2155,6 +2280,7 @@ def register_commands(subparsers, context):
parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory along with append')
parser_reset.set_defaults(func=reset)
parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
@@ -2165,6 +2291,8 @@ def register_commands(subparsers, context):
parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
parser_finish.add_argument('--force', '-f', action="store_true", help='Force continuing even if there are uncommitted changes in the source tree repository')
+ parser_finish.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory under workspace')
+ parser_finish.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
parser_finish.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
parser_finish.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
parser_finish.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
diff --git a/external/poky/scripts/lib/devtool/upgrade.py b/external/poky/scripts/lib/devtool/upgrade.py
index 20200779..327916ad 100644
--- a/external/poky/scripts/lib/devtool/upgrade.py
+++ b/external/poky/scripts/lib/devtool/upgrade.py
@@ -2,18 +2,7 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
@@ -43,7 +32,7 @@ def _run(cmd, cwd=''):
def _get_srctree(tmpdir):
srctree = tmpdir
- dirs = os.listdir(tmpdir)
+ dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
return srctree
@@ -133,18 +122,22 @@ def _cleanup_on_error(rf, srctree):
rfp = os.path.split(rf)[0] # recipe folder
rfpp = os.path.split(rfp)[0] # recipes folder
if os.path.exists(rfp):
- shutil.rmtree(b)
+ shutil.rmtree(rfp)
if not len(os.listdir(rfpp)):
os.rmdir(rfpp)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree):
- if rf:
- cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None):
+ if rf and not keep_failure:
+ _cleanup_on_error(rf, srctree)
logger.error(e)
- raise DevtoolError(e)
+ if extramsg:
+ logger.error(extramsg)
+ if keep_failure:
+ logger.info('Preserving failed upgrade files (--keep-failure)')
+ sys.exit(1)
def _get_uri(rd):
srcuris = rd.getVar('SRC_URI').split()
@@ -288,6 +281,8 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
logger.info('Preserving temporary directory %s' % tmpsrctree)
else:
shutil.rmtree(tmpsrctree)
+ if tmpdir != tmpsrctree:
+ shutil.rmtree(tmpdir)
return (rev, md5, sha256, srcbranch, srcsubdir_rel)
@@ -310,7 +305,7 @@ def _add_license_diff_to_recipe(path, diff):
f.write("\n#\n\n".encode())
f.write(orig_content)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses):
+def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
@@ -427,7 +422,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
- rd = tinfoil.parse_recipe_file(fullpath, False)
+ try:
+ rd = tinfoil.parse_recipe_file(fullpath, False)
+ except bb.tinfoil.TinfoilCommandFailed as e:
+ _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
@@ -552,18 +550,18 @@ def upgrade(args, config, basepath, workspace):
try:
logger.info('Extracting current version source...')
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ old_licenses = _extract_licenses(srctree, (rd.getVar('LIC_FILES_CHKSUM') or ""))
logger.info('Extracting upgraded version source...')
rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
- new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ new_licenses = _extract_licenses(srctree, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses)
+ rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
except DevtoolError as e:
- _upgrade_error(e, rf, srctree)
+ _upgrade_error(e, rf, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
@@ -600,6 +598,20 @@ def latest_version(args, config, basepath, workspace):
tinfoil.shutdown()
return 0
+def check_upgrade_status(args, config, basepath, workspace):
+ if not args.recipe:
+ logger.info("Checking the upstream status for all recipes may take a few minutes")
+ results = oe.recipeutils.get_recipe_upgrade_status(args.recipe)
+ for result in results:
+ # pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
+ if args.all or result[1] != 'MATCH':
+ logger.info("{:25} {:15} {:15} {} {} {}".format( result[0],
+ result[2],
+ result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
+ result[4],
+ result[5] if result[5] != 'N/A' else "",
+ "cannot be updated due to: %s" %(result[6]) if result[6] else ""))
+
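[Editor's note] Each row printed by check_upgrade_status() follows the result tuple documented in the comment above (pn, update_status, current, latest, maintainer, latest_commit, no_update_reason); a hypothetical row pushed through a simplified version of the same format string (the "new-commits-available" special case is omitted here for brevity):

    result = ("example-recipe", "UPDATE", "1.0.2", "1.2.0",
              "maintainer@example.com", "0123abc", "")
    print("{:25} {:15} {:15} {} {} {}".format(
        result[0],
        result[2],
        result[1] if result[1] != 'UPDATE' else result[3],
        result[4],
        result[5] if result[5] != 'N/A' else "",
        "cannot be updated due to: %s" % result[6] if result[6] else ""))
    # prints one aligned row: name, current version, latest version, maintainer, commit
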
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
@@ -620,6 +632,7 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_upgrade.add_argument('--keep-failure', action="store_true", help='Keep failed upgrade recipe and associated files (for debugging)')
parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
@@ -627,3 +640,10 @@ def register_commands(subparsers, context):
group='info')
parser_latest_version.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
parser_latest_version.set_defaults(func=latest_version)
+
+ parser_check_upgrade_status = subparsers.add_parser('check-upgrade-status', help="Report upgradability for multiple (or all) recipes",
+ description="Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available",
+ group='info')
+ parser_check_upgrade_status.add_argument('recipe', help='Name of the recipe to report (omit to report upgrade info for all recipes)', nargs='*')
+ parser_check_upgrade_status.add_argument('--all', '-a', help='Show all recipes, not just recipes needing upgrade', action="store_true")
+ parser_check_upgrade_status.set_defaults(func=check_upgrade_status)
diff --git a/external/poky/scripts/lib/devtool/utilcmds.py b/external/poky/scripts/lib/devtool/utilcmds.py
index 7cd139fb..96481776 100644
--- a/external/poky/scripts/lib/devtool/utilcmds.py
+++ b/external/poky/scripts/lib/devtool/utilcmds.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool utility plugins"""
diff --git a/external/poky/scripts/lib/recipetool/append.py b/external/poky/scripts/lib/recipetool/append.py
index 3f2c134a..e9d52bb6 100644
--- a/external/poky/scripts/lib/recipetool/append.py
+++ b/external/poky/scripts/lib/recipetool/append.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/lib/recipetool/create.py b/external/poky/scripts/lib/recipetool/create.py
index dbd74a1c..566c7536 100644
--- a/external/poky/scripts/lib/recipetool/create.py
+++ b/external/poky/scripts/lib/recipetool/create.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -70,11 +60,13 @@ class RecipeHandler(object):
if RecipeHandler.recipelibmap:
return
# First build up library->package mapping
- shlib_providers = oe.package.read_shlib_providers(d)
+ d2 = bb.data.createCopy(d)
+ d2.setVar("WORKDIR_PKGDATA", "${PKGDATA_DIR}")
+ shlib_providers = oe.package.read_shlib_providers(d2)
libdir = d.getVar('libdir')
base_libdir = d.getVar('base_libdir')
libpaths = list(set([base_libdir, libdir]))
- libname_re = re.compile('^lib(.+)\.so.*$')
+ libname_re = re.compile(r'^lib(.+)\.so.*$')
pkglibmap = {}
for lib, item in shlib_providers.items():
for path, pkg in item.items():
@@ -436,7 +428,7 @@ def create_recipe(args):
if scriptutils.is_src_url(source):
# Warn about github archive URLs
- if re.match('https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
+ if re.match(r'https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
fetchuri = reformat_git_uri(urldefrag(source)[0])
@@ -468,6 +460,7 @@ def create_recipe(args):
logger.error('branch= parameter and -B/--srcbranch option cannot both be specified - use one or the other')
sys.exit(1)
srcbranch = args.srcbranch
+ params['branch'] = srcbranch
nobranch = params.get('nobranch')
if nobranch and srcbranch:
logger.error('nobranch= cannot be used if you specify a branch')
@@ -485,8 +478,6 @@ def create_recipe(args):
storeTagName = params['tag']
params['nobranch'] = '1'
del params['tag']
- if scheme == 'npm':
- params['noverify'] = '1'
fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
@@ -503,9 +494,7 @@ def create_recipe(args):
if ftmpdir and args.keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- dirlist = os.listdir(srctree)
- filterout = ['git.indirectionsymlink']
- dirlist = [x for x in dirlist if x not in filterout]
+ dirlist = scriptutils.filter_src_subdirs(srctree)
logger.debug('Directory listing (excluding filtered out):\n %s' % '\n '.join(dirlist))
if len(dirlist) == 1:
singleitem = os.path.join(srctree, dirlist[0])
@@ -724,10 +713,8 @@ def create_recipe(args):
lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
lines_after.append('')
- if args.fetch_dev:
- extravalues['fetchdev'] = True
- else:
- extravalues['fetchdev'] = None
+ if args.npm_dev:
+ extravalues['NPM_INSTALL_DEV'] = 1
# Find all plugins that want to register handlers
logger.debug('Loading recipe handlers')
@@ -843,7 +830,7 @@ def create_recipe(args):
elif line.startswith('PV = '):
if realpv:
# Replace the first part of the PV value
- line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
+ line = re.sub(r'"[^+]*\+', '"%s+' % realpv, line)
lines_before.append(line)
if args.also_native:
@@ -1063,6 +1050,7 @@ def get_license_md5sums(d, static_only=False):
md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+ md5sums['bfe1f75d606912a4111c90743d6c7325'] = 'MPL-1.1'
return md5sums
def crunch_license(licfile):
@@ -1078,8 +1066,8 @@ def crunch_license(licfile):
import oe.utils
# Note: these are carefully constructed!
- license_title_re = re.compile('^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
- license_statement_re = re.compile('^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ license_title_re = re.compile(r'^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
+ license_statement_re = re.compile(r'^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
copyright_re = re.compile('^(#+)? *Copyright .*$')
crunched_md5sums = {}
@@ -1322,7 +1310,7 @@ def register_commands(subparsers):
group.add_argument('-S', '--srcrev', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--npm-dev', action="store_true", help='For npm, also fetch devDependencies')
parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
diff --git a/external/poky/scripts/lib/recipetool/create_buildsys.py b/external/poky/scripts/lib/recipetool/create_buildsys.py
index 4743c740..35a97c93 100644
--- a/external/poky/scripts/lib/recipetool/create_buildsys.py
+++ b/external/poky/scripts/lib/recipetool/create_buildsys.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -236,9 +226,9 @@ class CmakeRecipeHandler(RecipeHandler):
elif pkg == 'PkgConfig':
inherits.append('pkgconfig')
elif pkg == 'PythonInterp':
- inherits.append('pythonnative')
+ inherits.append('python3native')
elif pkg == 'PythonLibs':
- inherits.append('python-dir')
+ inherits.append('python3-dir')
else:
# Try to map via looking at installed CMake packages in pkgdata
dep = find_cmake_package(pkg)
@@ -427,7 +417,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
}
progclassmap = {'gconftool-2': 'gconf',
'pkg-config': 'pkgconfig',
- 'python': 'pythonnative',
+ 'python': 'python3native',
'python3': 'python3native',
'perl': 'perlnative',
'makeinfo': 'texinfo',
@@ -576,16 +566,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
elif keyword == 'AX_PROG_XSLTPROC':
deps.append('libxslt-native')
elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
- pythonclass = 'pythonnative'
- res = version_re.search(value)
- if res:
- if res.group(1).startswith('3'):
- pythonclass = 'python3native'
- # Avoid replacing python3native with pythonnative
- if not pythonclass in inherits and not 'python3native' in inherits:
- if 'pythonnative' in inherits:
- inherits.remove('pythonnative')
- inherits.append(pythonclass)
+ pythonclass = 'python3native'
elif keyword == 'AX_WITH_CURSES':
deps.append('ncurses')
elif keyword == 'AX_PATH_BDB':
diff --git a/external/poky/scripts/lib/recipetool/create_buildsys_python.py b/external/poky/scripts/lib/recipetool/create_buildsys_python.py
index 5bd2aa33..adfa3779 100644
--- a/external/poky/scripts/lib/recipetool/create_buildsys_python.py
+++ b/external/poky/scripts/lib/recipetool/create_buildsys_python.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import ast
import codecs
@@ -41,11 +31,11 @@ def tinfoil_init(instance):
class PythonRecipeHandler(RecipeHandler):
- base_pkgdeps = ['python-core']
- excluded_pkgdeps = ['python-dbg']
- # os.path is provided by python-core
+ base_pkgdeps = ['python3-core']
+ excluded_pkgdeps = ['python3-dbg']
+ # os.path is provided by python3-core
assume_provided = ['builtins', 'os.path']
- # Assumes that the host python builtin_module_names is sane for target too
+ # Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
bbvar_map = {
@@ -164,8 +154,13 @@ class PythonRecipeHandler(RecipeHandler):
if 'buildsystem' in handled:
return False
- if not RecipeHandler.checkfiles(srctree, ['setup.py']):
- return
+ # Check for non-zero size setup.py files
+ setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
+ else:
+ return False
# setup.py is always parsed to get at certain required information, such as
# distutils vs setuptools
@@ -225,9 +220,9 @@ class PythonRecipeHandler(RecipeHandler):
self.apply_info_replacements(info)
if uses_setuptools:
- classes.append('setuptools')
+ classes.append('setuptools3')
else:
- classes.append('distutils')
+ classes.append('distutils3')
if license_str:
for i, line in enumerate(lines_before):
@@ -292,7 +287,7 @@ class PythonRecipeHandler(RecipeHandler):
for feature, feature_reqs in extras_req.items():
unmapped_deps.difference_update(feature_reqs)
- feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ feature_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
inst_reqs = set()
@@ -303,7 +298,7 @@ class PythonRecipeHandler(RecipeHandler):
if inst_reqs:
unmapped_deps.difference_update(inst_reqs)
- inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
@@ -366,7 +361,7 @@ class PythonRecipeHandler(RecipeHandler):
return info, 'setuptools' in imported_modules, non_literals, extensions
def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
info = {}
keys = set(self.bbvar_map.keys())
keys |= set(self.setuparg_list_fields)
@@ -400,7 +395,7 @@ class PythonRecipeHandler(RecipeHandler):
def get_setup_byline(self, fields, setupscript='./setup.py'):
info = {}
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
try:
info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
@@ -537,7 +532,7 @@ class PythonRecipeHandler(RecipeHandler):
pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python-dir.bbclass', ldata, True)
+ bb.parse.handle('classes/python3-dir.bbclass', ldata, True)
python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
diff --git a/external/poky/scripts/lib/recipetool/create_kernel.py b/external/poky/scripts/lib/recipetool/create_kernel.py
index ca4996c7..5740589a 100644
--- a/external/poky/scripts/lib/recipetool/create_kernel.py
+++ b/external/poky/scripts/lib/recipetool/create_kernel.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
diff --git a/external/poky/scripts/lib/recipetool/create_kmod.py b/external/poky/scripts/lib/recipetool/create_kmod.py
index 3982537a..85b5c48e 100644
--- a/external/poky/scripts/lib/recipetool/create_kmod.py
+++ b/external/poky/scripts/lib/recipetool/create_kmod.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
diff --git a/external/poky/scripts/lib/recipetool/create_npm.py b/external/poky/scripts/lib/recipetool/create_npm.py
index 0b09ed0b..579b7ae4 100644
--- a/external/poky/scripts/lib/recipetool/create_npm.py
+++ b/external/poky/scripts/lib/recipetool/create_npm.py
@@ -1,331 +1,255 @@
-# Recipe creation tool - node.js NPM module support plugin
-#
# Copyright (C) 2016 Intel Corporation
+# Copyright (C) 2020 Savoir-Faire Linux
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Recipe creation tool - npm module support plugin"""
+import json
import os
+import re
import sys
-import logging
-import subprocess
import tempfile
-import shutil
-import json
-from recipetool.create import RecipeHandler, split_pkg_licenses, handle_license_vars
+import bb
+from bb.fetch2.npm import NpmEnvironment
+from bb.fetch2.npmsw import foreach_dependencies
+from recipetool.create import RecipeHandler
+from recipetool.create import guess_license
+from recipetool.create import split_pkg_licenses
-logger = logging.getLogger('recipetool')
+TINFOIL = None
+def tinfoil_init(instance):
+ """Initialize tinfoil"""
+ global TINFOIL
+ TINFOIL = instance
-tinfoil = None
+class NpmRecipeHandler(RecipeHandler):
+ """Class to handle the npm recipe creation"""
+
+ @staticmethod
+ def _npm_name(name):
+ """Generate a Yocto friendly npm name"""
+ name = re.sub("/", "-", name)
+ name = name.lower()
+ name = re.sub(r"[^\-a-z0-9]", "", name)
+ name = name.strip("-")
+ return name
+
+ @staticmethod
+ def _get_registry(lines):
+ """Get the registry value from the 'npm://registry' url"""
+ registry = None
+
+ def _handle_registry(varname, origvalue, op, newlines):
+ nonlocal registry
+ if origvalue.startswith("npm://"):
+ registry = re.sub(r"^npm://", "http://", origvalue.split(";")[0])
+ return origvalue, None, 0, True
-def tinfoil_init(instance):
- global tinfoil
- tinfoil = instance
+ bb.utils.edit_metadata(lines, ["SRC_URI"], _handle_registry)
+ return registry
-class NpmRecipeHandler(RecipeHandler):
- lockdownpath = None
+ @staticmethod
+ def _ensure_npm():
+ """Check if the 'npm' command is available in the recipes"""
+ if not TINFOIL.recipes_parsed:
+ TINFOIL.parse_recipes()
- def _ensure_npm(self, fixed_setup=False):
- if not tinfoil.recipes_parsed:
- tinfoil.parse_recipes()
try:
- rd = tinfoil.parse_recipe('nodejs-native')
+ d = TINFOIL.parse_recipe("nodejs-native")
except bb.providers.NoProvider:
- if fixed_setup:
- msg = 'nodejs-native is required for npm but is not available within this SDK'
- else:
- msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
- logger.error(msg)
- return None
- bindir = rd.getVar('STAGING_BINDIR_NATIVE')
- npmpath = os.path.join(bindir, 'npm')
+ bb.error("Nothing provides 'nodejs-native' which is required for the build")
+ bb.note("You will likely need to add a layer that provides nodejs")
+ sys.exit(14)
+
+ bindir = d.getVar("STAGING_BINDIR_NATIVE")
+ npmpath = os.path.join(bindir, "npm")
+
if not os.path.exists(npmpath):
- tinfoil.build_targets('nodejs-native', 'addto_recipe_sysroot')
+ TINFOIL.build_targets("nodejs-native", "addto_recipe_sysroot")
+
if not os.path.exists(npmpath):
- logger.error('npm required to process specified source, but nodejs-native did not seem to populate it')
- return None
+ bb.error("Failed to add 'npm' to sysroot")
+ sys.exit(14)
+
return bindir
- def _handle_license(self, data):
- '''
- Handle the license value from an npm package.json file
- '''
- license = None
- if 'license' in data:
- license = data['license']
- if isinstance(license, dict):
- license = license.get('type', None)
- if license:
- if 'OR' in license:
- license = license.replace('OR', '|')
- license = license.replace('AND', '&')
- license = license.replace(' ', '_')
- if not license[0] == '(':
- license = '(' + license + ')'
- else:
- license = license.replace('AND', '&')
- if license[0] == '(':
- license = license[1:]
- if license[-1] == ')':
- license = license[:-1]
- license = license.replace('MIT/X11', 'MIT')
- license = license.replace('Public Domain', 'PD')
- license = license.replace('SEE LICENSE IN EULA',
- 'SEE-LICENSE-IN-EULA')
- return license
-
- def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
- try:
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warning('npm shrinkwrap failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
- shutil.move(os.path.join(srctree, 'npm-shrinkwrap.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
- lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
-
- def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- if not NpmRecipeHandler.lockdownpath:
- NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
- bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
- cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
- if not os.path.exists(relockbin):
- logger.warning('Could not find relock.js within lockdown directory; skipping lockdown')
- return
- try:
- bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warning('lockdown-relock failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'lockdown.json')
- shutil.move(os.path.join(srctree, 'lockdown.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['lockdown.json'] = tmpfile
- lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
-
- def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
- import scriptutils
- # If this isn't a single module we need to get the dependencies
- # and add them to SRC_URI
- def varfunc(varname, origvalue, op, newlines):
- if varname == 'SRC_URI':
- if not origvalue.startswith('npm://'):
- src_uri = origvalue.split()
- deplist = {}
- for dep, depver in optdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in devdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in deps.items():
- depdata = self.get_npm_data(dep, depver, d)
- deplist[dep] = depdata
-
- extra_urls = []
- for dep, depdata in deplist.items():
- version = depdata.get('version', None)
- if version:
- url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
- extra_urls.append(url)
- if extra_urls:
- scriptutils.fetch_url(tinfoil, ' '.join(extra_urls), None, srctree, logger)
- src_uri.extend(extra_urls)
- return src_uri, None, -1, True
- return origvalue, None, 0, True
- updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
- if updated:
- del lines_before[:]
- for line in newlines:
- # Hack to avoid newlines that edit_metadata inserts
- if line.endswith('\n'):
- line = line[:-1]
- lines_before.append(line)
- return updated
+ @staticmethod
+ def _npm_global_configs(dev):
+ """Get the npm global configuration"""
+ configs = []
+
+ if dev:
+ configs.append(("also", "development"))
+ else:
+ configs.append(("only", "production"))
+
+ configs.append(("save", "false"))
+ configs.append(("package-lock", "false"))
+ configs.append(("shrinkwrap", "false"))
+ return configs
+
+ def _run_npm_install(self, d, srctree, registry, dev):
+ """Run the 'npm install' command without building the addons"""
+ configs = self._npm_global_configs(dev)
+ configs.append(("ignore-scripts", "true"))
+
+ if registry:
+ configs.append(("registry", registry))
+
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm install", workdir=srctree)
+
+ def _generate_shrinkwrap(self, d, srctree, dev):
+ """Check and generate the 'npm-shrinkwrap.json' file if needed"""
+ configs = self._npm_global_configs(dev)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm shrinkwrap", workdir=srctree)
+
+ return os.path.join(srctree, "npm-shrinkwrap.json")
+
+ def _handle_licenses(self, srctree, shrinkwrap_file, dev):
+ """Return the extra license files and the list of packages"""
+ licfiles = []
+ packages = {}
+
+ def _licfiles_append(licfile):
+ """Append 'licfile' to the license files list"""
+ licfilepath = os.path.join(srctree, licfile)
+ licmd5 = bb.utils.md5_file(licfilepath)
+ licfiles.append("file://%s;md5=%s" % (licfile, licmd5))
+
+ # Handle the parent package
+ _licfiles_append("package.json")
+ packages["${PN}"] = ""
+
+ # Handle the dependencies
+ def _handle_dependency(name, params, deptree):
+ suffix = "-".join([self._npm_name(dep) for dep in deptree])
+ destdirs = [os.path.join("node_modules", dep) for dep in deptree]
+ destdir = os.path.join(*destdirs)
+ _licfiles_append(os.path.join(destdir, "package.json"))
+ packages["${PN}-" + suffix] = destdir
+
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ foreach_dependencies(shrinkwrap, _handle_dependency, dev)
+
+ return licfiles, packages
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
- import bb.utils
- import oe.package
- from collections import OrderedDict
+ """Handle the npm recipe creation"""
- if 'buildsystem' in handled:
+ if "buildsystem" in handled:
return False
- def read_package_json(fn):
- with open(fn, 'r', errors='surrogateescape') as f:
- return json.loads(f.read())
+ files = RecipeHandler.checkfiles(srctree, ["package.json"])
- files = RecipeHandler.checkfiles(srctree, ['package.json'])
- if files:
- d = bb.data.createCopy(tinfoil.config_data)
- npm_bindir = self._ensure_npm()
- if not npm_bindir:
- sys.exit(14)
- d.prependVar('PATH', '%s:' % npm_bindir)
-
- data = read_package_json(files[0])
- if 'name' in data and 'version' in data:
- extravalues['PN'] = data['name']
- extravalues['PV'] = data['version']
- classes.append('npm')
- handled.append('buildsystem')
- if 'description' in data:
- extravalues['SUMMARY'] = data['description']
- if 'homepage' in data:
- extravalues['HOMEPAGE'] = data['homepage']
-
- fetchdev = extravalues['fetchdev'] or None
- deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
- self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
-
- # Shrinkwrap
- localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
- self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Lockdown
- self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Split each npm module out to is own package
- npmpackages = oe.package.npm_split_package_dirs(srctree)
- licvalues = None
- for item in handled:
- if isinstance(item, tuple):
- if item[0] == 'license':
- licvalues = item[1]
- break
- if not licvalues:
- licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
- if licvalues:
- # Augment the license list with information we have in the packages
- licenses = {}
- license = self._handle_license(data)
- if license:
- licenses['${PN}'] = license
- for pkgname, pkgitem in npmpackages.items():
- _, pdata = pkgitem
- license = self._handle_license(pdata)
- if license:
- licenses[pkgname] = license
- # Now write out the package-specific license values
- # We need to strip out the json data dicts for this since split_pkg_licenses
- # isn't expecting it
- packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
- packages['${PN}'] = ''
- pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
- all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
- if '&' in all_licenses:
- all_licenses.remove('&')
- extravalues['LICENSE'] = ' & '.join(all_licenses)
-
- # Need to move S setting after inherit npm
- for i, line in enumerate(lines_before):
- if line.startswith('S ='):
- lines_before.pop(i)
- lines_after.insert(0, '# Must be set after inherit npm since that itself sets S')
- lines_after.insert(1, line)
- break
-
- return True
-
- return False
-
- # FIXME this is duplicated from lib/bb/fetch2/npm.py
- def _parse_view(self, output):
- '''
- Parse the output of npm view --json; the last JSON result
- is assumed to be the one that we're interested in.
- '''
- pdata = None
- outdeps = {}
- datalines = []
- bracelevel = 0
- for line in output.splitlines():
- if bracelevel:
- datalines.append(line)
- elif '{' in line:
- datalines = []
- datalines.append(line)
- bracelevel = bracelevel + line.count('{') - line.count('}')
- if datalines:
- pdata = json.loads('\n'.join(datalines))
- return pdata
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_data(self, pkg, version, d):
- import bb.fetch2
- pkgfullname = pkg
- if version != '*' and not '/' in version:
- pkgfullname += "@'%s'" % version
- logger.debug(2, "Calling getdeps on %s" % pkg)
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- fetchcmd = "npm view %s --json" % pkgfullname
- output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
- data = self._parse_view(output)
- return data
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_package_dependencies(self, pdata, fetchdev):
- dependencies = pdata.get('dependencies', {})
- optionalDependencies = pdata.get('optionalDependencies', {})
- dependencies.update(optionalDependencies)
- if fetchdev:
- devDependencies = pdata.get('devDependencies', {})
- dependencies.update(devDependencies)
- else:
- devDependencies = {}
- depsfound = {}
- optdepsfound = {}
- devdepsfound = {}
- for dep in dependencies:
- if dep in optionalDependencies:
- optdepsfound[dep] = dependencies[dep]
- elif dep in devDependencies:
- devdepsfound[dep] = dependencies[dep]
- else:
- depsfound[dep] = dependencies[dep]
- return depsfound, optdepsfound, devdepsfound
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def check_npm_optional_dependency(self, pdata):
- pkg_os = pdata.get('os', None)
- if pkg_os:
- if not isinstance(pkg_os, list):
- pkg_os = [pkg_os]
- blacklist = False
- for item in pkg_os:
- if item.startswith('!'):
- blacklist = True
- break
- if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
- pkg = pdata.get('name', 'Unnamed package')
- logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
- return False
- return True
+ if not files:
+ return False
+ with open(files[0], "r") as f:
+ data = json.load(f)
+
+ if "name" not in data or "version" not in data:
+ return False
+
+ extravalues["PN"] = self._npm_name(data["name"])
+ extravalues["PV"] = data["version"]
+
+ if "description" in data:
+ extravalues["SUMMARY"] = data["description"]
+
+ if "homepage" in data:
+ extravalues["HOMEPAGE"] = data["homepage"]
+
+ dev = bb.utils.to_boolean(str(extravalues.get("NPM_INSTALL_DEV", "0")), False)
+ registry = self._get_registry(lines_before)
+
+ bb.note("Checking if npm is available ...")
+ # The native npm is used here (and not the host one) to ensure that the
+ # npm version is high enough to ensure an efficient dependency tree
+ # resolution and avoid issues with the shrinkwrap file format.
+ # Moreover the native npm is mandatory for the build.
+ bindir = self._ensure_npm()
+
+ d = bb.data.createCopy(TINFOIL.config_data)
+ d.prependVar("PATH", bindir + ":")
+ d.setVar("S", srctree)
+
+ bb.note("Generating shrinkwrap file ...")
+ # To generate the shrinkwrap file the dependencies have to be installed
+ # first. During the generation process some files may be updated /
+ # deleted. By default devtool tracks the diffs in the srctree and raises
+ # errors when finishing the recipe if some diffs are found.
+ git_exclude_file = os.path.join(srctree, ".git", "info", "exclude")
+ if os.path.exists(git_exclude_file):
+ with open(git_exclude_file, "r+") as f:
+ lines = f.readlines()
+ for line in ["/node_modules/", "/npm-shrinkwrap.json"]:
+ if line not in lines:
+ f.write(line + "\n")
+
+ lock_file = os.path.join(srctree, "package-lock.json")
+ lock_copy = lock_file + ".copy"
+ if os.path.exists(lock_file):
+ bb.utils.copyfile(lock_file, lock_copy)
+
+ self._run_npm_install(d, srctree, registry, dev)
+ shrinkwrap_file = self._generate_shrinkwrap(d, srctree, dev)
+
+ if os.path.exists(lock_copy):
+ bb.utils.movefile(lock_copy, lock_file)
+
+ # Add the shrinkwrap file as 'extrafiles'
+ shrinkwrap_copy = shrinkwrap_file + ".copy"
+ bb.utils.copyfile(shrinkwrap_file, shrinkwrap_copy)
+ extravalues.setdefault("extrafiles", {})
+ extravalues["extrafiles"]["npm-shrinkwrap.json"] = shrinkwrap_copy
+
+ url_local = "npmsw://%s" % shrinkwrap_file
+ url_recipe = "npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"
+
+ if dev:
+ url_local += ";dev=1"
+ url_recipe += ";dev=1"
+
+ # Add the npmsw url in the SRC_URI of the generated recipe
+ def _handle_srcuri(varname, origvalue, op, newlines):
+ """Update the version value and add the 'npmsw://' url"""
+ value = origvalue.replace("version=" + data["version"], "version=${PV}")
+ value = value.replace("version=latest", "version=${PV}")
+ values = [line.strip() for line in value.strip('\n').splitlines()]
+ values.append(url_recipe)
+ return values, None, 4, False
+
+ (_, newlines) = bb.utils.edit_metadata(lines_before, ["SRC_URI"], _handle_srcuri)
+ lines_before[:] = [line.rstrip('\n') for line in newlines]
+
+ # In order to generate correct licence checksums in the recipe the
+ # dependencies have to be fetched again using the npmsw url
+ bb.note("Fetching npm dependencies ...")
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+ fetcher = bb.fetch2.Fetch([url_local], d)
+ fetcher.download()
+ fetcher.unpack(srctree)
+
+ bb.note("Handling licences ...")
+ (licfiles, packages) = self._handle_licenses(srctree, shrinkwrap_file, dev)
+ extravalues["LIC_FILES_CHKSUM"] = licfiles
+ split_pkg_licenses(guess_license(srctree, d), packages, lines_after, [])
+
+ classes.append("npm")
+ handled.append("buildsystem")
+
+ return True
def register_recipe_handlers(handlers):
+ """Register the npm handler"""
handlers.append((NpmRecipeHandler(), 60))
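[Editor's note] The rewritten npm plugin above derives PN from the package name via _npm_name(), which lower-cases the name, turns '/' into '-' and strips anything outside [-a-z0-9]; a couple of illustrative (hypothetical) conversions:

    import re

    def npm_name_sketch(name):
        # Mirrors NpmRecipeHandler._npm_name()
        name = re.sub("/", "-", name).lower()
        name = re.sub(r"[^\-a-z0-9]", "", name)
        return name.strip("-")

    print(npm_name_sketch("@angular/core"))   # angular-core
    print(npm_name_sketch("Left_Pad"))        # leftpad
    print(npm_name_sketch("iconv-lite"))      # iconv-lite
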
diff --git a/external/poky/scripts/lib/recipetool/edit.py b/external/poky/scripts/lib/recipetool/edit.py
index c4789a99..d5b980a1 100644
--- a/external/poky/scripts/lib/recipetool/edit.py
+++ b/external/poky/scripts/lib/recipetool/edit.py
@@ -6,18 +6,8 @@
#
# Copyright (C) 2018 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
@@ -44,7 +34,7 @@ def edit(args):
recipe_path = tinfoil.get_recipe_file(args.target)
appends = tinfoil.get_file_appends(recipe_path)
- return scriptutils.run_editor([recipe_path] + appends, logger)
+ return scriptutils.run_editor([recipe_path] + list(appends), logger)
def register_commands(subparsers):
diff --git a/external/poky/scripts/lib/recipetool/newappend.py b/external/poky/scripts/lib/recipetool/newappend.py
index 76707b4c..08e2474d 100644
--- a/external/poky/scripts/lib/recipetool/newappend.py
+++ b/external/poky/scripts/lib/recipetool/newappend.py
@@ -7,18 +7,8 @@
#
# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
diff --git a/external/poky/scripts/lib/recipetool/setvar.py b/external/poky/scripts/lib/recipetool/setvar.py
index 9de315a0..f8e2ee75 100644
--- a/external/poky/scripts/lib/recipetool/setvar.py
+++ b/external/poky/scripts/lib/recipetool/setvar.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/lib/resulttool/log.py b/external/poky/scripts/lib/resulttool/log.py
index 49816357..eb3927ec 100644
--- a/external/poky/scripts/lib/resulttool/log.py
+++ b/external/poky/scripts/lib/resulttool/log.py
@@ -2,27 +2,29 @@
#
# Copyright (c) 2019 Garmin International
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
import os
import resulttool.resultutils as resultutils
def show_ptest(result, ptest, logger):
- if 'ptestresult.sections' in result:
- if ptest in result['ptestresult.sections'] and 'log' in result['ptestresult.sections'][ptest]:
- print(result['ptestresult.sections'][ptest]['log'])
- return 0
+ logdata = resultutils.ptestresult_get_log(result, ptest)
+ if logdata is not None:
+ print(logdata)
+ return 0
- print("ptest '%s' not found" % ptest)
+ print("ptest '%s' log not found" % ptest)
return 1
+def show_reproducible(result, reproducible, logger):
+ try:
+ print(result['reproducible'][reproducible]['diffoscope.text'])
+ return 0
+
+ except KeyError:
+ print("reproducible '%s' not found" % reproducible)
+ return 1
+
def log(args, logger):
results = resultutils.load_resultsdata(args.source)
@@ -33,31 +35,49 @@ def log(args, logger):
for _, run_name, _, r in resultutils.test_run_results(results):
if args.dump_ptest:
- if 'ptestresult.sections' in r:
- for name, ptest in r['ptestresult.sections'].items():
- if 'log' in ptest:
- dest_dir = args.dump_ptest
- if args.prepend_run:
- dest_dir = os.path.join(dest_dir, run_name)
+ for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
+ if sectname in r:
+ for name, ptest in r[sectname].items():
+ logdata = resultutils.generic_get_log(sectname, r, name)
+ if logdata is not None:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+ if not sectname.startswith("ptest"):
+ dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
- os.makedirs(dest_dir, exist_ok=True)
+ os.makedirs(dest_dir, exist_ok=True)
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(logdata)
- dest = os.path.join(dest_dir, '%s.log' % name)
- print(dest)
- with open(dest, 'w') as f:
- f.write(ptest['log'])
+ if args.raw_ptest:
+ found = False
+ for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
+ rawlog = resultutils.generic_get_rawlogs(sectname, r)
+ if rawlog is not None:
+ print(rawlog)
+ found = True
+ if not found:
+ print('Raw ptest logs not found')
+ return 1
- if args.raw:
- if 'ptestresult.rawlogs' in r:
- print(r['ptestresult.rawlogs']['log'])
+ if args.raw_reproducible:
+ if 'reproducible.rawlogs' in r:
+ print(r['reproducible.rawlogs']['log'])
else:
- print('Raw logs not found')
+ print('Raw reproducible logs not found')
return 1
for ptest in args.ptest:
if not show_ptest(r, ptest, logger):
return 1
+ for reproducible in args.reproducible:
+ if not show_reproducible(r, reproducible, logger):
+ return 1
+
def register_commands(subparsers):
"""Register subcommands from this plugin"""
parser = subparsers.add_parser('log', help='show logs',
@@ -70,9 +90,15 @@ def register_commands(subparsers):
help='show logs for a ptest')
parser.add_argument('--dump-ptest', metavar='DIR',
help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--reproducible', action='append', default=[],
+ help='show logs for a reproducible test')
parser.add_argument('--prepend-run', action='store_true',
help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
Required if more than one test run is present in the result file''')
parser.add_argument('--raw', action='store_true',
- help='show raw logs')
+ help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+ parser.add_argument('--raw-ptest', action='store_true',
+ help='show raw ptest log')
+ parser.add_argument('--raw-reproducible', action='store_true',
+ help='show raw reproducible build logs')
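
A minimal sketch (hypothetical dump directory and run name) of how the reworked --dump-ptest destination is built: plain ptest sections land directly under the dump directory, while ltpresult/ltpposixresult sections get a subdirectory named after the section prefix.

    import os

    def dest_path(dump_dir, run_name, sectname, name, prepend_run=True):
        dest_dir = dump_dir
        if prepend_run:
            dest_dir = os.path.join(dest_dir, run_name)
        if not sectname.startswith("ptest"):
            dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
        return os.path.join(dest_dir, '%s.log' % name)

    print(dest_path('/tmp/logs', 'run1', 'ptestresult.sections', 'busybox'))
    # /tmp/logs/run1/busybox.log
    print(dest_path('/tmp/logs', 'run1', 'ltpresult.sections', 'math'))
    # /tmp/logs/run1/ltpresult/math.log
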
diff --git a/external/poky/scripts/lib/resulttool/manualexecution.py b/external/poky/scripts/lib/resulttool/manualexecution.py
index dc368f36..ecb27c59 100755
--- a/external/poky/scripts/lib/resulttool/manualexecution.py
+++ b/external/poky/scripts/lib/resulttool/manualexecution.py
@@ -2,15 +2,9 @@
#
# Copyright (c) 2018, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import os
@@ -187,11 +181,38 @@ class ManualTestRunner(object):
write_json_file(config_options_file, config_options)
logger.info('Configuration option file created at %s' % config_options_file)
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+            add_tc_config = self._get_true_false_input('\nDo you want to add this test case to the test configuration: (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
def manualexecution(args, logger):
testrunner = ManualTestRunner()
if args.make_config_options_file:
testrunner.make_config_option_file(logger, args.file, args.config_options_file)
return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
resultjsonhelper = OETestResultJSONHelper()
resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
@@ -209,4 +230,6 @@ def register_commands(subparsers):
parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
help='make the configuration options file based on provided inputs')
parser_build.add_argument('-t', '--testcase-config-file', default='',
- help='the testcase configuration file to enable user to run a selected set of test case') \ No newline at end of file
+                              help='the testcase configuration file to enable the user to run a selected set of test cases or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection') \ No newline at end of file
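
For illustration, a sketch (hypothetical aliases) of the file that --make-testcase-config-file writes: a JSON object whose 'testcases' list holds the '@alias' of each manual case the user accepted.

    import json

    # Hypothetical manual cases as loaded from a case file; only '@alias' matters here.
    cases = [
        {'test': {'@alias': 'manual.bsp-hw.usb_hotplug'}},
        {'test': {'@alias': 'manual.bsp-hw.sd_card_boot'}},
    ]

    # Shape of the generated testcase configuration, assuming the user answered (Y)es to both.
    testcase_config = {'testcases': [c['test']['@alias'] for c in cases]}
    print(json.dumps(testcase_config, indent=4))
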
diff --git a/external/poky/scripts/lib/resulttool/merge.py b/external/poky/scripts/lib/resulttool/merge.py
index 7159463f..18b4825a 100644
--- a/external/poky/scripts/lib/resulttool/merge.py
+++ b/external/poky/scripts/lib/resulttool/merge.py
@@ -3,30 +3,31 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import json
import resulttool.resultutils as resultutils
def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
- results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
- resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
resultutils.save_resultsdata(results, args.target_results)
else:
- results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
if os.path.exists(args.target_results):
- resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
return 0
def register_commands(subparsers):
@@ -39,4 +40,7 @@ def register_commands(subparsers):
help='the results file/directory/URL to import')
parser_build.add_argument('target_results',
help='the target file or directory to merge the base_results with')
-
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
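
A simplified sketch of what the new configvars plumbing adds to each result's configuration when --executed-by is given. Values here are hypothetical, and in the real helper TESTSERIES defaults to a value derived from the results file path rather than an empty string.

    # resultutils.extra_configvars plus the command-line addition.
    configvars = {'TESTSERIES': ''}
    configvars['EXECUTED_BY'] = 'autobuilder-worker-03'    # hypothetical --executed-by value

    configuration = {'MACHINE': 'qemux86-64'}              # hypothetical result configuration
    for key, value in configvars.items():
        configuration.setdefault(key, value)               # only added if not already present
    print(configuration)
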
diff --git a/external/poky/scripts/lib/resulttool/regression.py b/external/poky/scripts/lib/resulttool/regression.py
index fa90ab1e..9f952951 100644
--- a/external/poky/scripts/lib/resulttool/regression.py
+++ b/external/poky/scripts/lib/resulttool/regression.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import resulttool.resultutils as resultutils
import json
diff --git a/external/poky/scripts/lib/resulttool/report.py b/external/poky/scripts/lib/resulttool/report.py
index 8ae42728..f0ca50eb 100644
--- a/external/poky/scripts/lib/resulttool/report.py
+++ b/external/poky/scripts/lib/resulttool/report.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import glob
import json
@@ -23,26 +17,37 @@ import oeqa.utils.gitarchive as gitarchive
class ResultsTextReport(object):
def __init__(self):
self.ptests = {}
- self.result_types = {'passed': ['PASSED', 'passed'],
- 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
- 'skipped': ['SKIPPED', 'skipped']}
+ self.ltptests = {}
+ self.ltpposixtests = {}
+ self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+ 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+ 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
- def handle_ptest_result(self, k, status, result):
if k == 'ptestresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ptestresult.sections']:
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
if 'duration' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
if 'timeout' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] += " T"
- return
+ self.ptests[machine][suite]['duration'] += " T"
+ return True
+
+ # process test result
try:
_, suite, test = k.split(".", 2)
except ValueError:
- return
+ return True
+
# Handle 'glib-2.0'
if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
try:
@@ -51,24 +56,105 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+
+ # do not process duplicate results
+ if test in self.ptests[machine][suite]["testcases"]:
+            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+ return False
+
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ptests[suite][tk] += 1
+ self.ptests[machine][suite][tk] += 1
+ self.ptests[machine][suite]["testcases"].add(test)
+ return True
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
- def get_aggregated_test_result(self, logger, testresult):
+ if k == 'ltpresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpresult.sections']:
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ if 'timeout' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle 'glib-2.0'
+ if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
+ if k == 'ltpposixresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpposixresult.sections']:
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpposixresult.sections'][suite]:
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle 'glib-2.0'
+ if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpposixresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltpposixtests[machine][suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult, machine):
test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
result = testresult.get('result', [])
for k in result:
test_status = result[k].get('status', [])
+ if k.startswith("ptestresult."):
+ if not self.handle_ptest_result(k, test_status, result, machine):
+ continue
+ elif k.startswith("ltpresult."):
+ self.handle_ltptest_result(k, test_status, result, machine)
+ elif k.startswith("ltpposixresult."):
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+ # process result if it was not skipped by a handler
for tk in self.result_types:
if test_status in self.result_types[tk]:
test_count_report[tk] += 1
if test_status in self.result_types['failed']:
test_count_report['failed_testcases'].append(k)
- if k.startswith("ptestresult."):
- self.handle_ptest_result(k, test_status, result)
return test_count_report
def print_test_report(self, template_file_name, test_count_reports):
@@ -78,10 +164,10 @@ class ResultsTextReport(object):
env = Environment(loader=file_loader, trim_blocks=True)
template = env.get_template(template_file_name)
havefailed = False
- haveptest = bool(self.ptests)
reportvalues = []
+ machines = []
cols = ['passed', 'failed', 'skipped']
- maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
for line in test_count_reports:
total_tested = line['passed'] + line['failed'] + line['skipped']
vals = {}
@@ -97,18 +183,53 @@ class ResultsTextReport(object):
reportvalues.append(vals)
if line['failed_testcases']:
havefailed = True
- for ptest in self.ptests:
- if len(ptest) > maxlen['ptest']:
- maxlen['ptest'] = len(ptest)
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ reporttotalvalues = {}
+ for k in cols:
+ reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+ reporttotalvalues['count'] = '%s' % len(test_count_reports)
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
output = template.render(reportvalues=reportvalues,
+ reporttotalvalues=reporttotalvalues,
havefailed=havefailed,
- haveptest=haveptest,
+ machines=machines,
ptests=self.ptests,
+ ltptests=self.ltptests,
+ ltpposixtests=self.ltpposixtests,
maxlen=maxlen)
print(output)
- def view_test_report(self, logger, source_dir, branch, commit, tag):
+ def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
+ def print_selected_testcase_result(testresults, selected_test_case_only):
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]['result']
+ test_case_result = result.get(selected_test_case_only, {})
+ if test_case_result.get('status'):
+ print('Found selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ print(test_case_result['status'])
+ else:
+ print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ if test_case_result.get('log'):
+ print(test_case_result['log'])
test_count_reports = []
+ configmap = resultutils.store_map
+ if use_regression_map:
+ configmap = resultutils.regression_map
if commit:
if tag:
logger.warning("Ignoring --tag as --commit was specified")
@@ -116,16 +237,48 @@ class ResultsTextReport(object):
repo = GitRepo(source_dir)
revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
rev_index = gitarchive.rev_find(revs, 'commit', commit)
- testresults = resultutils.git_get_result(repo, revs[rev_index][2])
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
elif tag:
repo = GitRepo(source_dir)
- testresults = resultutils.git_get_result(repo, [tag])
+ testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
else:
- testresults = resultutils.load_resultsdata(source_dir)
+ testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+ if raw_test:
+ raw_results = {}
+ for testsuite in testresults:
+ result = testresults[testsuite].get(raw_test, {})
+ if result:
+ raw_results[testsuite] = {raw_test: result}
+ if raw_results:
+ if selected_test_case_only:
+ print_selected_testcase_result(raw_results, selected_test_case_only)
+ else:
+ print(json.dumps(raw_results, sort_keys=True, indent=4))
+ else:
+ print('Could not find raw test result for %s' % raw_test)
+ return 0
+ if selected_test_case_only:
+ print_selected_testcase_result(testresults, selected_test_case_only)
+ return 0
for testsuite in testresults:
for resultid in testresults[testsuite]:
+ skip = False
result = testresults[testsuite][resultid]
- test_count_report = self.get_aggregated_test_result(logger, result)
+ machine = result['configuration']['MACHINE']
+
+ # Check to see if there is already results for these kinds of tests for the machine
+ for key in result['result'].keys():
+ testtype = str(key).split('.')[0]
+                if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
+ (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+ print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
+ skip = True
+ break
+ if skip:
+ break
+
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
test_count_report['testseries'] = result['configuration']['TESTSERIES']
test_count_report['result_id'] = resultid
test_count_reports.append(test_count_report)
@@ -133,7 +286,8 @@ class ResultsTextReport(object):
def report(args, logger):
report = ResultsTextReport()
- report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+ args.raw_test_only, args.selected_test_case_only)
return 0
def register_commands(subparsers):
@@ -148,3 +302,11 @@ def register_commands(subparsers):
parser_build.add_argument('--commit', help="Revision to report")
parser_build.add_argument('-t', '--tag', default='',
help='source_dir is a git repository, report on the tag specified from that repository')
+ parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+ help='instead of the default "store_map", use the "regression_map" for report')
+ parser_build.add_argument('-r', '--raw_test_only', default='',
+ help='output raw test result only for the user provided test result id')
+ parser_build.add_argument('-s', '--selected_test_case_only', default='',
+                              help='output only the result of the user-provided test case id; if both a test '
+                                   'result id and a test case id are provided, output the selected test case '
+                                   'result from that test result id')
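
A minimal sketch of the per-machine bookkeeping the report now performs: ptest counts are keyed by machine, then by suite, and duplicate testcase results are dropped. Machine, suite and status names below are made up.

    ptests = {}

    def count(machine, suite, test, status):
        ptests.setdefault(machine, {})
        ptests[machine].setdefault(suite, {'passed': 0, 'failed': 0, 'skipped': 0,
                                           'duration': '-', 'failed_testcases': [],
                                           'testcases': set()})
        entry = ptests[machine][suite]
        if test in entry['testcases']:
            return False                     # duplicate result, ignored
        if status in ('PASSED', 'PASS', 'XFAIL'):
            entry['passed'] += 1
        entry['testcases'].add(test)
        return True

    count('qemux86-64', 'busybox', 'busybox.tests', 'PASS')
    count('qemux86-64', 'busybox', 'busybox.tests', 'PASS')    # second copy is skipped
    print(ptests['qemux86-64']['busybox']['passed'])           # 1
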
diff --git a/external/poky/scripts/lib/resulttool/resultutils.py b/external/poky/scripts/lib/resulttool/resultutils.py
index 07dab4cb..8917022d 100644
--- a/external/poky/scripts/lib/resulttool/resultutils.py
+++ b/external/poky/scripts/lib/resulttool/resultutils.py
@@ -3,16 +3,12 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
+import base64
+import zlib
import json
import scriptpath
import copy
@@ -48,10 +44,12 @@ def is_url(p):
"""
return p.startswith('http://') or p.startswith('https://')
+extra_configvars = {'TESTSERIES': ''}
+
#
# Load the json file and append the results data into the provided results dict
#
-def append_resultsdata(results, f, configmap=store_map):
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
if type(f) is str:
if is_url(f):
with urllib.request.urlopen(f) as response:
@@ -67,12 +65,15 @@ def append_resultsdata(results, f, configmap=store_map):
for res in data:
if "configuration" not in data[res] or "result" not in data[res]:
raise ValueError("Test results data without configuration or result section?")
- if "TESTSERIES" not in data[res]["configuration"]:
- data[res]["configuration"]["TESTSERIES"] = testseries
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
testtype = data[res]["configuration"].get("TEST_TYPE")
if testtype not in configmap:
raise ValueError("Unknown test type %s" % testtype)
- configvars = configmap[testtype]
testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
if testpath not in results:
results[testpath] = {}
@@ -82,16 +83,16 @@ def append_resultsdata(results, f, configmap=store_map):
# Walk a directory and find/load results data
# or load directly from a file
#
-def load_resultsdata(source, configmap=store_map):
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
results = {}
if is_url(source) or os.path.isfile(source):
- append_resultsdata(results, source, configmap)
+ append_resultsdata(results, source, configmap, configvars)
return results
for root, dirs, files in os.walk(source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- append_resultsdata(results, f, configmap)
+ append_resultsdata(results, f, configmap, configvars)
return results
def filter_resultsdata(results, resultid):
@@ -118,6 +119,41 @@ def strip_ptestresults(results):
del newresults[res]['result']['ptestresult.sections'][i]['log']
return newresults
+def decode_log(logdata):
+ if isinstance(logdata, str):
+ return logdata
+ elif isinstance(logdata, dict):
+ if "compressed" in logdata:
+ data = logdata.get("compressed")
+ data = base64.b64decode(data.encode("utf-8"))
+ data = zlib.decompress(data)
+ return data.decode("utf-8", errors='ignore')
+ return None
+
+def generic_get_log(sectionname, results, section):
+ if sectionname not in results:
+ return None
+ if section not in results[sectionname]:
+ return None
+
+ ptest = results[sectionname][section]
+ if 'log' not in ptest:
+ return None
+ return decode_log(ptest['log'])
+
+def ptestresult_get_log(results, section):
+    return generic_get_log('ptestresult.sections', results, section)
+
+def generic_get_rawlogs(sectname, results):
+ if sectname not in results:
+ return None
+ if 'log' not in results[sectname]:
+ return None
+ return decode_log(results[sectname]['log'])
+
+def ptestresult_get_rawlogs(results):
+ return generic_get_rawlogs('ptestresult.rawlogs', results)
+
def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
for res in results:
if res:
@@ -132,16 +168,19 @@ def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, p
f.write(json.dumps(resultsout, sort_keys=True, indent=4))
for res2 in results[res]:
if ptestlogs and 'result' in results[res][res2]:
- if 'ptestresult.rawlogs' in results[res][res2]['result']:
+ seriesresults = results[res][res2]['result']
+ rawlogs = ptestresult_get_rawlogs(seriesresults)
+ if rawlogs is not None:
with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
- f.write(results[res][res2]['result']['ptestresult.rawlogs']['log'])
- if 'ptestresult.sections' in results[res][res2]['result']:
- for i in results[res][res2]['result']['ptestresult.sections']:
- if 'log' in results[res][res2]['result']['ptestresult.sections'][i]:
+ f.write(rawlogs)
+ if 'ptestresult.sections' in seriesresults:
+ for i in seriesresults['ptestresult.sections']:
+ sectionlog = ptestresult_get_log(seriesresults, i)
+ if sectionlog is not None:
with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
- f.write(results[res][res2]['result']['ptestresult.sections'][i]['log'])
+ f.write(sectionlog)
-def git_get_result(repo, tags):
+def git_get_result(repo, tags, configmap=store_map):
git_objs = []
for tag in tags:
files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
@@ -164,7 +203,7 @@ def git_get_result(repo, tags):
# Optimize by reading all data with one git command
results = {}
for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
- append_resultsdata(results, obj)
+ append_resultsdata(results, obj, configmap=configmap)
return results
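
A round-trip sketch of the new log handling: a 'log' entry may now be either a plain string or a dict carrying a zlib-compressed, base64-encoded 'compressed' field, and decode_log() returns the text either way.

    import base64, zlib

    def decode_log(logdata):
        # Mirrors the helper above: pass strings through, decompress dicts.
        if isinstance(logdata, str):
            return logdata
        if isinstance(logdata, dict) and 'compressed' in logdata:
            data = base64.b64decode(logdata['compressed'].encode('utf-8'))
            return zlib.decompress(data).decode('utf-8', errors='ignore')
        return None

    raw = 'ptest output line\n' * 3
    compressed = {'compressed': base64.b64encode(zlib.compress(raw.encode('utf-8'))).decode('utf-8')}
    assert decode_log(raw) == decode_log(compressed) == raw
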
diff --git a/external/poky/scripts/lib/resulttool/store.py b/external/poky/scripts/lib/resulttool/store.py
index acdfbd94..e0951f0a 100644
--- a/external/poky/scripts/lib/resulttool/store.py
+++ b/external/poky/scripts/lib/resulttool/store.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import tempfile
import os
import subprocess
@@ -27,16 +21,21 @@ import oeqa.utils.gitarchive as gitarchive
def store(args, logger):
tempdir = tempfile.mkdtemp(prefix='testresults.')
try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if args.extra_test_env:
+ configvars['EXTRA_TEST_ENV'] = args.extra_test_env
results = {}
logger.info('Reading files from %s' % args.source)
if resultutils.is_url(args.source) or os.path.isfile(args.source):
- resultutils.append_resultsdata(results, args.source)
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
else:
for root, dirs, files in os.walk(args.source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- resultutils.append_resultsdata(results, f)
+ resultutils.append_resultsdata(results, f, configvars=configvars)
elif args.all:
dst = f.replace(args.source, tempdir + "/")
os.makedirs(os.path.dirname(dst), exist_ok=True)
@@ -99,4 +98,7 @@ def register_commands(subparsers):
help='include all files, not just testresults.json files')
parser_build.add_argument('-e', '--allow-empty', action='store_true',
help='don\'t error if no results to store are found')
-
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
+ parser_build.add_argument('-t', '--extra-test-env', default='',
+ help='add extra test environment data to each result file configuration')
diff --git a/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
index 590f35c7..2efba2ef 100644
--- a/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -8,22 +8,57 @@ Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
-{% if haveptest %}
+{% for machine in machines %}
+{% if ptests[machine] %}
==============================================================================================================
-PTest Result Summary
+{{ machine }} PTest Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ptest in ptests |sort %}
-{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltptests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no ptest data
{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Posix Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
==============================================================================================================
Failed test cases (sorted by testseries, ID)
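
A minimal jinja2 sketch of the new per-machine loop, using an inline template string for illustration only; the real report renders test_report_full_text.txt and passes machines, ptests, ltptests, ltpposixtests and the totals shown above.

    from jinja2 import Template

    tmpl = Template(
        "{% for machine in machines %}"
        "{% if ptests[machine] %}{{ machine }} PTest Result Summary\n"
        "{% for ptest in ptests[machine]|sort %}"
        "{{ ptest }}: {{ ptests[machine][ptest]['passed'] }} passed\n"
        "{% endfor %}{% endif %}{% endfor %}")

    print(tmpl.render(machines=['qemux86-64'],
                      ptests={'qemux86-64': {'busybox': {'passed': 42}}}))
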
diff --git a/external/poky/scripts/lib/scriptpath.py b/external/poky/scripts/lib/scriptpath.py
index d00317e1..f32326db 100644
--- a/external/poky/scripts/lib/scriptpath.py
+++ b/external/poky/scripts/lib/scriptpath.py
@@ -3,18 +3,8 @@
# Copyright (C) 2012-2014 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/lib/scriptutils.py b/external/poky/scripts/lib/scriptutils.py
index 3c60c3a1..f92255d8 100644
--- a/external/poky/scripts/lib/scriptutils.py
+++ b/external/poky/scripts/lib/scriptutils.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import glob
@@ -26,12 +16,51 @@ import string
import subprocess
import sys
import tempfile
+import threading
import importlib
from importlib import machinery
-def logger_create(name, stream=None):
+class KeepAliveStreamHandler(logging.StreamHandler):
+ def __init__(self, keepalive=True, **kwargs):
+ super().__init__(**kwargs)
+ if keepalive is True:
+ keepalive = 5000 # default timeout
+ self._timeout = threading.Condition()
+ self._stop = False
+
+        # The background thread waits on the condition; if the wait times out
+        # without being notified, emit a keepalive message.
+ def thread():
+ while not self._stop:
+ with self._timeout:
+ if not self._timeout.wait(keepalive):
+ self.emit(logging.LogRecord("keepalive", logging.INFO,
+ None, None, "Keepalive message", None, None))
+
+ self._thread = threading.Thread(target = thread, daemon = True)
+ self._thread.start()
+
+ def close(self):
+ # mark the thread to stop and notify it
+ self._stop = True
+ with self._timeout:
+ self._timeout.notify()
+ # wait for it to join
+ self._thread.join()
+ super().close()
+
+ def emit(self, record):
+ super().emit(record)
+ # trigger timer reset
+ with self._timeout:
+ self._timeout.notify()
+
+def logger_create(name, stream=None, keepalive=None):
logger = logging.getLogger(name)
- loggerhandler = logging.StreamHandler(stream=stream)
+ if keepalive is not None:
+ loggerhandler = KeepAliveStreamHandler(stream=stream, keepalive=keepalive)
+ else:
+ loggerhandler = logging.StreamHandler(stream=stream)
loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(loggerhandler)
logger.setLevel(logging.INFO)
@@ -39,16 +68,15 @@ def logger_create(name, stream=None):
def logger_setup_color(logger, color='auto'):
from bb.msg import BBLogFormatter
- console = logging.StreamHandler(sys.stdout)
- formatter = BBLogFormatter("%(levelname)s: %(message)s")
- console.setFormatter(formatter)
- logger.handlers = [console]
- if color == 'always' or (color=='auto' and console.stream.isatty()):
- formatter.enable_color()
+
+ for handler in logger.handlers:
+ if (isinstance(handler, logging.StreamHandler) and
+ isinstance(handler.formatter, BBLogFormatter)):
+ if color == 'always' or (color == 'auto' and handler.stream.isatty()):
+ handler.formatter.enable_color()
def load_plugins(logger, plugins, pluginpath):
- import imp
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
@@ -69,6 +97,7 @@ def load_plugins(logger, plugins, pluginpath):
plugin.plugin_init(plugins)
plugins.append(plugin)
+
def git_convert_standalone_clone(repodir):
"""If specified directory is a git repository, ensure it's a standalone clone"""
import bb.process
@@ -238,3 +267,13 @@ def is_src_url(param):
elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
return True
return False
+
+def filter_src_subdirs(pth):
+ """
+ Filter out subdirectories of initial unpacked source trees that we do not care about.
+ Used by devtool and recipetool.
+ """
+ dirlist = os.listdir(pth)
+ filterout = ['git.indirectionsymlink', 'source-date-epoch']
+ dirlist = [x for x in dirlist if x not in filterout]
+ return dirlist
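
A small usage sketch of the new keepalive handler, assuming poky/scripts/lib is on sys.path so scriptutils is importable: when nothing is logged for the keepalive interval, the handler emits an INFO "Keepalive message" record, which keeps otherwise silent steps from looking hung.

    import time
    import scriptutils   # assumes poky/scripts/lib is on sys.path

    logger = scriptutils.logger_create('example', keepalive=2)   # 2-second keepalive
    logger.info('starting a long, quiet step')
    time.sleep(5)        # expect a couple of "Keepalive message" lines meanwhile
    logger.info('done')
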
diff --git a/external/poky/scripts/lib/wic/__init__.py b/external/poky/scripts/lib/wic/__init__.py
index 85876b13..85567934 100644
--- a/external/poky/scripts/lib/wic/__init__.py
+++ b/external/poky/scripts/lib/wic/__init__.py
@@ -1,20 +1,10 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class WicError(Exception):
pass
diff --git a/external/poky/scripts/lib/wic/canned-wks/qemuriscv.wks b/external/poky/scripts/lib/wic/canned-wks/qemuriscv.wks
new file mode 100644
index 00000000..12c68b70
--- /dev/null
+++ b/external/poky/scripts/lib/wic/canned-wks/qemuriscv.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for RISC-V QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/external/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/external/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index c8d9f121..22b45217 100644
--- a/external/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/external/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="vga=0 rw oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 "
diff --git a/external/poky/scripts/lib/wic/engine.py b/external/poky/scripts/lib/wic/engine.py
index ea600d28..9ff43947 100644
--- a/external/poky/scripts/lib/wic/engine.py
+++ b/external/poky/scripts/lib/wic/engine.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -33,6 +19,7 @@ import os
import tempfile
import json
import subprocess
+import re
from collections import namedtuple, OrderedDict
from distutils.spawn import find_executable
@@ -89,7 +76,8 @@ def find_canned_image(scripts_path, wks_file):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ if ((fname.endswith(".wks") and wks_file + ".wks" == fname) or \
+ (fname.endswith(".wks.in") and wks_file + ".wks.in" == fname)):
fullpath = os.path.join(canned_wks_dir, fname)
return fullpath
return None
@@ -106,7 +94,7 @@ def list_canned_images(scripts_path):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks"):
+ if fname.endswith(".wks") or fname.endswith(".wks.in"):
fullpath = os.path.join(canned_wks_dir, fname)
with open(fullpath) as wks:
for line in wks:
@@ -115,7 +103,7 @@ def list_canned_images(scripts_path):
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
- basename = os.path.splitext(fname)[0]
+ basename = fname.split('.')[0]
print(" %s\t\t%s" % (basename.ljust(30), desc))
@@ -303,7 +291,7 @@ class Disk:
def _get_part_image(self, pnum):
if pnum not in self.partitions:
- raise WicError("Partition %s is not in the image")
+ raise WicError("Partition %s is not in the image" % pnum)
part = self.partitions[pnum]
# check if fstype is supported
for fstype in self.fstypes:
@@ -326,6 +314,9 @@ class Disk:
seek=self.partitions[pnum].start)
def dir(self, pnum, path):
+ if pnum not in self.partitions:
+ raise WicError("Partition %s is not in the image" % pnum)
+
if self.partitions[pnum].fstype.startswith('ext'):
return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
self._get_part_image(pnum),
@@ -335,38 +326,80 @@ class Disk:
self._get_part_image(pnum),
path))
- def copy(self, src, pnum, path):
+ def copy(self, src, dest):
"""Copy partition image into wic image."""
+ pnum = dest.part if isinstance(src, str) else src.part
+
if self.partitions[pnum].fstype.startswith('ext'):
- cmd = "printf 'cd {}\nwrite {} {}' | {} -w {}".\
- format(path, src, os.path.basename(src),
+ if isinstance(src, str):
+ cmd = "printf 'cd {}\nwrite {} {}\n' | {} -w {}".\
+ format(os.path.dirname(dest.path), src, os.path.basename(src),
self.debugfs, self._get_part_image(pnum))
+ else: # copy from wic
+ # run both dump and rdump to support both files and directory
+ cmd = "printf 'cd {}\ndump /{} {}\nrdump /{} {}\n' | {} {}".\
+ format(os.path.dirname(src.path), src.path,
+ dest, src.path, dest, self.debugfs,
+ self._get_part_image(pnum))
else: # fat
- cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ if isinstance(src, str):
+ cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
self._get_part_image(pnum),
- src, path)
+ src, dest.path)
+ else:
+ cmd = "{} -i {} -snop ::{} {}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src.path, dest)
+
exec_cmd(cmd, as_shell=True)
self._put_part_image(pnum)
- def remove(self, pnum, path):
+ def remove_ext(self, pnum, path, recursive):
+ """
+ Remove files/dirs and their contents from the partition.
+ This only applies to ext* partition.
+ """
+        abs_path = re.sub(r'//+', '/', path)
+ cmd = "{} {} -wR 'rm \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path)
+ out = exec_cmd(cmd , as_shell=True)
+ for line in out.splitlines():
+ if line.startswith("rm:"):
+ if "file is a directory" in line:
+ if recursive:
+                        # loop through the directory contents and delete them
+                        # one by one if flagged with -r
+ subdirs = iter(self.dir(pnum, abs_path).splitlines())
+ next(subdirs)
+ for subdir in subdirs:
+ dir = subdir.split(':')[1].split(" ", 1)[1]
+ if not dir == "." and not dir == "..":
+ self.remove_ext(pnum, "%s/%s" % (abs_path, dir), recursive)
+
+ rmdir_out = exec_cmd("{} {} -wR 'rmdir \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path.rstrip('/'))
+ , as_shell=True)
+
+ for rmdir_line in rmdir_out.splitlines():
+ if "directory not empty" in rmdir_line:
+ raise WicError("Could not complete operation: \n%s \n"
+ "use -r to remove non-empty directory" % rmdir_line)
+ if rmdir_line.startswith("rmdir:"):
+ raise WicError("Could not complete operation: \n%s "
+ "\n%s" % (str(line), rmdir_line))
+
+ else:
+ raise WicError("Could not complete operation: \n%s "
+ "\nUnable to remove %s" % (str(line), abs_path))
+
+ def remove(self, pnum, path, recursive):
"""Remove files/dirs from the partition."""
partimg = self._get_part_image(pnum)
if self.partitions[pnum].fstype.startswith('ext'):
- cmd = "{} {} -wR 'rm {}'".format(self.debugfs,
- self._get_part_image(pnum),
- path)
- out = exec_cmd(cmd , as_shell=True)
- for line in out.splitlines():
- if line.startswith("rm:"):
- if "file is a directory" in line:
- # Try rmdir to see if this is an empty directory. This won't delete
- # any non empty directory so let user know about any error that this might
- # generate.
- print(exec_cmd("{} {} -wR 'rmdir {}'".format(self.debugfs,
- self._get_part_image(pnum),
- path), as_shell=True))
- else:
- raise WicError("Could not complete operation: wic %s" % str(line))
+ self.remove_ext(pnum, path, recursive)
+
else: # fat
cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
try:
@@ -409,7 +442,7 @@ class Disk:
outf.flush()
def read_ptable(path):
- out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
+ out = exec_cmd("{} -J {}".format(self.sfdisk, path))
return json.loads(out)
def write_ptable(parts, target):
@@ -536,11 +569,15 @@ def wic_ls(args, native_sysroot):
def wic_cp(args, native_sysroot):
"""
- Copy local file or directory to the vfat partition of
+ Copy file or directory to/from the vfat/ext partition of
partitioned image.
"""
- disk = Disk(args.dest.image, native_sysroot)
- disk.copy(args.src, args.dest.part, args.dest.path)
+ if isinstance(args.dest, str):
+ disk = Disk(args.src.image, native_sysroot)
+ else:
+ disk = Disk(args.dest.image, native_sysroot)
+ disk.copy(args.src, args.dest)
+
def wic_rm(args, native_sysroot):
"""
@@ -548,13 +585,13 @@ def wic_rm(args, native_sysroot):
partitioned image.
"""
disk = Disk(args.path.image, native_sysroot)
- disk.remove(args.path.part, args.path.path)
+ disk.remove(args.path.part, args.path.path, args.recursive_delete)
def wic_write(args, native_sysroot):
"""
Write image to a target device.
"""
- disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'swap'))
+ disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'linux-swap'))
disk.write(args.target, args.expand)
def find_canned(scripts_path, file_name):
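
A sketch of how the reworked Disk.copy() decides which partition to operate on now that 'wic cp' works in both directions: whichever side of the copy is the parsed image:partition:path triple supplies the partition number, and the other side is a plain host path. The namedtuple below is a stand-in for the parsed argument, not the real parser.

    from collections import namedtuple

    ImagePath = namedtuple('ImagePath', 'image part path')   # stand-in for the parsed arg

    def pick_partition(src, dest):
        return dest.part if isinstance(src, str) else src.part

    print(pick_partition('/tmp/file.txt', ImagePath('disk.wic', 1, '/etc/')))   # 1: copy into the image
    print(pick_partition(ImagePath('disk.wic', 2, '/etc/fstab'), '/tmp/'))      # 2: copy out of the image
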
diff --git a/external/poky/scripts/lib/wic/filemap.py b/external/poky/scripts/lib/wic/filemap.py
index abbf958b..4d9da281 100644
--- a/external/poky/scripts/lib/wic/filemap.py
+++ b/external/poky/scripts/lib/wic/filemap.py
@@ -1,13 +1,8 @@
+#
# Copyright (c) 2012 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License, version 2,
-# as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
"""
This module implements a way to get file block mappings. Two methods
@@ -37,8 +32,13 @@ def get_block_size(file_obj):
"""
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
- binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
- bsize = struct.unpack('I', binary_data)[0]
+ try:
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ bsize = struct.unpack('I', binary_data)[0]
+ except OSError:
+ bsize = None
+
+    # If the ioctl raised OSError or returned a zero block size, fall back to os.fstat
if not bsize:
import os
stat = os.fstat(file_obj.fileno())
@@ -142,15 +142,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. It returns
- 'True' if block number 'block' of the image file is not mapped (hole)
- and 'False' otherwise.
- """
-
- raise Error("the method is not implemented")
-
def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
"""
This method has to be implemented by child classes. This is a
@@ -164,15 +155,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. Just like
- 'get_mapped_ranges()', but yields unmapped block ranges instead
- (holes).
- """
-
- raise Error("the method is not implemented")
-
# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
_SEEK_DATA = 3
@@ -265,15 +247,10 @@ class FilemapSeek(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _get_ranges(self, start, count, whence1, whence2):
"""
- This function implements 'get_mapped_ranges()' and
- 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
- and 'whence2' arguments.
+ This function implements 'get_mapped_ranges()' depending
+ on what is passed in the 'whence1' and 'whence2' arguments.
"""
assert whence1 != whence2
@@ -303,12 +280,6 @@ class FilemapSeek(_FilemapBase):
% (start, count, start + count - 1))
return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
- def get_unmapped_ranges(self, start, count):
- """Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
-
# Below goes the FIEMAP ioctl implementation, which is not very readable
# because it deals with the rather complex FIEMAP ioctl. To understand the
@@ -422,10 +393,6 @@ class FilemapFiemap(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _unpack_fiemap_extent(self, index):
"""
Unpack a 'struct fiemap_extent' structure object number 'index' from
@@ -502,23 +469,28 @@ class FilemapFiemap(_FilemapBase):
% (first_prev, last_prev))
yield (first_prev, last_prev)
- def get_unmapped_ranges(self, start, count):
+class FilemapNobmap(_FilemapBase):
+ """
+ This class is used when neither 'SEEK_DATA/HOLE' nor FIEMAP is
+ supported by the filesystem or kernel.
+ """
+
+ def __init__(self, image, log=None):
"""Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- hole_first = start
- for first, last in self._do_get_mapped_ranges(start, count):
- if first > hole_first:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, first - 1))
- yield (hole_first, first - 1)
- hole_first = last + 1
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapNobmap: initializing")
- if hole_first < start + count:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, start + count - 1))
- yield (hole_first, start + count - 1)
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return True
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapNobmap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ yield (start, start + count - 1)
def filemap(image, log=None):
"""
@@ -533,7 +505,10 @@ def filemap(image, log=None):
try:
return FilemapFiemap(image, log)
except ErrorNotSupp:
- return FilemapSeek(image, log)
+ try:
+ return FilemapSeek(image, log)
+ except ErrorNotSupp:
+ return FilemapNobmap(image, log)
def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
length=0, api=None):
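
A minimal usage sketch of the new selection order (illustrative; the image path is hypothetical): filemap() now degrades from FIEMAP to SEEK_DATA/SEEK_HOLE to the no-bmap fallback, so callers always get a usable object.

    fmap = filemap('./core-image-minimal.wic')
    # FilemapNobmap simply reports every block as mapped, so this loop still
    # works on filesystems without FIEMAP or SEEK_DATA/SEEK_HOLE support.
    for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
        print('mapped blocks %d-%d' % (first, last))
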
diff --git a/external/poky/scripts/lib/wic/help.py b/external/poky/scripts/lib/wic/help.py
index 64f08052..1e3d06a8 100644
--- a/external/poky/scripts/lib/wic/help.py
+++ b/external/poky/scripts/lib/wic/help.py
@@ -1,21 +1,6 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements some basic help invocation functions along
@@ -356,12 +341,15 @@ DESCRIPTION
wic_cp_usage = """
- Copy files and directories to the vfat or ext* partition
+ Copy files and directories to/from the vfat or ext* partition
+
+ usage: wic cp <src> <dest> [--native-sysroot <path>]
- usage: wic cp <src> <image>:<partition>[<path>] [--native-sysroot <path>]
+ source/destination image in the format <image>:<partition>[<path>]
- This command copies local files or directories to the vfat or ext* partitions
-of partitioned image.
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
See 'wic help cp' for more detailed instructions.
@@ -370,16 +358,18 @@ of partitioned image.
wic_cp_help = """
NAME
- wic cp - copy files and directories to the vfat or ext* partitions
+ wic cp - copy files and directories to/from the vfat or ext* partitions
SYNOPSIS
- wic cp <src> <image>:<partition>
- wic cp <src> <image>:<partition><path>
- wic cp <src> <image>:<partition><path> --native-sysroot <path>
+ wic cp <src> <dest>:<partition>
+ wic cp <src>:<partition> <dest>
+ wic cp <src> <dest-image>:<partition><path>
+ wic cp <src> <dest-image>:<partition><path> --native-sysroot <path>
DESCRIPTION
- This command copies files and directories to the vfat or ext* partition of
- the partitioned image.
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
The first form of it copies file or directory to the root directory of
the partition:
@@ -412,6 +402,10 @@ DESCRIPTION
4 files 0 bytes
15 675 392 bytes free
+ The third form of the command copies a file or directory from the specified
+ path on the partition to the local filesystem:
+ $ wic cp tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/vmlinuz test
+
The -n option is used to specify the path to the native sysroot
containing the tools(parted and mtools) to use.
"""
@@ -437,6 +431,7 @@ NAME
SYNOPSIS
wic rm <src> <image>:<partition><path>
wic rm <src> <image>:<partition><path> --native-sysroot <path>
+ wic rm -r <image>:<partition><path>
DESCRIPTION
This command removes files or directories from the vfat or ext* partition of the
@@ -471,6 +466,9 @@ DESCRIPTION
The -n option is used to specify the path to the native sysroot
containing the tools(parted and mtools) to use.
+
+ The -r option is used to remove directories and their contents
+ recursively; this only applies to ext* partitions.
"""
wic_write_usage = """
@@ -493,7 +491,7 @@ NAME
SYNOPSIS
wic write <image> <target>
wic write <image> <target> --expand auto
- wic write <image> <target> --expand 1:100M-2:300M
+ wic write <image> <target> --expand 1:100M,2:300M
wic write <image> <target> --native-sysroot <path>
DESCRIPTION
@@ -504,7 +502,7 @@ DESCRIPTION
The --expand option is used to resize image partitions.
--expand auto expands partitions to occupy all free space available on the target device.
It's also possible to specify expansion rules in a format
- <partition>:<size>[-<partition>:<size>...] for one or more partitions.
+ <partition>:<size>[,<partition>:<size>...] for one or more partitions.
Specifying size 0 will keep partition unmodified.
Note: Resizing boot partition can result in non-bootable image for non-EFI images. It is
recommended to use size 0 for boot partition to keep image bootable.
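
As a worked example of the corrected separator (image and device names are hypothetical), the following keeps the boot partition untouched and grows the second partition to 300M:

    $ wic write ./core-image-minimal-qemux86-64.wic /dev/sdb --expand 1:0,2:300M
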
@@ -538,7 +536,8 @@ DESCRIPTION
Source plugins can also be implemented and added by external
layers - any plugins found in a scripts/lib/wic/plugins/source/
- directory in an external layer will also be made available.
+ or lib/wic/plugins/source/ directory in an external layer will
+ also be made available.
When the wic implementation needs to invoke a partition-specific
implementation, it looks for the plugin that has the same name as
@@ -971,6 +970,16 @@ DESCRIPTION
is omitted, not the directory itself. This option only
has an effect with the rootfs source plugin.
+ --include-path: This option is specific to wic. It adds the contents
+ of the given path to the resulting image. The path is
+ relative to the directory in which wic is running, not
+ the rootfs itself, so use of an absolute path is
+ recommended. This option is most useful when multiple
+ copies of the rootfs are added to an image and it is
+ required to add extra content to only one of these
+ copies. This option only has an effect with the rootfs
+ source plugin.
+
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
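
For illustration (the directory name is hypothetical), a .wks partition line using the new --include-path option, with the extra content taken relative to the directory wic runs from:

    part / --source rootfs --fstype=ext4 --label root --include-path extra-files/
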
@@ -1061,3 +1070,59 @@ NAME
DESCRIPTION
Specify a help topic to display it. Topics are shown above.
"""
+
+
+wic_help = """
+Creates a customized OpenEmbedded image.
+
+Usage: wic [--version]
+ wic help [COMMAND or TOPIC]
+ wic COMMAND [ARGS]
+
+ usage 1: Returns the current version of Wic
+ usage 2: Returns detailed help for a COMMAND or TOPIC
+ usage 3: Executes COMMAND
+
+
+COMMAND:
+
+ list - List available canned images and source plugins
+ ls - List contents of partitioned image or partition
+ rm - Remove files or directories from the vfat or ext* partitions
+ help - Show help for a wic COMMAND or TOPIC
+ write - Write an image to a device
+ cp - Copy files and directories to the vfat or ext* partitions
+ create - Create a new OpenEmbedded image
+
+
+TOPIC:
+ overview - Presents an overall overview of Wic
+ plugins - Presents an overview and API for Wic plugins
+ kickstart - Presents a Wic kickstart file reference
+
+
+Examples:
+
+ $ wic --version
+
+ Returns the current version of Wic
+
+
+ $ wic help cp
+
+ Returns the SYNOPSIS and DESCRIPTION for the Wic "cp" command.
+
+
+ $ wic list images
+
+ Returns the list of canned images (i.e. *.wks files located in
+ the /scripts/lib/wic/canned-wks directory).
+
+
+ $ wic create mkefidisk -e core-image-minimal
+
+ Creates an EFI disk image from artifacts used in a previous
+ core-image-minimal build in standard BitBake locations
+ (e.g. Cooked Mode).
+
+"""
diff --git a/external/poky/scripts/lib/wic/ksparser.py b/external/poky/scripts/lib/wic/ksparser.py
index 7e5a9c50..650b9762 100644
--- a/external/poky/scripts/lib/wic/ksparser.py
+++ b/external/poky/scripts/lib/wic/ksparser.py
@@ -1,21 +1,8 @@
-#!/usr/bin/env python -tt
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides parser for kickstart format
@@ -28,14 +15,30 @@
import os
import shlex
import logging
+import re
from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
from wic.engine import find_canned
from wic.partition import Partition
+from wic.misc import get_bitbake_var
logger = logging.getLogger('wic')
+__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
+
+def expand_line(line):
+ while True:
+ m = __expand_var_regexp__.search(line)
+ if not m:
+ return line
+ key = m.group()[2:-1]
+ val = get_bitbake_var(key)
+ if val is None:
+ logger.warning("cannot expand variable %s" % key)
+ return line
+ line = line[:m.start()] + val + line[m.end():]
+
class KickStartError(Exception):
"""Custom exception."""
pass
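
For illustration (variable and values are hypothetical), expand_line() substitutes ${VAR} references with the corresponding BitBake variable before the line is parsed:

    # .wks input:  part /boot --fstype=vfat --label boot-${MACHINE}
    # with MACHINE = "qemux86-64" the parser sees:
    #               part /boot --fstype=vfat --label boot-qemux86-64
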
@@ -134,6 +137,7 @@ class KickStart():
part.add_argument('--active', action='store_true')
part.add_argument('--align', type=int)
part.add_argument('--exclude-path', nargs='+')
+ part.add_argument('--include-path', nargs='+')
part.add_argument("--extra-space", type=sizetype)
part.add_argument('--fsoptions', dest='fsopts')
part.add_argument('--fstype', default='vfat',
@@ -148,6 +152,8 @@ class KickStart():
part.add_argument('--part-name')
part.add_argument('--part-type')
part.add_argument('--rootfs-dir')
+ part.add_argument('--type', default='primary',
+ choices = ('primary', 'logical'))
# --size and --fixed-size cannot be specified together; options
# ----extra-space and --overhead-factor should also raise a parser
@@ -190,6 +196,7 @@ class KickStart():
line = line.strip()
lineno += 1
if line and line[0] != '#':
+ line = expand_line(line)
try:
line_args = shlex.split(line)
parsed = parser.parse_args(line_args)
@@ -239,6 +246,11 @@ class KickStart():
elif line.startswith('bootloader'):
if not self.bootloader:
self.bootloader = parsed
+ # Concatenate the strings set in APPEND
+ append_var = get_bitbake_var("APPEND")
+ if append_var:
+ self.bootloader.append = ' '.join(filter(None, \
+ (self.bootloader.append, append_var)))
else:
err = "%s:%d: more than one bootloader specified" \
% (confpath, lineno)
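
A brief worked example of the new APPEND handling (values are hypothetical):

    # .wks:   bootloader --append="console=ttyS0,115200"
    # APPEND: "quiet splash"
    # result: self.bootloader.append == "console=ttyS0,115200 quiet splash"
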
diff --git a/external/poky/scripts/lib/wic/misc.py b/external/poky/scripts/lib/wic/misc.py
index ee888b47..1f199b9f 100644
--- a/external/poky/scripts/lib/wic/misc.py
+++ b/external/poky/scripts/lib/wic/misc.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a place to collect various wic-related utils
diff --git a/external/poky/scripts/lib/wic/partition.py b/external/poky/scripts/lib/wic/partition.py
index 3da7e23e..2d95f784 100644
--- a/external/poky/scripts/lib/wic/partition.py
+++ b/external/poky/scripts/lib/wic/partition.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013-2016 Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
@@ -44,6 +30,7 @@ class Partition():
self.device = None
self.extra_space = args.extra_space
self.exclude_path = args.exclude_path
+ self.include_path = args.include_path
self.fsopts = args.fsopts
self.fstype = args.fstype
self.label = args.label
@@ -64,6 +51,7 @@ class Partition():
self.use_uuid = args.use_uuid
self.uuid = args.uuid
self.fsuuid = args.fsuuid
+ self.type = args.type
self.lineno = lineno
self.source_file = ""
@@ -173,7 +161,7 @@ class Partition():
# Split sourceparams string of the form key1=val1[,key2=val2,...]
# into a dict. Also accepts valueless keys i.e. without =
splitted = self.sourceparams.split(',')
- srcparams_dict = dict(par.split('=') for par in splitted if par)
+ srcparams_dict = dict(par.split('=', 1) for par in splitted if par)
plugin = PluginMgr.get_plugins('source')[self.source]
plugin.do_configure_partition(self, srcparams_dict, creator,
@@ -225,19 +213,24 @@ class Partition():
if os.path.isfile(rootfs):
os.remove(rootfs)
- # Get rootfs size from bitbake variable if it's not set in .ks file
if not self.size and real_rootfs:
- # Bitbake variable ROOTFS_SIZE is calculated in
- # Image._get_rootfs_size method from meta/lib/oe/image.py
- # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
- # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ # The rootfs size is not set in .ks file so try to get it
+ # from bitbake variable
rsize_bb = get_bitbake_var('ROOTFS_SIZE')
- if rsize_bb:
- logger.warning('overhead-factor was specified, but size was not,'
- ' so bitbake variables will be used for the size.'
- ' In this case both IMAGE_OVERHEAD_FACTOR and '
- '--overhead-factor will be applied')
+ rdir = get_bitbake_var('IMAGE_ROOTFS')
+ if rsize_bb and rdir == rootfs_dir:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
self.size = int(round(float(rsize_bb)))
+ else:
+ # Bitbake variable ROOTFS_SIZE is not defined so compute it
+ # from the rootfs_dir size using the same logic found in
+ # get_rootfs_size() from meta/classes/image.bbclass
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ self.size = int(out.split()[0])
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
@@ -322,7 +315,7 @@ class Partition():
dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
(label_str, self.fsuuid, size_str, extraopts, rootfs,
- max(8250, rootfs_size))
+ rootfs_size)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
diff --git a/external/poky/scripts/lib/wic/pluginbase.py b/external/poky/scripts/lib/wic/pluginbase.py
index 686d2fee..d9b4e577 100644
--- a/external/poky/scripts/lib/wic/pluginbase.py
+++ b/external/poky/scripts/lib/wic/pluginbase.py
@@ -1,19 +1,9 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__all__ = ['ImagerPlugin', 'SourcePlugin']
@@ -28,7 +18,7 @@ from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
-SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
@@ -48,10 +38,11 @@ class PluginMgr:
cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
layers = get_bitbake_var("BBLAYERS") or ''
for layer_path in layers.split():
- path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
- path = os.path.abspath(os.path.expanduser(path))
- if path not in cls._plugin_dirs and os.path.isdir(path):
- cls._plugin_dirs.insert(0, path)
+ for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
+ path = os.path.join(layer_path, script_plugin_dir)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
if ptype not in PLUGINS:
# load all ptype plugins
diff --git a/external/poky/scripts/lib/wic/plugins/imager/direct.py b/external/poky/scripts/lib/wic/plugins/imager/direct.py
index bb14a334..2d06c242 100644
--- a/external/poky/scripts/lib/wic/plugins/imager/direct.py
+++ b/external/poky/scripts/lib/wic/plugins/imager/direct.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'direct' imager plugin class for 'wic'
@@ -63,7 +49,6 @@ class DirectPlugin(ImagerPlugin):
# parse possible 'rootfs=name' items
self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
- self.replaced_rootfs_paths = {}
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
@@ -73,6 +58,7 @@ class DirectPlugin(ImagerPlugin):
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
+ self.original_fstab = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
@@ -118,24 +104,13 @@ class DirectPlugin(ImagerPlugin):
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
+ self.original_fstab = fstab_lines.copy()
if self._update_fstab(fstab_lines, self.parts):
- # copy rootfs dir to workdir to update fstab
- # as rootfs can be used by other tasks and can't be modified
- new_pseudo = os.path.realpath(os.path.join(self.workdir, "pseudo"))
- from_dir = os.path.join(os.path.join(image_rootfs, ".."), "pseudo")
- from_dir = os.path.realpath(from_dir)
- copyhardlinktree(from_dir, new_pseudo)
- new_rootfs = os.path.realpath(os.path.join(self.workdir, "rootfs_copy"))
- copyhardlinktree(image_rootfs, new_rootfs)
- fstab_path = os.path.join(new_rootfs, 'etc/fstab')
-
- os.unlink(fstab_path)
-
with open(fstab_path, "w") as fstab:
fstab.writelines(fstab_lines)
-
- return new_rootfs
+ else:
+ self.original_fstab = None
def _update_fstab(self, fstab_lines, parts):
"""Assume partition order same as in wks"""
@@ -184,14 +159,8 @@ class DirectPlugin(ImagerPlugin):
filesystems from the artifacts directly and combine them into
a partitioned image.
"""
- if self.no_fstab_update:
- new_rootfs = None
- else:
- new_rootfs = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
- if new_rootfs:
- # rootfs was copied to update fstab
- self.replaced_rootfs_paths[new_rootfs] = self.rootfs_dir['ROOTFS_DIR']
- self.rootfs_dir['ROOTFS_DIR'] = new_rootfs
+ if not self.no_fstab_update:
+ self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
@@ -267,8 +236,6 @@ class DirectPlugin(ImagerPlugin):
else:
suffix = '["%s"]:' % (part.mountpoint or part.label)
rootdir = part.rootfs_dir
- if rootdir in self.replaced_rootfs_paths:
- rootdir = self.replaced_rootfs_paths[rootdir]
msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
@@ -306,6 +273,12 @@ class DirectPlugin(ImagerPlugin):
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
+ #Restore original fstab
+ if self.original_fstab:
+ fstab_path = self.rootfs_dir.get("ROOTFS_DIR") + "/etc/fstab"
+ with open(fstab_path, "w") as fstab:
+ fstab.writelines(self.original_fstab)
+
# remove work directory
shutil.rmtree(self.workdir, ignore_errors=True)
@@ -327,6 +300,10 @@ class PartitionedImage():
self.path = path # Path to the image file
self.numpart = 0 # Number of allocated partitions
self.realpart = 0 # Number of partitions in the partition table
+ self.primary_part_num = 0 # Number of primary partitions (msdos)
+ self.extendedpart = 0 # Create extended partition before this logical partition (msdos)
+ self.extended_size_sec = 0 # Size of the extended partition (msdos)
+ self.logical_part_cnt = 0 # Number of total logical partitions (msdos)
self.offset = 0 # Offset of next partition (in sectors)
self.min_size = 0 # Minimum required disk size to fit
# all partitions (in bytes)
@@ -339,6 +316,7 @@ class PartitionedImage():
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self.native_sysroot = native_sysroot
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
# calculate the real partition number, accounting for partitions not
# in the partition table and logical partitions
@@ -348,7 +326,7 @@ class PartitionedImage():
part.realnum = 0
else:
realnum += 1
- if self.ptable_format == 'msdos' and realnum > 3 and len(partitions) > 4:
+ if self.ptable_format == 'msdos' and realnum > 3 and num_real_partitions > 4:
part.realnum = realnum + 1
continue
part.realnum = realnum
@@ -418,12 +396,16 @@ class PartitionedImage():
# Skip one sector required for the partitioning scheme overhead
self.offset += overhead
- if self.realpart > 3 and num_real_partitions > 4:
+ if self.ptable_format == "msdos":
+ if self.primary_part_num > 3 or \
+ (self.extendedpart == 0 and self.primary_part_num >= 3 and num_real_partitions > 4):
+ part.type = 'logical'
# Reserve a sector for EBR for every logical partition
# before alignment is performed.
- if self.ptable_format == "msdos":
- self.offset += 1
+ if part.type == 'logical':
+ self.offset += 2
+ align_sectors = 0
if part.align:
# If not first partition and we do have alignment set we need
# to align the partition.
@@ -449,18 +431,25 @@ class PartitionedImage():
part.start = self.offset
self.offset += part.size_sec
- part.type = 'primary'
if not part.no_table:
part.num = self.realpart
else:
part.num = 0
- if self.ptable_format == "msdos":
- # only count the partitions that are in partition table
- if num_real_partitions > 4:
- if self.realpart > 3:
- part.type = 'logical'
- part.num = self.realpart + 1
+ if self.ptable_format == "msdos" and not part.no_table:
+ if part.type == 'logical':
+ self.logical_part_cnt += 1
+ part.num = self.logical_part_cnt + 4
+ if self.extendedpart == 0:
+ # Create extended partition as a primary partition
+ self.primary_part_num += 1
+ self.extendedpart = part.num
+ else:
+ self.extended_size_sec += align_sectors
+ self.extended_size_sec += part.size_sec + 2
+ else:
+ self.primary_part_num += 1
+ part.num = self.primary_part_num
logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
"sectors (%d bytes).", part.mountpoint, part.disk,
@@ -510,7 +499,7 @@ class PartitionedImage():
if part.num == 0:
continue
- if self.ptable_format == "msdos" and part.num == 5:
+ if self.ptable_format == "msdos" and part.num == self.extendedpart:
# Create an extended partition (note: extended
# partition is described in MBR and contains all
# logical partitions). The logical partitions save a
@@ -523,8 +512,8 @@ class PartitionedImage():
# add a sector at the back, so that there is enough
# room for all logical partitions.
self._create_partition(self.path, "extended",
- None, part.start - 1,
- self.offset - part.start + 1)
+ None, part.start - 2,
+ self.extended_size_sec)
if part.fstype == "swap":
parted_fs_type = "linux-swap"
@@ -591,9 +580,7 @@ class PartitionedImage():
self.native_sysroot)
def cleanup(self):
- # remove partition images
- for image in set(self.partimages):
- os.remove(image)
+ pass
def assemble(self):
logger.debug("Installing partitions")
diff --git a/external/poky/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py b/external/poky/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
new file mode 100644
index 00000000..5bd73906
--- /dev/null
+++ b/external/poky/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
@@ -0,0 +1,213 @@
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-biosplusefi' source plugin class for 'wic'
+#
+# AUTHORS
+# William Bourque <wbourque [at) gmail.com>
+
+import types
+
+from wic.pluginbase import SourcePlugin
+from importlib.machinery import SourceFileLoader
+
+class BootimgBiosPlusEFIPlugin(SourcePlugin):
+ """
+ Create MBR + EFI boot partition
+
+ This plugin creates a boot partition that contains both
+ legacy BIOS and EFI content. It will be able to boot from both.
+ This is useful when managing PC fleet with some older machines
+ without EFI support.
+
+ Note it is possible to create an image that can boot from both
+ legacy BIOS and EFI by defining two partitions: one with arg
+ --source bootimg-efi and another one with --source bootimg-pcbios.
+ However, this method has the obvious downside that it requires TWO
+ partitions to be created on the storage device.
+ Both partitions will also be marked as "bootable", which does not work on
+ most BIOSes, as the BIOS often uses the "bootable" flag to determine
+ what to boot. If you have such a BIOS, you need to manually remove the
+ "bootable" flag from the EFI partition for the drive to be bootable.
+ Having two partitions also seems to confuse wic: the content of
+ the first partition will be duplicated into the second, even though it
+ will not be used at all.
+
+ Also, unlike "isoimage-isohybrid", which also does BIOS and EFI, this plugin
+ allows you to have more than a single rootfs partition and does
+ not turn the rootfs into an initramfs RAM image.
+
+ This plugin is made to put everything into a single /boot partition so it
+ does not have the limitations listed above.
+
+ The plugin tries not to reimplement what has already
+ been done in other plugins; as such it imports "bootimg-pcbios"
+ and "bootimg-efi".
+ Plugin "bootimg-pcbios" is used to generate the legacy BIOS boot.
+ Plugin "bootimg-efi" is used to generate the UEFI boot. Note that it
+ requires a --sourceparams argument to know which loader to use; refer
+ to "bootimg-efi" code/documentation for the list of loaders.
+
+ Imports are handled with "SourceFileLoader" from importlib, as it is
+ otherwise very difficult to import modules that have a hyphen "-" in
+ their filenames.
+ The SourcePlugin() methods used in the plugins (do_install_disk,
+ do_configure_partition, do_prepare_partition) are then called on both,
+ beginning by "bootimg-efi".
+
+ Plugin options, such as "--sourceparams", can still be passed to a
+ plugin, as long as they do not cause issues in the other plugin.
+
+ Example wic configuration:
+ part /boot --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\\
+ --ondisk sda --label os_boot --active --align 1024 --use-uuid
+ """
+
+ name = 'bootimg-biosplusefi'
+
+ __PCBIOS_MODULE_NAME = "bootimg-pcbios"
+ __EFI_MODULE_NAME = "bootimg-efi"
+
+ __imgEFIObj = None
+ __imgBiosObj = None
+
+ @classmethod
+ def __init__(cls):
+ """
+ Constructor (init)
+ """
+
+ # XXX
+ # For some reason, the __init__ constructor is never called.
+ # Something to do with how pluginbase works?
+ cls.__instanciateSubClasses()
+
+ @classmethod
+ def __instanciateSubClasses(cls):
+ """
+
+ """
+
+ # Import bootimg-pcbios (class name "BootimgPcbiosPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__PCBIOS_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__PCBIOS_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgBiosObj = mod.BootimgPcbiosPlugin()
+
+ # Import bootimg-efi (class name "BootimgEFIPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__EFI_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__EFI_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgEFIObj = mod.BootimgEFIPlugin()
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image.
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition()
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
diff --git a/external/poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/external/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
index 12698184..2cfdc10e 100644
--- a/external/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/external/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
@@ -69,28 +55,44 @@ class BootimgEFIPlugin(SourcePlugin):
if not bootimg_dir:
raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
else:
logger.debug("Ignoring missing initrd")
if not custom_cfg:
# Create grub configuration using parameters from wks file
bootloader = creator.ks.bootloader
+ title = source_params.get('title')
grubefi_conf = ""
grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
grubefi_conf += "default=boot\n"
grubefi_conf += "timeout=%s\n" % bootloader.timeout
- grubefi_conf += "menuentry 'boot'{\n"
+ grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s root=%s rootwait %s\n" \
- % (kernel, creator.rootdev, bootloader.append)
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ grubefi_conf += "linux /%s %s rootwait %s\n" \
+ % (kernel, label_conf, bootloader.append)
if initrd:
- grubefi_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ grubefi_conf += "initrd"
+ for rd in initrds:
+ grubefi_conf += " /%s" % rd
+ grubefi_conf += "\n"
grubefi_conf += "}\n"
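
For illustration (file names are hypothetical), the semicolon-separated initrd handling lets a .wks entry name several initrds through --sourceparams:

    part /boot --source bootimg-efi --active --label boot \
         --sourceparams="loader=grub-efi,initrd=base-initramfs.cpio.gz;extra-firmware.cpio.gz"
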
@@ -125,8 +127,10 @@ class BootimgEFIPlugin(SourcePlugin):
if not bootimg_dir:
raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
else:
logger.debug("Ignoring missing initrd")
@@ -151,16 +155,30 @@ class BootimgEFIPlugin(SourcePlugin):
if not custom_cfg:
# Create systemd-boot configuration using parameters from wks file
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ title = source_params.get('title')
boot_conf = ""
- boot_conf += "title boot\n"
- boot_conf += "linux %s\n" % kernel
- boot_conf += "options LABEL=Boot root=%s %s\n" % \
- (creator.rootdev, bootloader.append)
+ boot_conf += "title %s\n" % (title if title else "boot")
+ boot_conf += "linux /%s\n" % kernel
+
+ label = source_params.get('label')
+ label_conf = "LABEL=Boot root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ boot_conf += "options %s %s\n" % \
+ (label_conf, bootloader.append)
if initrd:
- boot_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ for rd in initrds:
+ boot_conf += "initrd /%s\n" % rd
logger.debug("Writing systemd-boot config "
"%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
@@ -210,8 +228,14 @@ class BootimgEFIPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (staging_kernel_dir, hdddir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
exec_cmd(install_cmd)
diff --git a/external/poky/scripts/lib/wic/plugins/source/bootimg-partition.py b/external/poky/scripts/lib/wic/plugins/source/bootimg-partition.py
index ddc880be..138986a7 100644
--- a/external/poky/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/external/poky/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-partition' source plugin class for
diff --git a/external/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/external/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index 9347aa7f..f2639e70 100644
--- a/external/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/external/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
@@ -163,8 +149,14 @@ class BootimgPcbiosPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
- (staging_kernel_dir, hdddir),
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
+ (staging_kernel_dir, kernel, hdddir),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
diff --git a/external/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/external/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 170077c2..11326a27 100644
--- a/external/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/external/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
@@ -83,8 +70,13 @@ class IsoImagePlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/bzImage"
- syslinux_conf += "KERNEL " + kernel + "\n"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ syslinux_conf += "KERNEL /" + kernel + "\n"
syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
% bootloader.append
@@ -127,9 +119,13 @@ class IsoImagePlugin(SourcePlugin):
grubefi_conf += "\n"
grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s rootwait %s\n" \
+ grubefi_conf += "linux /%s rootwait %s\n" \
% (kernel, bootloader.append)
grubefi_conf += "initrd /initrd \n"
grubefi_conf += "}\n"
@@ -281,9 +277,14 @@ class IsoImagePlugin(SourcePlugin):
if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
os.remove("%s/initrd.cpio.gz" % cr_workdir)
- # Install bzImage
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (kernel_dir, isodir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (kernel_dir, kernel, isodir, kernel)
exec_cmd(install_cmd)
#Create bootloader for efi boot
@@ -335,19 +336,23 @@ class IsoImagePlugin(SourcePlugin):
(img_iso_dir, isodir)
exec_cmd(install_cmd)
else:
+ # Default to 100 blocks of extra space for file system overhead
+ esp_extra_blocks = int(source_params.get('esp_extra_blocks', '100'))
+
du_cmd = "du -bks %s/EFI" % isodir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
- # Add some extra space for file system overhead
- blocks += 100
+ blocks += esp_extra_blocks
logger.debug("Added 100 extra blocks to %s to get to %d "
"total blocks", part.mountpoint, blocks)
# dosfs image for EFI boot
bootimg = "%s/efi.img" % isodir
- dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
- % (bootimg, blocks)
+ esp_label = source_params.get('esp_label', 'EFIimg')
+
+ dosfs_cmd = 'mkfs.vfat -n \'%s\' -S 512 -C %s %d' \
+ % (esp_label, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mmd_cmd = "mmd -i %s ::/EFI" % bootimg
diff --git a/external/poky/scripts/lib/wic/plugins/source/rawcopy.py b/external/poky/scripts/lib/wic/plugins/source/rawcopy.py
index e86398ac..3c4997d8 100644
--- a/external/poky/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/external/poky/scripts/lib/wic/plugins/source/rawcopy.py
@@ -1,18 +1,5 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import logging
@@ -70,7 +57,10 @@ class RawCopyPlugin(SourcePlugin):
raise WicError("No file specified")
src = os.path.join(kernel_dir, source_params['file'])
- dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+ dst = os.path.join(cr_workdir, "%s.%s" % (os.path.basename(source_params['file']), part.lineno))
+
+ if not os.path.exists(os.path.dirname(dst)):
+ os.makedirs(os.path.dirname(dst))
if 'skip' in source_params:
sparse_copy(src, dst, skip=int(source_params['skip']))
diff --git a/external/poky/scripts/lib/wic/plugins/source/rootfs.py b/external/poky/scripts/lib/wic/plugins/source/rootfs.py
index aec720fb..705aeb55 100644
--- a/external/poky/scripts/lib/wic/plugins/source/rootfs.py
+++ b/external/poky/scripts/lib/wic/plugins/source/rootfs.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
@@ -85,7 +71,7 @@ class RootfsPlugin(SourcePlugin):
new_rootfs = None
# Handle excluded paths.
- if part.exclude_path is not None:
+ if part.exclude_path or part.include_path:
# We need a new rootfs directory we can delete files from. Copy to
# workdir.
new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
@@ -95,7 +81,10 @@ class RootfsPlugin(SourcePlugin):
copyhardlinktree(part.rootfs_dir, new_rootfs)
- for orig_path in part.exclude_path:
+ for path in part.include_path or []:
+ copyhardlinktree(path, new_rootfs)
+
+ for orig_path in part.exclude_path or []:
path = orig_path
if os.path.isabs(path):
logger.error("Must be relative: --exclude-path=%s" % orig_path)
diff --git a/external/poky/scripts/lnr b/external/poky/scripts/lnr
index 5fed780e..a2ac4fec 100755
--- a/external/poky/scripts/lnr
+++ b/external/poky/scripts/lnr
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Create a *relative* symlink, just like ln --relative does but without needing
# coreutils 8.16.
diff --git a/external/poky/scripts/multilib_header_wrapper.h b/external/poky/scripts/multilib_header_wrapper.h
index 48247907..c81e7ee5 100644
--- a/external/poky/scripts/multilib_header_wrapper.h
+++ b/external/poky/scripts/multilib_header_wrapper.h
@@ -1,23 +1,7 @@
/*
* Copyright (C) 2005-2011 by Wind River Systems, Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
*/
diff --git a/external/poky/scripts/native-intercept/chgrp b/external/poky/scripts/native-intercept/chgrp
new file mode 100755
index 00000000..399c979f
--- /dev/null
+++ b/external/poky/scripts/native-intercept/chgrp
@@ -0,0 +1,5 @@
+#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+echo "Intercept $0: $@ -- do nothing"
diff --git a/external/poky/scripts/native-intercept/chown b/external/poky/scripts/native-intercept/chown
index 4f43271c..399c979f 100755
--- a/external/poky/scripts/native-intercept/chown
+++ b/external/poky/scripts/native-intercept/chown
@@ -1,2 +1,5 @@
#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
echo "Intercept $0: $@ -- do nothing"
diff --git a/external/poky/scripts/oe-build-perf-report b/external/poky/scripts/oe-build-perf-report
index f6fb458c..e781f4f0 100755
--- a/external/poky/scripts/oe-build-perf-report
+++ b/external/poky/scripts/oe-build-perf-report
@@ -4,15 +4,9 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import logging
@@ -378,7 +372,7 @@ def print_html_report(data, id_comp, buildstats):
chart_opts=chart_opts))
-def get_buildstats(repo, notes_ref, revs, outdir=None):
+def get_buildstats(repo, notes_ref, notes_ref2, revs, outdir=None):
"""Get the buildstats from git notes"""
full_ref = 'refs/notes/' + notes_ref
if not repo.rev_parse(full_ref):
@@ -397,8 +391,13 @@ def get_buildstats(repo, notes_ref, revs, outdir=None):
for tag in rev.tags:
log.debug(' %s', tag)
try:
- bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
- 'show', tag + '^0']))
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref, 'show', tag + '^0']))
+ except GitError:
+ if notes_ref2:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref2, 'show', tag + '^0']))
+ else:
+ raise
except GitError:
log.warning("Buildstats not found for %s", tag)
bs_all = {}
@@ -595,9 +594,12 @@ def main(argv=None):
buildstats = None
if args.dump_buildstats or args.html:
outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
- notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
- args.machine)
- buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ notes_ref2 = None
+ if args.branch2:
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch2, args.machine)
+ notes_ref2 = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ buildstats = get_buildstats(repo, notes_ref, notes_ref2, [rev_l, rev_r], outdir)
# Print report
if not args.html:
diff --git a/external/poky/scripts/oe-build-perf-test b/external/poky/scripts/oe-build-perf-test
index 669470fa..00e00b4c 100755
--- a/external/poky/scripts/oe-build-perf-test
+++ b/external/poky/scripts/oe-build-perf-test
@@ -1,19 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-"""Build performance test script"""
+
import argparse
import errno
import fcntl
diff --git a/external/poky/scripts/oe-buildenv-internal b/external/poky/scripts/oe-buildenv-internal
index 52ce3298..ba0a9b44 100755
--- a/external/poky/scripts/oe-buildenv-internal
+++ b/external/poky/scripts/oe-buildenv-internal
@@ -4,19 +4,8 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if ! $(return >/dev/null 2>&1) ; then
echo 'oe-buildenv-internal: error: this script must be sourced'
@@ -40,22 +29,15 @@ if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
return 1
fi
-py_v27_check=$(python2 -c 'import sys; print sys.version_info >= (2,7,3)')
-if [ "$py_v27_check" != "True" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please upgrade your python v2."
-fi
-unset py_v27_check
-
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
-py_v34_check=$(python3 -c 'import sys; print(sys.version_info >= (3,4,0))')
-if [ "$py_v34_check" != "True" ]; then
- echo >&2 "BitBake requires Python 3.4.0 or later as 'python3'"
+py_v35_check=$(python3 -c 'import sys; print(sys.version_info >= (3,5,0))')
+if [ "$py_v35_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.5.0 or later as 'python3 (scripts/install-buildtools can be used if needed)'"
return 1
fi
-unset py_v34_check
+unset py_v35_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -124,7 +106,8 @@ BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
-SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE"
+SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE \
+BB_LOGCONFIG"
BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
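
The new BB_LOGCONFIG entry is folded into BB_ENV_EXTRAWHITE by the tr/sort pipeline on the
last line above, which splits the combined list, de-duplicates it and rejoins it. A rough
Python equivalent of that pipeline, for illustration only:

    def merge_whitelists(*lists):
        # Equivalent of the shell pipeline: tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' '
        names = set()
        for words in lists:
            names.update(words.split())
        return ' '.join(sorted(names))

    # merge_whitelists('MACHINE DISTRO', 'DISTRO BB_LOGCONFIG') -> 'BB_LOGCONFIG DISTRO MACHINE'
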
diff --git a/external/poky/scripts/oe-check-sstate b/external/poky/scripts/oe-check-sstate
index d06efe43..ca249ca6 100755
--- a/external/poky/scripts/oe-check-sstate
+++ b/external/poky/scripts/oe-check-sstate
@@ -5,18 +5,8 @@
# Copyright 2016 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/oe-depends-dot b/external/poky/scripts/oe-depends-dot
index 6c7e9d33..5eb3e127 100755
--- a/external/poky/scripts/oe-depends-dot
+++ b/external/poky/scripts/oe-depends-dot
@@ -2,18 +2,8 @@
#
# Copyright (C) 2018 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
diff --git a/external/poky/scripts/oe-find-native-sysroot b/external/poky/scripts/oe-find-native-sysroot
index cc146b3f..5146bbf9 100755
--- a/external/poky/scripts/oe-find-native-sysroot
+++ b/external/poky/scripts/oe-find-native-sysroot
@@ -17,18 +17,8 @@
#
# Copyright (c) 2010 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
diff --git a/external/poky/scripts/oe-git-archive b/external/poky/scripts/oe-git-archive
index ab1c2b9a..9305ed0b 100755
--- a/external/poky/scripts/oe-git-archive
+++ b/external/poky/scripts/oe-git-archive
@@ -4,15 +4,9 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import logging
import os
diff --git a/external/poky/scripts/oe-git-proxy b/external/poky/scripts/oe-git-proxy
index 1800942f..aa9b9dc9 100755
--- a/external/poky/scripts/oe-git-proxy
+++ b/external/poky/scripts/oe-git-proxy
@@ -13,11 +13,15 @@
# ALL_PROXY=https://proxy.example.com:8080
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
# AUTHORS
# Darren Hart <dvhart@linux.intel.com>
+# disable pathname expansion, NO_PROXY fields could start with "*" or be it
+set -f
+
if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'oe-git-proxy: error: the following arguments are required: host port'
echo 'Usage: oe-git-proxy host port'
@@ -40,10 +44,12 @@ if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
fi
# Locate the netcat binary
-SOCAT=$(which socat 2>/dev/null)
-if [ $? -ne 0 ]; then
- echo "ERROR: socat binary not in PATH" 1>&2
- exit 1
+if [ -z "$SOCAT" ]; then
+ SOCAT=$(which socat 2>/dev/null)
+ if [ $? -ne 0 ]; then
+ echo "ERROR: socat binary not in PATH" 1>&2
+ exit 1
+ fi
fi
METHOD=""
@@ -58,7 +64,7 @@ ipv4_val() {
IP="$1"
SHIFT=24
VAL=0
- for B in ${IP//./ }; do
+ for B in $( echo "$IP" | tr '.' ' ' ); do
VAL=$(($VAL+$(($B<<$SHIFT))))
SHIFT=$(($SHIFT-8))
done
@@ -101,7 +107,7 @@ match_host() {
HOST=$1
GLOB=$2
- if [ -z "${HOST%%$GLOB}" ]; then
+ if [ -z "${HOST%%*$GLOB}" ]; then
return 0
fi
@@ -131,8 +137,8 @@ if [ -z "$ALL_PROXY" ]; then
fi
# Connect directly to hosts in NO_PROXY
-for H in "${NO_PROXY//,/ }"; do
- if match_host $1 "$H"; then
+for H in $( echo "$NO_PROXY" | tr ',' ' ' ); do
+ if match_host $1 $H; then
exec $SOCAT STDIO $METHOD
fi
done
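
These oe-git-proxy hunks replace the bash-only ${VAR//./ } and ${VAR//,/ } expansions with
tr so the script also runs under a plain POSIX sh, and widen the NO_PROXY glob match by
prefixing '*'. A hypothetical Python mirror of what ipv4_val() and match_host() compute
(illustrative only, not part of the script):

    from fnmatch import fnmatch

    def ipv4_val(ip):
        # Same arithmetic as the shell loop: shift each octet into place, a.b.c.d -> 32-bit int.
        val, shift = 0, 24
        for octet in ip.split('.'):
            val += int(octet) << shift
            shift -= 8
        return val

    def match_host(host, glob):
        # NO_PROXY entries such as '.example.com' or '*.example.com' act as suffix globs.
        return fnmatch(host, '*' + glob.lstrip('*'))

    # ipv4_val('192.168.1.1') == 3232235777
    # match_host('git.example.com', '.example.com') is True
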
diff --git a/external/poky/scripts/oe-gnome-terminal-phonehome b/external/poky/scripts/oe-gnome-terminal-phonehome
index e0235488..b6b9a386 100755
--- a/external/poky/scripts/oe-gnome-terminal-phonehome
+++ b/external/poky/scripts/oe-gnome-terminal-phonehome
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Gnome terminal won't tell us which PID a given command is run as
# or allow a single instance so we can't tell when it completes.
# This allows us to figure out the PID of the target so we can tell
diff --git a/external/poky/scripts/oe-pkgdata-browser b/external/poky/scripts/oe-pkgdata-browser
new file mode 100755
index 00000000..8d223185
--- /dev/null
+++ b/external/poky/scripts/oe-pkgdata-browser
@@ -0,0 +1,253 @@
+#! /usr/bin/env python3
+
+import os, sys, enum, ast
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ print("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+import bb
+
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk, Gdk, GObject
+
+RecipeColumns = enum.IntEnum("RecipeColumns", {"Recipe": 0})
+PackageColumns = enum.IntEnum("PackageColumns", {"Package": 0, "Size": 1})
+FileColumns = enum.IntEnum("FileColumns", {"Filename": 0, "Size": 1})
+
+import time
+def timeit(f):
+ def timed(*args, **kw):
+ ts = time.time()
+ print ("func:%r calling" % f.__name__)
+ result = f(*args, **kw)
+ te = time.time()
+ print ('func:%r args:[%r, %r] took: %2.4f sec' % \
+ (f.__name__, args, kw, te-ts))
+ return result
+ return timed
+
+def human_size(nbytes):
+ import math
+ suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB']
+ human = nbytes
+ rank = 0
+ if nbytes != 0:
+ rank = int((math.log10(nbytes)) / 3)
+ rank = min(rank, len(suffixes) - 1)
+ human = nbytes / (1000.0 ** rank)
+ f = ('%.2f' % human).rstrip('0').rstrip('.')
+ return '%s %s' % (f, suffixes[rank])
+
+def load(filename, suffix=None):
+ from configparser import ConfigParser
+ from itertools import chain
+
+ parser = ConfigParser()
+ if suffix:
+ parser.optionxform = lambda option: option.replace("_" + suffix, "")
+ with open(filename) as lines:
+ lines = chain(("[fake]",), lines)
+ parser.read_file(lines)
+
+ # TODO extract the data and put it into a real dict so we can transform some
+ # values to ints?
+ return parser["fake"]
+
+def find_pkgdata():
+ import subprocess
+ output = subprocess.check_output(("bitbake", "-e"), universal_newlines=True)
+ for line in output.splitlines():
+ if line.startswith("PKGDATA_DIR="):
+ return line.split("=", 1)[1].strip("\'\"")
+ # TODO exception or something
+ return None
+
+def packages_in_recipe(pkgdata, recipe):
+ """
+ Load the recipe pkgdata to determine the list of runtime packages.
+ """
+ data = load(os.path.join(pkgdata, recipe))
+ packages = data["PACKAGES"].split()
+ return packages
+
+def load_runtime_package(pkgdata, package):
+ return load(os.path.join(pkgdata, "runtime", package), suffix=package)
+
+def recipe_from_package(pkgdata, package):
+ data = load(os.path.join(pkgdata, "runtime", package), suffix=package)
+ return data["PN"]
+
+def summary(data):
+ s = ""
+ s += "{0[PKG]} {0[PKGV]}-{0[PKGR]}\n{0[LICENSE]}\n{0[SUMMARY]}\n".format(data)
+
+ return s
+
+
+class PkgUi():
+ def __init__(self, pkgdata):
+ self.pkgdata = pkgdata
+ self.current_recipe = None
+ self.recipe_iters = {}
+ self.package_iters = {}
+
+ builder = Gtk.Builder()
+ builder.add_from_file(os.path.join(os.path.dirname(__file__), "oe-pkgdata-browser.glade"))
+
+ self.window = builder.get_object("window")
+ self.window.connect("delete-event", Gtk.main_quit)
+
+ self.recipe_store = builder.get_object("recipe_store")
+ self.recipe_view = builder.get_object("recipe_view")
+ self.package_store = builder.get_object("package_store")
+ self.package_view = builder.get_object("package_view")
+
+ # Somehow resizable does not get set via builder xml
+ package_name_column = builder.get_object("package_name_column")
+ package_name_column.set_resizable(True)
+ file_name_column = builder.get_object("file_name_column")
+ file_name_column.set_resizable(True)
+
+ self.recipe_view.get_selection().connect("changed", self.on_recipe_changed)
+ self.package_view.get_selection().connect("changed", self.on_package_changed)
+
+ self.package_store.set_sort_column_id(PackageColumns.Package, Gtk.SortType.ASCENDING)
+ builder.get_object("package_size_column").set_cell_data_func(builder.get_object("package_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][PackageColumns.Size])))
+
+ self.label = builder.get_object("label1")
+ self.depends_label = builder.get_object("depends_label")
+ self.recommends_label = builder.get_object("recommends_label")
+ self.suggests_label = builder.get_object("suggests_label")
+ self.provides_label = builder.get_object("provides_label")
+
+ self.depends_label.connect("activate-link", self.on_link_activate)
+ self.recommends_label.connect("activate-link", self.on_link_activate)
+ self.suggests_label.connect("activate-link", self.on_link_activate)
+
+ self.file_store = builder.get_object("file_store")
+ self.file_store.set_sort_column_id(FileColumns.Filename, Gtk.SortType.ASCENDING)
+ builder.get_object("file_size_column").set_cell_data_func(builder.get_object("file_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][FileColumns.Size])))
+
+ self.files_view = builder.get_object("files_scrollview")
+ self.files_label = builder.get_object("files_label")
+
+ self.load_recipes()
+
+ self.recipe_view.set_cursor(Gtk.TreePath.new_first())
+
+ self.window.show()
+
+ def on_link_activate(self, label, url_string):
+ from urllib.parse import urlparse
+ url = urlparse(url_string)
+ if url.scheme == "package":
+ package = url.path
+ recipe = recipe_from_package(self.pkgdata, package)
+
+ it = self.recipe_iters[recipe]
+ path = self.recipe_store.get_path(it)
+ self.recipe_view.set_cursor(path)
+ self.recipe_view.scroll_to_cell(path)
+
+ self.on_recipe_changed(self.recipe_view.get_selection())
+
+ it = self.package_iters[package]
+ path = self.package_store.get_path(it)
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ return True
+ else:
+ return False
+
+ def on_recipe_changed(self, selection):
+ self.package_store.clear()
+ self.package_iters = {}
+
+ (model, it) = selection.get_selected()
+ if not it:
+ return
+
+ recipe = model[it][RecipeColumns.Recipe]
+ packages = packages_in_recipe(self.pkgdata, recipe)
+ for package in packages:
+ # TODO also show PKG after debian-renaming?
+ data = load_runtime_package(self.pkgdata, package)
+ # TODO stash data to avoid reading in on_package_changed
+ self.package_iters[package] = self.package_store.append([package, int(data["PKGSIZE"])])
+
+ package = recipe if recipe in packages else sorted(packages)[0]
+ path = self.package_store.get_path(self.package_iters[package])
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ def on_package_changed(self, selection):
+ self.label.set_text("")
+ self.file_store.clear()
+ self.depends_label.hide()
+ self.recommends_label.hide()
+ self.suggests_label.hide()
+ self.provides_label.hide()
+ self.files_view.hide()
+ self.files_label.hide()
+
+ (model, it) = selection.get_selected()
+ if it is None:
+ return
+
+ package = model[it][PackageColumns.Package]
+ data = load_runtime_package(self.pkgdata, package)
+
+ self.label.set_text(summary(data))
+
+ files = ast.literal_eval(data["FILES_INFO"])
+ if files:
+ self.files_label.set_text("{0} files take {1}.".format(len(files), human_size(int(data["PKGSIZE"]))))
+ self.files_view.show()
+ for filename, size in files.items():
+ self.file_store.append([filename, size])
+ else:
+ self.files_view.hide()
+ self.files_label.set_text("This package has no files.")
+ self.files_label.show()
+
+ def update_deps(field, prefix, label, clickable=True):
+ if field in data:
+ l = []
+ for name, version in bb.utils.explode_dep_versions2(data[field]).items():
+ if clickable:
+ l.append("<a href='package:{0}'>{0}</a> {1}".format(name, " ".join(version)).strip())
+ else:
+ l.append("{0} {1}".format(name, " ".join(version)).strip())
+ label.set_markup(prefix + ", ".join(l))
+ label.show()
+ else:
+ label.hide()
+ update_deps("RDEPENDS", "Depends: ", self.depends_label)
+ update_deps("RRECOMMENDS", "Recommends: ", self.recommends_label)
+ update_deps("RSUGGESTS", "Suggests: ", self.suggests_label)
+ update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False)
+
+ def load_recipes(self):
+ for recipe in sorted(os.listdir(pkgdata)):
+ if os.path.isfile(os.path.join(pkgdata, recipe)):
+ self.recipe_iters[recipe] = self.recipe_store.append([recipe])
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='pkgdata browser')
+ parser.add_argument('-p', '--pkgdata', help="Optional location of pkgdata")
+
+ args = parser.parse_args()
+ pkgdata = args.pkgdata if args.pkgdata else find_pkgdata()
+ # TODO assert pkgdata is a directory
+ window = PkgUi(pkgdata)
+ Gtk.main()
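
The load() helper in the new browser parses pkgdata files, which are bare KEY: value lines,
by prepending a fake section header so configparser will accept them. A small self-contained
sketch of that trick, assuming the same file format (function name is illustrative only):

    import io
    from configparser import ConfigParser
    from itertools import chain

    def parse_pkgdata(text):
        # pkgdata files have no section header, so prepend a fake one for configparser.
        parser = ConfigParser()
        parser.read_file(chain(('[fake]',), io.StringIO(text)))
        return dict(parser['fake'])

    # parse_pkgdata('PACKAGES: busybox busybox-dev\nPN: busybox\n')
    #   -> {'packages': 'busybox busybox-dev', 'pn': 'busybox'}
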
diff --git a/external/poky/scripts/oe-pkgdata-browser.glade b/external/poky/scripts/oe-pkgdata-browser.glade
new file mode 100644
index 00000000..0d06c825
--- /dev/null
+++ b/external/poky/scripts/oe-pkgdata-browser.glade
@@ -0,0 +1,337 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Generated with glade 3.18.3 -->
+<interface>
+ <requires lib="gtk+" version="3.12"/>
+ <object class="GtkListStore" id="file_store">
+ <columns>
+ <!-- column-name Filename -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="package_store">
+ <columns>
+ <!-- column-name Package -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="pkgdata_store">
+ <columns>
+ <!-- column-name Name -->
+ <column type="gchararray"/>
+ <!-- column-name Path -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="recipe_store">
+ <columns>
+ <!-- column-name Recipe -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkWindow" id="window">
+ <property name="can_focus">False</property>
+ <property name="title" translatable="yes">Package Data Browser</property>
+ <property name="default_width">1200</property>
+ <property name="default_height">900</property>
+ <property name="icon_name">accessories-dictionary</property>
+ <property name="has_resize_grip">True</property>
+ <child>
+ <object class="GtkBox" id="box1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="margin_right">4</property>
+ <property name="margin_top">4</property>
+ <property name="margin_bottom">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkComboBox" id="pkgdata_combo">
+ <property name="can_focus">False</property>
+ <property name="model">pkgdata_store</property>
+ <property name="id_column">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext5"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="recipe_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">recipe_store</property>
+ <property name="search_column">0</property>
+ <property name="fixed_height_mode">True</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection1"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="treeviewcolumn1">
+ <property name="sizing">fixed</property>
+ <property name="title" translatable="yes">Recipe</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext1"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="package_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">package_store</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection2"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_name_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Package</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext2"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_size_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="package_size_cell"/>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkBox" id="box2">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkLabel" id="label1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">label</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="depends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">depends_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="recommends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">recs_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">2</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="suggests_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">suggests_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">3</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="provides_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">provides_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">4</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="files_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">files_label</property>
+ <property name="ellipsize">end</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">5</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkScrolledWindow" id="files_scrollview">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <child>
+ <object class="GtkTreeView" id="files_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">file_store</property>
+ <property name="rules_hint">True</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection3"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_name_column">
+ <property name="title" translatable="yes">Name</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext3">
+ <property name="background_rgba">rgba(0,0,0,0)</property>
+ </object>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_size_column">
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="file_size_cell"/>
+ <attributes>
+ <attribute name="text">1</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">6</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ </object>
+ </child>
+ </object>
+</interface>
diff --git a/external/poky/scripts/oe-pkgdata-util b/external/poky/scripts/oe-pkgdata-util
index 53739b0b..93220e36 100755
--- a/external/poky/scripts/oe-pkgdata-util
+++ b/external/poky/scripts/oe-pkgdata-util
@@ -6,18 +6,7 @@
#
# Copyright 2012-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -400,21 +389,16 @@ def list_pkgs(args):
return False
return True
+ pkglist = []
if args.recipe:
packages = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
if args.runtime:
- pkglist = []
runtime_pkgs = lookup_pkglist(packages, args.pkgdata_dir, False)
for rtpkgs in runtime_pkgs.values():
pkglist.extend(rtpkgs)
else:
pkglist = packages
-
- for pkg in pkglist:
- if matchpkg(pkg):
- found = True
- print("%s" % pkg)
else:
if args.runtime:
searchdir = 'runtime-reverse'
@@ -425,9 +409,13 @@ def list_pkgs(args):
for fn in files:
if fn.endswith('.packaged'):
continue
- if matchpkg(fn):
- found = True
- print("%s" % fn)
+ pkglist.append(fn)
+
+ for pkg in sorted(pkglist):
+ if matchpkg(pkg):
+ found = True
+ print("%s" % pkg)
+
if not found:
if args.pkgspec:
logger.error("Unable to find any package matching %s" % args.pkgspec)
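
The list_pkgs() change above collects candidate packages into a single pkglist and prints
them through one sorted loop, so the output order no longer depends on which branch produced
the list. A minimal sketch of that collect-then-sort-then-filter pattern; fnmatch stands in
for the script's matchpkg(), which is outside this hunk:

    from fnmatch import fnmatchcase

    def list_matching(candidates, pkgspec=None):
        found = []
        for pkg in sorted(candidates):          # sort once, after all branches have contributed
            if pkgspec is None or fnmatchcase(pkg, pkgspec):
                found.append(pkg)
                print(pkg)
        return found
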
diff --git a/external/poky/scripts/oe-publish-sdk b/external/poky/scripts/oe-publish-sdk
index ee33acf9..4b70f436 100755
--- a/external/poky/scripts/oe-publish-sdk
+++ b/external/poky/scripts/oe-publish-sdk
@@ -1,21 +1,11 @@
#!/usr/bin/env python3
-
+#
# OpenEmbedded SDK publishing tool
-
-# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/oe-pylint b/external/poky/scripts/oe-pylint
new file mode 100755
index 00000000..7cc1ccb0
--- /dev/null
+++ b/external/poky/scripts/oe-pylint
@@ -0,0 +1,13 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Run the pylint3 against our common python module spaces and print a report of potential issues
+#
+this_dir=$(dirname $(readlink -f $0))
+ERRORS="-E"
+IGNORELIST="$ERRORS -d logging-too-many-args -d missing-docstring -d line-too-long -d invalid-name"
+PYTHONPATH=$this_dir/../bitbake/lib/ pylint3 $IGNORELIST bb
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST -d undefined-variable oe
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST oeqa
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib:$this_dir/lib pylint3 $IGNORELIST -d undefined-variable argparse_oe buildstats devtool recipetool scriptpath testcasemgmt build_perf checklayer resulttool scriptutils wic
\ No newline at end of file
diff --git a/external/poky/scripts/oe-run-native b/external/poky/scripts/oe-run-native
index a29e9943..4e63e69c 100755
--- a/external/poky/scripts/oe-run-native
+++ b/external/poky/scripts/oe-run-native
@@ -1,20 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2016, Intel Corporation.
-# All Rights Reserved
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -28,7 +16,7 @@ if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'OpenEmbedded run-native - runs native tools'
echo ''
echo 'arguments:'
- echo ' native-recipe The recipe which provoides tool'
+ echo ' native-recipe The recipe which provides tool'
echo ' tool Native tool to run'
echo ''
exit 2
diff --git a/external/poky/scripts/oe-selftest b/external/poky/scripts/oe-selftest
index 1bf860a4..18ac0f58 100755
--- a/external/poky/scripts/oe-selftest
+++ b/external/poky/scripts/oe-selftest
@@ -2,18 +2,8 @@
# Copyright (c) 2013-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script runs tests defined in meta/lib/oeqa/selftest/
@@ -43,7 +33,7 @@ scriptpath.add_bitbake_lib_path()
from oeqa.utils import load_test_components
from oeqa.core.exception import OEQAPreRun
-logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout)
+logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout, keepalive=True)
def main():
description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
diff --git a/external/poky/scripts/oe-setup-builddir b/external/poky/scripts/oe-setup-builddir
index 55d73ca1..30eaa8ef 100755
--- a/external/poky/scripts/oe-setup-builddir
+++ b/external/poky/scripts/oe-setup-builddir
@@ -4,19 +4,8 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if [ -z "$BUILDDIR" ]; then
echo >&2 "Error: The build directory (BUILDDIR) must be set!"
diff --git a/external/poky/scripts/oe-test b/external/poky/scripts/oe-test
index 34d9012d..55985b0b 100755
--- a/external/poky/scripts/oe-test
+++ b/external/poky/scripts/oe-test
@@ -3,7 +3,9 @@
# OpenEmbedded test tool
#
# Copyright (C) 2016 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/external/poky/scripts/oe-trim-schemas b/external/poky/scripts/oe-trim-schemas
index 7c199ef1..bf77c8cf 100755
--- a/external/poky/scripts/oe-trim-schemas
+++ b/external/poky/scripts/oe-trim-schemas
@@ -1,4 +1,7 @@
#! /usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
try:
diff --git a/external/poky/scripts/oepydevshell-internal.py b/external/poky/scripts/oepydevshell-internal.py
index 04621ae8..96c078ef 100755
--- a/external/poky/scripts/oepydevshell-internal.py
+++ b/external/poky/scripts/oepydevshell-internal.py
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
@@ -63,7 +66,9 @@ try:
(ready, _, _) = select.select([pty, sys.stdin], writers , [], 0)
try:
if pty in ready:
- i = i + pty.read().decode('utf-8')
+ readdata = pty.read()
+ if readdata:
+ i = i + readdata.decode('utf-8')
if i:
# Write a page at a time to avoid overflowing output
# d.keys() is a good way to do that
diff --git a/external/poky/scripts/opkg-query-helper.py b/external/poky/scripts/opkg-query-helper.py
index ce89491f..bc3ab438 100755
--- a/external/poky/scripts/opkg-query-helper.py
+++ b/external/poky/scripts/opkg-query-helper.py
@@ -6,21 +6,8 @@
#
# Copyright 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#
-
import sys
import fileinput
diff --git a/external/poky/scripts/postinst-intercepts/delay_to_first_boot b/external/poky/scripts/postinst-intercepts/delay_to_first_boot
index ecdbef95..fa8e1caa 100644
--- a/external/poky/scripts/postinst-intercepts/delay_to_first_boot
+++ b/external/poky/scripts/postinst-intercepts/delay_to_first_boot
@@ -1,2 +1,6 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
exit 1
diff --git a/external/poky/scripts/postinst-intercepts/postinst_intercept b/external/poky/scripts/postinst-intercepts/postinst_intercept
index b18e806d..b91974c8 100755
--- a/external/poky/scripts/postinst-intercepts/postinst_intercept
+++ b/external/poky/scripts/postinst-intercepts/postinst_intercept
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: MIT
+#
# This script is called from inside postinstall scriptlets at do_rootfs time. It
# actually adds, at the end, the list of packages for which the intercept script
# is valid. Also, if one wants to pass any variables to the intercept script from
diff --git a/external/poky/scripts/postinst-intercepts/update_desktop_database b/external/poky/scripts/postinst-intercepts/update_desktop_database
new file mode 100644
index 00000000..8903b496
--- /dev/null
+++ b/external/poky/scripts/postinst-intercepts/update_desktop_database
@@ -0,0 +1,8 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime-xdg.bbclass
+
+update-desktop-database $D${desktop_dir}
+
diff --git a/external/poky/scripts/postinst-intercepts/update_font_cache b/external/poky/scripts/postinst-intercepts/update_font_cache
index e0ec4719..46bdb8c5 100644
--- a/external/poky/scripts/postinst-intercepts/update_font_cache
+++ b/external/poky/scripts/postinst-intercepts/update_font_cache
@@ -1,6 +1,11 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+
chown -R root:root $D${fontconfigcachedir}
+find $D -type f -name .uuid -exec chown root:root '{}' +
diff --git a/external/poky/scripts/postinst-intercepts/update_gio_module_cache b/external/poky/scripts/postinst-intercepts/update_gio_module_cache
index d1f01409..c87fa85d 100644
--- a/external/poky/scripts/postinst-intercepts/update_gio_module_cache
+++ b/external/poky/scripts/postinst-intercepts/update_gio_module_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/external/poky/scripts/postinst-intercepts/update_icon_cache b/external/poky/scripts/postinst-intercepts/update_gtk_icon_cache
index 9cf2a72a..99367a28 100644
--- a/external/poky/scripts/postinst-intercepts/update_icon_cache
+++ b/external/poky/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -1,8 +1,12 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for gtk-icon-cache.bbclass
set -e
-# update native pixbuf loaders
+# Update native pixbuf loaders
$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
for icondir in $D/usr/share/icons/*/ ; do
diff --git a/external/poky/scripts/postinst-intercepts/update_gtk_immodules_cache b/external/poky/scripts/postinst-intercepts/update_gtk_immodules_cache
index 39551697..9f07ccca 100644
--- a/external/poky/scripts/postinst-intercepts/update_gtk_immodules_cache
+++ b/external/poky/scripts/postinst-intercepts/update_gtk_immodules_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/external/poky/scripts/postinst-intercepts/update_mime_database b/external/poky/scripts/postinst-intercepts/update_mime_database
new file mode 100644
index 00000000..582d1e16
--- /dev/null
+++ b/external/poky/scripts/postinst-intercepts/update_mime_database
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime.bbclass
+
+echo "Updating MIME database... this may take a while."
+update-mime-database $D${mimedir}
+
diff --git a/external/poky/scripts/postinst-intercepts/update_pixbuf_cache b/external/poky/scripts/postinst-intercepts/update_pixbuf_cache
index ebea07c3..ea128144 100644
--- a/external/poky/scripts/postinst-intercepts/update_pixbuf_cache
+++ b/external/poky/scripts/postinst-intercepts/update_pixbuf_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/external/poky/scripts/postinst-intercepts/update_udev_hwdb b/external/poky/scripts/postinst-intercepts/update_udev_hwdb
index b5cce0a0..102e99b9 100644
--- a/external/poky/scripts/postinst-intercepts/update_udev_hwdb
+++ b/external/poky/scripts/postinst-intercepts/update_udev_hwdb
@@ -1,6 +1,21 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D
-chown root:root $D${sysconfdir}/udev/hwdb.bin
+case "${PREFERRED_PROVIDER_udev}" in
+ systemd)
+ UDEV_EXTRA_ARGS="--usr"
+ UDEVLIBDIR="${rootlibexecdir}"
+ ;;
+
+ *)
+ UDEV_EXTRA_ARGS=""
+ UDEVLIBDIR="${sysconfdir}"
+ ;;
+esac
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D ${UDEV_EXTRA_ARGS}
+chown root:root $D${UDEVLIBDIR}/udev/hwdb.bin
diff --git a/external/poky/scripts/pybootchartgui/pybootchartgui.py b/external/poky/scripts/pybootchartgui/pybootchartgui.py
index 7ce1a5be..1c4062b4 100755
--- a/external/poky/scripts/pybootchartgui/pybootchartgui.py
+++ b/external/poky/scripts/pybootchartgui/pybootchartgui.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# This file is part of pybootchartgui.
diff --git a/external/poky/scripts/pybootchartgui/pybootchartgui/draw.py b/external/poky/scripts/pybootchartgui/pybootchartgui/draw.py
index 201ce457..53324b9f 100644
--- a/external/poky/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/external/poky/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -19,22 +19,23 @@ import math
import re
import random
import colorsys
+import functools
from operator import itemgetter
class RenderOptions:
- def __init__(self, app_options):
- # should we render a cumulative CPU time chart
- self.cumulative = True
- self.charts = True
- self.kernel_only = False
- self.app_options = app_options
+ def __init__(self, app_options):
+ # should we render a cumulative CPU time chart
+ self.cumulative = True
+ self.charts = True
+ self.kernel_only = False
+ self.app_options = app_options
- def proc_tree (self, trace):
- if self.kernel_only:
- return trace.kernel_tree
- else:
- return trace.proc_tree
+ def proc_tree (self, trace):
+ if self.kernel_only:
+ return trace.kernel_tree
+ else:
+ return trace.proc_tree
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
@@ -136,11 +137,11 @@ TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
# Distinct colors used for different disk volumnes.
# If we have more volumns, colors get re-used.
VOLUME_COLORS = [
- (1.0, 1.0, 0.00, 1.0),
- (0.0, 1.00, 0.00, 1.0),
- (1.0, 0.00, 1.00, 1.0),
- (0.0, 0.00, 1.00, 1.0),
- (0.0, 1.00, 1.00, 1.0),
+ (1.0, 1.0, 0.00, 1.0),
+ (0.0, 1.00, 0.00, 1.0),
+ (1.0, 0.00, 1.00, 1.0),
+ (0.0, 0.00, 1.00, 1.0),
+ (0.0, 1.00, 1.00, 1.0),
]
# Process states
@@ -152,7 +153,7 @@ STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
- PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+ PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# CumulativeStats Types
STAT_TYPE_CPU = 0
@@ -160,80 +161,80 @@ STAT_TYPE_IO = 1
# Convert ps process state to an int
def get_proc_state(flag):
- return "RSDTZXW".find(flag) + 1
+ return "RSDTZXW".find(flag) + 1
def draw_text(ctx, text, color, x, y):
- ctx.set_source_rgba(*color)
- ctx.move_to(x, y)
- ctx.show_text(text)
+ ctx.set_source_rgba(*color)
+ ctx.move_to(x, y)
+ ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.fill()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.fill()
def draw_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.stroke()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
- ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
- ctx.fill()
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
+ ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
+ ctx.fill()
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
- label_w = ctx.text_extents(label)[2]
- label_x = x + w / 2 - label_w / 2
- if label_w + 10 > w:
- label_x = x + w + 5
- if label_x + label_w > maxx:
- label_x = x - label_w - 5
- draw_text(ctx, label, color, label_x, y)
+ label_w = ctx.text_extents(label)[2]
+ label_x = x + w / 2 - label_w / 2
+ if label_w + 10 > w:
+ label_x = x + w + 5
+ if label_x + label_w > maxx:
+ label_x = x - label_w - 5
+ draw_text(ctx, label, color, label_x, y)
def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
- ctx.set_font_size(AXIS_FONT_SIZE)
- prev_x = 0
- for i in range(0, rect[2] + 1, sec_w):
- if ((i / sec_w) % nsecs == 0) :
- if options.app_options.as_minutes :
- label = "%.1f" % (i / sec_w / 60.0)
- else :
- label = "%d" % (i / sec_w)
- label_w = ctx.text_extents(label)[2]
- x = rect[0] + i - label_w/2
- if x >= prev_x:
- draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
- prev_x = x + label_w
+ ctx.set_font_size(AXIS_FONT_SIZE)
+ prev_x = 0
+ for i in range(0, rect[2] + 1, sec_w):
+ if ((i / sec_w) % nsecs == 0) :
+ if options.app_options.as_minutes :
+ label = "%.1f" % (i / sec_w / 60.0)
+ else :
+ label = "%d" % (i / sec_w)
+ label_w = ctx.text_extents(label)[2]
+ x = rect[0] + i - label_w/2
+ if x >= prev_x:
+ draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ prev_x = x + label_w
def draw_box_ticks(ctx, rect, sec_w):
- draw_rect(ctx, BORDER_COLOR, tuple(rect))
-
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
-
- for i in range(sec_w, rect[2] + 1, sec_w):
- if ((i / sec_w) % 10 == 0) :
- ctx.set_line_width(1.5)
- elif sec_w < 5 :
- continue
- else :
- ctx.set_line_width(1.0)
- if ((i / sec_w) % 30 == 0) :
- ctx.set_source_rgba(*TICK_COLOR_BOLD)
- else :
- ctx.set_source_rgba(*TICK_COLOR)
- ctx.move_to(rect[0] + i, rect[1] + 1)
- ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
- ctx.stroke()
- ctx.set_line_width(1.0)
-
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+ draw_rect(ctx, BORDER_COLOR, tuple(rect))
+
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+
+ for i in range(sec_w, rect[2] + 1, sec_w):
+ if ((i / sec_w) % 10 == 0) :
+ ctx.set_line_width(1.5)
+ elif sec_w < 5 :
+ continue
+ else :
+ ctx.set_line_width(1.0)
+ if ((i / sec_w) % 30 == 0) :
+ ctx.set_source_rgba(*TICK_COLOR_BOLD)
+ else :
+ ctx.set_source_rgba(*TICK_COLOR)
+ ctx.move_to(rect[0] + i, rect[1] + 1)
+ ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
@@ -252,51 +253,51 @@ def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_dash([])
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
- ctx.set_line_width(0.5)
- x_shift = proc_tree.start_time
-
- def transform_point_coords(point, x_base, y_base, \
- xscale, yscale, x_trans, y_trans):
- x = (point[0] - x_base) * xscale + x_trans
- y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
- return x, y
-
- max_x = max (x for (x, y) in data)
- max_y = max (y for (x, y) in data)
- # avoid divide by zero
- if max_y == 0:
- max_y = 1.0
- xscale = float (chart_bounds[2]) / (max_x - x_shift)
- # If data_range is given, scale the chart so that the value range in
- # data_range matches the chart bounds exactly.
- # Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
- yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
- ybase = data_range[0]
- else:
- yscale = float(chart_bounds[3]) / max_y
- ybase = 0
-
- first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
-
- ctx.set_source_rgba(*color)
- ctx.move_to(*first)
- for point in data:
- x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- ctx.line_to(x, y)
- if fill:
- ctx.stroke_preserve()
- ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], first[1])
- ctx.fill()
- else:
- ctx.stroke()
- ctx.set_line_width(1.0)
+ ctx.set_line_width(0.5)
+ x_shift = proc_tree.start_time
+
+ def transform_point_coords(point, x_base, y_base, \
+ xscale, yscale, x_trans, y_trans):
+ x = (point[0] - x_base) * xscale + x_trans
+ y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+ return x, y
+
+ max_x = max (x for (x, y) in data)
+ max_y = max (y for (x, y) in data)
+ # avoid divide by zero
+ if max_y == 0:
+ max_y = 1.0
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ # If data_range is given, scale the chart so that the value range in
+ # data_range matches the chart bounds exactly.
+ # Otherwise, scale so that the actual data matches the chart bounds.
+ if data_range:
+ yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
+ ybase = data_range[0]
+ else:
+ yscale = float(chart_bounds[3]) / max_y
+ ybase = 0
+
+ first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+
+ ctx.set_source_rgba(*color)
+ ctx.move_to(*first)
+ for point in data:
+ x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ ctx.line_to(x, y)
+ if fill:
+ ctx.stroke_preserve()
+ ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], first[1])
+ ctx.fill()
+ else:
+ ctx.stroke()
+ ctx.set_line_width(1.0)
bar_h = 55
meminfo_bar_h = 2 * bar_h
@@ -307,342 +308,348 @@ sec_w_base = 1 # the width of a second
proc_h = 16 # the height of a process
leg_s = 10
MIN_IMG_W = 800
-CUML_HEIGHT = 2000 # Increased value to accomodate CPU and I/O Graphs
+CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O Graphs
OPTIONS = None
def extents(options, xscale, trace):
- start = min(trace.start.keys())
- end = start
-
- processes = 0
- for proc in trace.processes:
- if not options.app_options.show_all and \
- trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
- continue
-
- if trace.processes[proc][1] > end:
- end = trace.processes[proc][1]
- processes += 1
-
- if trace.min is not None and trace.max is not None:
- start = trace.min
- end = trace.max
-
- w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
- h = proc_h * processes + header_h + 2 * off_y
-
- if options.charts:
- if trace.cpu_stats:
- h += 30 + bar_h
- if trace.disk_stats:
- h += 30 + bar_h
- if trace.monitor_disk:
- h += 30 + bar_h
- if trace.mem_stats:
- h += meminfo_bar_h
-
- return (w, h)
+ start = min(trace.start.keys())
+ end = start
+
+ processes = 0
+ for proc in trace.processes:
+ if not options.app_options.show_all and \
+ trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
+ continue
+
+ if trace.processes[proc][1] > end:
+ end = trace.processes[proc][1]
+ processes += 1
+
+ if trace.min is not None and trace.max is not None:
+ start = trace.min
+ end = trace.max
+
+ w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
+ h = proc_h * processes + header_h + 2 * off_y
+
+ if options.charts:
+ if trace.cpu_stats:
+ h += 30 + bar_h
+ if trace.disk_stats:
+ h += 30 + bar_h
+ if trace.monitor_disk:
+ h += 30 + bar_h
+ if trace.mem_stats:
+ h += meminfo_bar_h
+
+ # Allow for width of process legend and offset
+ if w < (720 + off_x):
+ w = 720 + off_x
+
+ return (w, h)
def clip_visible(clip, rect):
- xmax = max (clip[0], rect[0])
- ymax = max (clip[1], rect[1])
- xmin = min (clip[0] + clip[2], rect[0] + rect[2])
- ymin = min (clip[1] + clip[3], rect[1] + rect[3])
- return (xmin > xmax and ymin > ymax)
+ xmax = max (clip[0], rect[0])
+ ymax = max (clip[1], rect[1])
+ xmin = min (clip[0] + clip[2], rect[0] + rect[2])
+ ymin = min (clip[1] + clip[3], rect[1] + rect[3])
+ return (xmin > xmax and ymin > ymax)
def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
- proc_tree = options.proc_tree(trace)
-
- # render bar legend
- if trace.cpu_stats:
- ctx.set_font_size(LEGEND_FONT_SIZE)
-
- draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O wait
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
- proc_tree, None)
- # render CPU load
- draw_chart (ctx, CPU_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
- proc_tree, None)
-
- curr_y = curr_y + 30 + bar_h
-
- # render second chart
- if trace.disk_stats:
- draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O utilization
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.util) for sample in trace.disk_stats], \
- proc_tree, None)
-
- # render disk throughput
- max_sample = max (trace.disk_stats, key = lambda s: s.tput)
- if clip_visible (clip, chart_rect):
- draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
- [(sample.time, sample.tput) for sample in trace.disk_stats], \
- proc_tree, None)
-
- pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
-
- shift_x, shift_y = -20, 20
- if (pos_x < off_x + 245):
- shift_x, shift_y = 5, 40
-
- label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
- draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
-
- curr_y = curr_y + 30 + bar_h
-
- # render disk space usage
- #
- # Draws the amount of disk space used on each volume relative to the
- # lowest recorded amount. The graphs for each volume are stacked above
- # each other so that total disk usage is visible.
- if trace.monitor_disk:
- ctx.set_font_size(LEGEND_FONT_SIZE)
- # Determine set of volumes for which we have
- # information and the minimal amount of used disk
- # space for each. Currently samples are allowed to
- # not have a values for all volumes; drawing could be
- # made more efficient if that wasn't the case.
- volumes = set()
- min_used = {}
- for sample in trace.monitor_disk:
- for volume, used in sample.records.items():
- volumes.add(volume)
- if volume not in min_used or min_used[volume] > used:
- min_used[volume] = used
- volumes = sorted(list(volumes))
- disk_scale = 0
- for i, volume in enumerate(volumes):
- volume_scale = max([sample.records[volume] - min_used[volume]
- for sample in trace.monitor_disk
- if volume in sample.records])
- # Does not take length of volume name into account, but fixed offset
- # works okay in practice.
- draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
- VOLUME_COLORS[i % len(VOLUME_COLORS)],
- off_x + i * 250, curr_y+20, leg_s)
- disk_scale += volume_scale
-
- # render used amount of disk space
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- for i in range(len(volumes), 0, -1):
- draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
- [(sample.time,
- # Sum up used space of all volumes including the current one
- # so that the graphs appear as stacked on top of each other.
- reduce(lambda x,y: x+y,
- [sample.records[volume] - min_used[volume]
- for volume in volumes[0:i]
- if volume in sample.records],
- 0))
- for sample in trace.monitor_disk], \
- proc_tree, [0, disk_scale])
-
- curr_y = curr_y + 30 + bar_h
-
- # render mem usage
- chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
- mem_stats = trace.mem_stats
- if mem_stats and clip_visible (clip, chart_rect):
- mem_scale = max(sample.buffers for sample in mem_stats)
- draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
- draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
- draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
- MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_annotations(ctx, proc_tree, trace.times, chart_rect)
- draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
- [(sample.time, sample.buffers) for sample in trace.mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
- [(sample.time, sample.used) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
- [(sample.time, sample.cached) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
- [(sample.time, float(sample.swap)) for sample in mem_stats], \
- proc_tree, None)
-
- curr_y = curr_y + meminfo_bar_h
-
- return curr_y
+ proc_tree = options.proc_tree(trace)
+
+ # render bar legend
+ if trace.cpu_stats:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+
+ draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O wait
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree, None)
+ # render CPU load
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render second chart
+ if trace.disk_stats:
+ draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O utilization
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.util) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ # render disk throughput
+ max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+ [(sample.time, sample.tput) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+ draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render disk space usage
+ #
+ # Draws the amount of disk space used on each volume relative to the
+ # lowest recorded amount. The graphs for each volume are stacked above
+ # each other so that total disk usage is visible.
+ if trace.monitor_disk:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+ # Determine set of volumes for which we have
+ # information and the minimal amount of used disk
+ # space for each. Currently samples are allowed to
+        # not have values for all volumes; drawing could be
+ # made more efficient if that wasn't the case.
+ volumes = set()
+ min_used = {}
+ for sample in trace.monitor_disk:
+ for volume, used in sample.records.items():
+ volumes.add(volume)
+ if volume not in min_used or min_used[volume] > used:
+ min_used[volume] = used
+ volumes = sorted(list(volumes))
+ disk_scale = 0
+ for i, volume in enumerate(volumes):
+ volume_scale = max([sample.records[volume] - min_used[volume]
+ for sample in trace.monitor_disk
+ if volume in sample.records])
+ # Does not take length of volume name into account, but fixed offset
+ # works okay in practice.
+ draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+ VOLUME_COLORS[i % len(VOLUME_COLORS)],
+ off_x + i * 250, curr_y+20, leg_s)
+ disk_scale += volume_scale
+
+ # render used amount of disk space
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ for i in range(len(volumes), 0, -1):
+ draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+ [(sample.time,
+ # Sum up used space of all volumes including the current one
+ # so that the graphs appear as stacked on top of each other.
+ functools.reduce(lambda x,y: x+y,
+ [sample.records[volume] - min_used[volume]
+ for volume in volumes[0:i]
+ if volume in sample.records],
+ 0))
+ for sample in trace.monitor_disk], \
+ proc_tree, [0, disk_scale])
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render mem usage
+ chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ mem_stats = trace.mem_stats
+ if mem_stats and clip_visible (clip, chart_rect):
+ mem_scale = max(sample.buffers for sample in mem_stats)
+ draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
+ draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
+ [(sample.time, sample.buffers) for sample in trace.mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
+ [(sample.time, sample.used) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
+ [(sample.time, sample.cached) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
+ [(sample.time, float(sample.swap)) for sample in mem_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + meminfo_bar_h
+
+ return curr_y
def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - header_h - leg_s + proc_h]
-
- draw_legend_box (ctx, "Configure", \
- TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Compile", \
- TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Install", \
- TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Populate Sysroot", \
- TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package", \
- TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package Write",
- TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
-
- ctx.set_font_size(PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
-
- y = curr_y+header_h
-
- offset = trace.min or min(trace.start.keys())
- for s in sorted(trace.start.keys()):
- for val in sorted(trace.start[s]):
- if not options.app_options.show_all and \
- trace.processes[val][1] - s < options.app_options.mintime:
- continue
- task = val.split(":")[1]
- #print val
- #print trace.processes[val][1]
- #print s
- x = chart_rect[0] + (s - offset) * sec_w
- w = ((trace.processes[val][1] - s) * sec_w)
-
- #print "proc at %s %s %s %s" % (x, y, w, proc_h)
- col = None
- if task == "do_compile":
- col = TASK_COLOR_COMPILE
- elif task == "do_configure":
- col = TASK_COLOR_CONFIGURE
- elif task == "do_install":
- col = TASK_COLOR_INSTALL
- elif task == "do_populate_sysroot":
- col = TASK_COLOR_SYSROOT
- elif task == "do_package":
- col = TASK_COLOR_PACKAGE
- elif task == "do_package_write_rpm" or \
+ chart_rect = [off_x, curr_y+header_h, w, h - curr_y - 1 * off_y - header_h ]
+
+ draw_legend_box (ctx, "Configure", \
+ TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Compile", \
+ TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Install", \
+ TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Populate Sysroot", \
+ TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package", \
+ TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package Write", \
+ TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
+
+ ctx.set_font_size(PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
+
+ y = curr_y+header_h
+
+ offset = trace.min or min(trace.start.keys())
+ for start in sorted(trace.start.keys()):
+ for process in sorted(trace.start[start]):
+ if not options.app_options.show_all and \
+ trace.processes[process][1] - start < options.app_options.mintime:
+ continue
+ task = process.split(":")[1]
+
+ #print(process)
+ #print(trace.processes[process][1])
+            #print(start)
+
+ x = chart_rect[0] + (start - offset) * sec_w
+ w = ((trace.processes[process][1] - start) * sec_w)
+
+ #print("proc at %s %s %s %s" % (x, y, w, proc_h))
+ col = None
+ if task == "do_compile":
+ col = TASK_COLOR_COMPILE
+ elif task == "do_configure":
+ col = TASK_COLOR_CONFIGURE
+ elif task == "do_install":
+ col = TASK_COLOR_INSTALL
+ elif task == "do_populate_sysroot":
+ col = TASK_COLOR_SYSROOT
+ elif task == "do_package":
+ col = TASK_COLOR_PACKAGE
+ elif task == "do_package_write_rpm" or \
task == "do_package_write_deb" or \
task == "do_package_write_ipk":
- col = TASK_COLOR_PACKAGE_WRITE
- else:
- col = WHITE
+ col = TASK_COLOR_PACKAGE_WRITE
+ else:
+ col = WHITE
- if col:
- draw_fill_rect(ctx, col, (x, y, w, proc_h))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ if col:
+ draw_fill_rect(ctx, col, (x, y, w, proc_h))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
- y = y + proc_h
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+ y = y + proc_h
- return curr_y
+ return curr_y
#
# Render the chart.
#
def render(ctx, options, xscale, trace):
- (w, h) = extents (options, xscale, trace)
- global OPTIONS
- OPTIONS = options.app_options
+ (w, h) = extents (options, xscale, trace)
+ global OPTIONS
+ OPTIONS = options.app_options
- # x, y, w, h
- clip = ctx.clip_extents()
+ # x, y, w, h
+ clip = ctx.clip_extents()
- sec_w = int (xscale * sec_w_base)
- ctx.set_line_width(1.0)
- ctx.select_font_face(FONT_NAME)
- draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
- w -= 2*off_x
- curr_y = off_y;
+ sec_w = int (xscale * sec_w_base)
+ ctx.set_line_width(1.0)
+ ctx.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
+ w -= 2*off_x
+ curr_y = off_y;
- if options.charts:
- curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+ if options.charts:
+ curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
- curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
+ curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
- return
+ return
- proc_tree = options.proc_tree (trace)
+ proc_tree = options.proc_tree (trace)
- # draw the title and headers
- if proc_tree.idle:
- duration = proc_tree.idle
- else:
- duration = proc_tree.duration
+ # draw the title and headers
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
- if not options.kernel_only:
- curr_y = draw_header (ctx, trace.headers, duration)
- else:
- curr_y = off_y;
+ if not options.kernel_only:
+ curr_y = draw_header (ctx, trace.headers, duration)
+ else:
+ curr_y = off_y;
- # draw process boxes
- proc_height = h
- if proc_tree.taskstats and options.cumulative:
- proc_height -= CUML_HEIGHT
+ # draw process boxes
+ proc_height = h
+ if proc_tree.taskstats and options.cumulative:
+ proc_height -= CUML_HEIGHT
- draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
- curr_y, w, proc_height, sec_w)
+ draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
+ curr_y, w, proc_height, sec_w)
- curr_y = proc_height
- ctx.set_font_size(SIG_FONT_SIZE)
- draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
+ curr_y = proc_height
+ ctx.set_font_size(SIG_FONT_SIZE)
+ draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
- # draw a cumulative CPU-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
+ # draw a cumulative CPU-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
- # draw a cumulative I/O-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
+ # draw a cumulative I/O-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
- header_size = 0
- if not options.kernel_only:
- draw_legend_box (ctx, "Running (%cpu)",
- PROC_COLOR_R, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Unint.sleep (I/O)",
- PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Sleeping",
- PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Zombie",
- PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
- header_size = 45
-
- chart_rect = [off_x, curr_y + header_size + 15,
- w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
- ctx.set_font_size (PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks (ctx, chart_rect, sec_w)
- if sec_w > 100:
- nsec = 1
- else:
- nsec = 5
- draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
- draw_annotations (ctx, proc_tree, times, chart_rect)
-
- y = curr_y + 60
- for root in proc_tree.process_tree:
- draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
- y = y + proc_h * proc_tree.num_nodes([root])
+ header_size = 0
+ if not options.kernel_only:
+ draw_legend_box (ctx, "Running (%cpu)",
+ PROC_COLOR_R, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Unint.sleep (I/O)",
+ PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Sleeping",
+ PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Zombie",
+ PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
+ header_size = 45
+
+ chart_rect = [off_x, curr_y + header_size + 15,
+ w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
+ ctx.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ if sec_w > 100:
+ nsec = 1
+ else:
+ nsec = 5
+ draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
+ draw_annotations (ctx, proc_tree, times, chart_rect)
+
+ y = curr_y + 60
+ for root in proc_tree.process_tree:
+ draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
+ y = y + proc_h * proc_tree.num_nodes([root])
def draw_header (ctx, headers, duration):
@@ -678,291 +685,291 @@ def draw_header (ctx, headers, duration):
return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
- x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
- w = ((proc.duration) * rect[2] / proc_tree.duration)
-
- draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- ipid = int(proc.pid)
- if not OPTIONS.show_all:
- cmdString = proc.cmd
- else:
- cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
- cmdString = cmdString + " [" + str(ipid // 1000) + "]"
- if OPTIONS.show_all:
- if proc.args:
- cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
- else:
- cmdString = cmdString + " " + proc.exe
-
- draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
-
- next_y = y + proc_h
- for child in proc.child_list:
- if next_y > clip[1] + clip[3]:
- break
- child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
- draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
- next_y = next_y + proc_h * proc_tree.num_nodes([child])
-
- return x, y
+ x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ w = ((proc.duration) * rect[2] / proc_tree.duration)
+
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ ipid = int(proc.pid)
+ if not OPTIONS.show_all:
+ cmdString = proc.cmd
+ else:
+ cmdString = ''
+    if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
+ cmdString = cmdString + " [" + str(ipid // 1000) + "]"
+ if OPTIONS.show_all:
+ if proc.args:
+ cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = cmdString + " " + proc.exe
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+
+ next_y = y + proc_h
+ for child in proc.child_list:
+ if next_y > clip[1] + clip[3]:
+ break
+ child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
+ draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
+ next_y = next_y + proc_h * proc_tree.num_nodes([child])
+
+ return x, y
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
- if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
- return
+ if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
+ return
- draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+ draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
- last_tx = -1
- for sample in proc.samples :
- tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
+ last_tx = -1
+ for sample in proc.samples :
+ tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
- # samples are sorted chronologically
- if tx < clip[0]:
- continue
- if tx > clip[0] + clip[2]:
- break
+ # samples are sorted chronologically
+ if tx < clip[0]:
+ continue
+ if tx > clip[0] + clip[2]:
+ break
- tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
- if last_tx != -1 and abs(last_tx - tx) <= tw:
- tw -= last_tx - tx
- tx = last_tx
- tw = max (tw, 1) # nice to see at least something
+ tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
+ if last_tx != -1 and abs(last_tx - tx) <= tw:
+ tw -= last_tx - tx
+ tx = last_tx
+ tw = max (tw, 1) # nice to see at least something
- last_tx = tx + tw
- state = get_proc_state( sample.state )
+ last_tx = tx + tw
+ state = get_proc_state( sample.state )
- color = STATE_COLORS[state]
- if state == STATE_RUNNING:
- alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
- color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
-# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
- elif state == STATE_SLEEPING:
- continue
+ color = STATE_COLORS[state]
+ if state == STATE_RUNNING:
+ alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
+ color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
+# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
+ elif state == STATE_SLEEPING:
+ continue
- draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
+ draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
- ctx.set_source_rgba(*DEP_COLOR)
- ctx.set_dash([2, 2])
- if abs(px - x) < 3:
- dep_off_x = 3
- dep_off_y = proc_h / 4
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, py - dep_off_y)
- ctx.line_to(px, py - dep_off_y)
- else:
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px, y + proc_h / 2)
- ctx.line_to(px, py)
- ctx.stroke()
- ctx.set_dash([])
+ ctx.set_source_rgba(*DEP_COLOR)
+ ctx.set_dash([2, 2])
+ if abs(px - x) < 3:
+ dep_off_x = 3
+ dep_off_y = proc_h / 4
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, py - dep_off_y)
+ ctx.line_to(px, py - dep_off_y)
+ else:
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px, y + proc_h / 2)
+ ctx.line_to(px, py)
+ ctx.stroke()
+ ctx.set_dash([])
# elide the bootchart collector - it is quite distorting
def elide_bootchart(proc):
- return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+ return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
class CumlSample:
- def __init__(self, proc):
- self.cmd = proc.cmd
- self.samples = []
- self.merge_samples (proc)
- self.color = None
-
- def merge_samples(self, proc):
- self.samples.extend (proc.samples)
- self.samples.sort (key = lambda p: p.time)
-
- def next(self):
- global palette_idx
- palette_idx += HSV_STEP
- return palette_idx
-
- def get_color(self):
- if self.color is None:
- i = self.next() % HSV_MAX_MOD
- h = 0.0
- if i is not 0:
- h = (1.0 * i) / HSV_MAX_MOD
- s = 0.5
- v = 1.0
- c = colorsys.hsv_to_rgb (h, s, v)
- self.color = (c[0], c[1], c[2], 1.0)
- return self.color
+ def __init__(self, proc):
+ self.cmd = proc.cmd
+ self.samples = []
+ self.merge_samples (proc)
+ self.color = None
+
+ def merge_samples(self, proc):
+ self.samples.extend (proc.samples)
+ self.samples.sort (key = lambda p: p.time)
+
+ def next(self):
+ global palette_idx
+ palette_idx += HSV_STEP
+ return palette_idx
+
+ def get_color(self):
+ if self.color is None:
+ i = self.next() % HSV_MAX_MOD
+ h = 0.0
+            if i != 0:
+ h = (1.0 * i) / HSV_MAX_MOD
+ s = 0.5
+ v = 1.0
+ c = colorsys.hsv_to_rgb (h, s, v)
+ self.color = (c[0], c[1], c[2], 1.0)
+ return self.color
def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
- global palette_idx
- palette_idx = 0
-
- time_hash = {}
- total_time = 0.0
- m_proc_list = {}
-
- if stat_type is STAT_TYPE_CPU:
- sample_value = 'cpu'
- else:
- sample_value = 'io'
- for proc in proc_tree.process_list:
- if elide_bootchart(proc):
- continue
-
- for sample in proc.samples:
- total_time += getattr(sample.cpu_sample, sample_value)
- if not sample.time in time_hash:
- time_hash[sample.time] = 1
-
- # merge pids with the same cmd
- if not proc.cmd in m_proc_list:
- m_proc_list[proc.cmd] = CumlSample (proc)
- continue
- s = m_proc_list[proc.cmd]
- s.merge_samples (proc)
-
- # all the sample times
- times = sorted(time_hash)
- if len (times) < 2:
- print("degenerate boot chart")
- return
-
- pix_per_ns = chart_bounds[3] / total_time
-# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
-
- # FIXME: we have duplicates in the process list too [!] - why !?
-
- # Render bottom up, left to right
- below = {}
- for time in times:
- below[time] = chart_bounds[1] + chart_bounds[3]
-
- # same colors each time we render
- random.seed (0)
-
- ctx.set_line_width(1)
-
- legends = []
- labels = []
-
- # render each pid in order
- for cs in m_proc_list.values():
- row = {}
- cuml = 0.0
-
- # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
- for sample in cs.samples:
- cuml += getattr(sample.cpu_sample, sample_value)
- row[sample.time] = cuml
-
- process_total_time = cuml
-
- # hide really tiny processes
- if cuml * pix_per_ns <= 2:
- continue
-
- last_time = times[0]
- y = last_below = below[last_time]
- last_cuml = cuml = 0.0
-
- ctx.set_source_rgba(*cs.get_color())
- for time in times:
- render_seg = False
-
- # did the underlying trend increase ?
- if below[time] != last_below:
- last_below = below[last_time]
- last_cuml = cuml
- render_seg = True
-
- # did we move up a pixel increase ?
- if time in row:
- nc = round (row[time] * pix_per_ns)
- if nc != cuml:
- last_cuml = cuml
- cuml = nc
- render_seg = True
-
-# if last_cuml > cuml:
-# assert fail ... - un-sorted process samples
-
- # draw the trailing rectangle from the last time to
- # before now, at the height of the last segment.
- if render_seg:
- w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
- ctx.fill()
-# ctx.stroke()
- last_time = time
- y = below [time] - cuml
-
- row[time] = y
-
- # render the last segment
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- y = below[last_time] - cuml
- ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
- ctx.fill()
-# ctx.stroke()
-
- # render legend if it will fit
- if cuml > 8:
- label = cs.cmd
- extnts = ctx.text_extents(label)
- label_w = extnts[2]
- label_h = extnts[3]
-# print "Text extents %g by %g" % (label_w, label_h)
- labels.append((label,
- chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
- y + (cuml + label_h) / 2))
- if cs in legends:
- print("ARGH - duplicate process in list !")
-
- legends.append ((cs, process_total_time))
-
- below = row
-
- # render grid-lines over the top
- draw_box_ticks(ctx, chart_bounds, sec_w)
-
- # render labels
- for l in labels:
- draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
-
- # Render legends
- font_height = 20
- label_width = 300
- LEGENDS_PER_COL = 15
- LEGENDS_TOTAL = 45
- ctx.set_font_size (TITLE_FONT_SIZE)
- dur_secs = duration / 100
- cpu_secs = total_time / 1000000000
-
- # misleading - with multiple CPUs ...
-# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
- if stat_type is STAT_TYPE_CPU:
- label = "Cumulative CPU usage, by process; total CPU: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
- else:
- label = "Cumulative I/O usage, by process; total I/O: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
-
- draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
- chart_bounds[1] + font_height)
-
- i = 0
- legends = sorted(legends, key=itemgetter(1), reverse=True)
- ctx.set_font_size(TEXT_FONT_SIZE)
- for t in legends:
- cs = t[0]
- time = t[1]
- x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
- y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
- str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
- draw_legend_box(ctx, str, cs.color, x, y, leg_s)
- i = i + 1
- if i >= LEGENDS_TOTAL:
- break
+ global palette_idx
+ palette_idx = 0
+
+ time_hash = {}
+ total_time = 0.0
+ m_proc_list = {}
+
+ if stat_type is STAT_TYPE_CPU:
+ sample_value = 'cpu'
+ else:
+ sample_value = 'io'
+ for proc in proc_tree.process_list:
+ if elide_bootchart(proc):
+ continue
+
+ for sample in proc.samples:
+ total_time += getattr(sample.cpu_sample, sample_value)
+ if not sample.time in time_hash:
+ time_hash[sample.time] = 1
+
+ # merge pids with the same cmd
+ if not proc.cmd in m_proc_list:
+ m_proc_list[proc.cmd] = CumlSample (proc)
+ continue
+ s = m_proc_list[proc.cmd]
+ s.merge_samples (proc)
+
+ # all the sample times
+ times = sorted(time_hash)
+ if len (times) < 2:
+ print("degenerate boot chart")
+ return
+
+ pix_per_ns = chart_bounds[3] / total_time
+# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
+
+ # FIXME: we have duplicates in the process list too [!] - why !?
+
+ # Render bottom up, left to right
+ below = {}
+ for time in times:
+ below[time] = chart_bounds[1] + chart_bounds[3]
+
+ # same colors each time we render
+ random.seed (0)
+
+ ctx.set_line_width(1)
+
+ legends = []
+ labels = []
+
+ # render each pid in order
+ for cs in m_proc_list.values():
+ row = {}
+ cuml = 0.0
+
+ # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
+ for sample in cs.samples:
+ cuml += getattr(sample.cpu_sample, sample_value)
+ row[sample.time] = cuml
+
+ process_total_time = cuml
+
+ # hide really tiny processes
+ if cuml * pix_per_ns <= 2:
+ continue
+
+ last_time = times[0]
+ y = last_below = below[last_time]
+ last_cuml = cuml = 0.0
+
+ ctx.set_source_rgba(*cs.get_color())
+ for time in times:
+ render_seg = False
+
+ # did the underlying trend increase ?
+ if below[time] != last_below:
+ last_below = below[last_time]
+ last_cuml = cuml
+ render_seg = True
+
+ # did we move up a pixel increase ?
+ if time in row:
+ nc = round (row[time] * pix_per_ns)
+ if nc != cuml:
+ last_cuml = cuml
+ cuml = nc
+ render_seg = True
+
+# if last_cuml > cuml:
+# assert fail ... - un-sorted process samples
+
+ # draw the trailing rectangle from the last time to
+ # before now, at the height of the last segment.
+ if render_seg:
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.fill()
+# ctx.stroke()
+ last_time = time
+ y = below [time] - cuml
+
+ row[time] = y
+
+ # render the last segment
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ y = below[last_time] - cuml
+ ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.fill()
+# ctx.stroke()
+
+ # render legend if it will fit
+ if cuml > 8:
+ label = cs.cmd
+ extnts = ctx.text_extents(label)
+ label_w = extnts[2]
+ label_h = extnts[3]
+# print "Text extents %g by %g" % (label_w, label_h)
+ labels.append((label,
+ chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ y + (cuml + label_h) / 2))
+ if cs in legends:
+ print("ARGH - duplicate process in list !")
+
+ legends.append ((cs, process_total_time))
+
+ below = row
+
+ # render grid-lines over the top
+ draw_box_ticks(ctx, chart_bounds, sec_w)
+
+ # render labels
+ for l in labels:
+ draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+
+ # Render legends
+ font_height = 20
+ label_width = 300
+ LEGENDS_PER_COL = 15
+ LEGENDS_TOTAL = 45
+ ctx.set_font_size (TITLE_FONT_SIZE)
+ dur_secs = duration / 100
+ cpu_secs = total_time / 1000000000
+
+ # misleading - with multiple CPUs ...
+# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
+ if stat_type is STAT_TYPE_CPU:
+ label = "Cumulative CPU usage, by process; total CPU: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+ else:
+ label = "Cumulative I/O usage, by process; total I/O: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+
+ draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ chart_bounds[1] + font_height)
+
+ i = 0
+ legends = sorted(legends, key=itemgetter(1), reverse=True)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+ for t in legends:
+ cs = t[0]
+ time = t[1]
+ x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
+ str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
+ draw_legend_box(ctx, str, cs.color, x, y, leg_s)
+ i = i + 1
+ if i >= LEGENDS_TOTAL:
+ break
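
Before rendering the cumulative charts, draw_cuml_graph merges processes that share a command name into a single bucket so the chart shows one band per command rather than per pid. A simplified sketch of that aggregation, with stand-in tuples instead of the parser's real process and sample classes:

from collections import namedtuple

Sample = namedtuple('Sample', 'time cpu')
Proc = namedtuple('Proc', 'cmd samples')

procs = [                                      # hypothetical trace contents
    Proc('gcc', [Sample(0, 0.2), Sample(2, 0.4)]),
    Proc('gcc', [Sample(1, 0.3)]),
    Proc('ld',  [Sample(3, 0.1)]),
]

merged = {}
for proc in procs:
    # merge pids with the same cmd, then keep the samples chronological
    merged.setdefault(proc.cmd, []).extend(proc.samples)
for samples in merged.values():
    samples.sort(key=lambda s: s.time)

assert [s.time for s in merged['gcc']] == [0, 1, 2]
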
diff --git a/external/poky/scripts/pybootchartgui/pybootchartgui/gui.py b/external/poky/scripts/pybootchartgui/pybootchartgui/gui.py
index 7fedd232..e1fe9155 100644
--- a/external/poky/scripts/pybootchartgui/pybootchartgui/gui.py
+++ b/external/poky/scripts/pybootchartgui/pybootchartgui/gui.py
@@ -13,64 +13,83 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-import gobject
-import gtk
-import gtk.gdk
-import gtk.keysyms
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk as gtk
+from gi.repository import Gtk
+from gi.repository import Gdk
+from gi.repository import GObject as gobject
+from gi.repository import GObject
+
from . import draw
from .draw import RenderOptions
-class PyBootchartWidget(gtk.DrawingArea):
+class PyBootchartWidget(gtk.DrawingArea, gtk.Scrollable):
__gsignals__ = {
- 'expose-event': 'override',
- 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
+ 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, Gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
+ hadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ hscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+ vadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ vscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
- self.set_flags(gtk.CAN_FOCUS)
+ self.set_can_focus(True)
- self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
- self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
- self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
+ self.connect("draw", self.on_draw)
+
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self.hadj = None
- self.vadj = None
- self.hadj_changed_signal_id = None
- self.vadj_changed_signal_id = None
-
- def do_expose_event(self, event):
- cr = self.window.cairo_create()
-
- # set a clip region for the expose event
- cr.rectangle(
- event.area.x, event.area.y,
- event.area.width, event.area.height
- )
- cr.clip()
- self.draw(cr, self.get_allocation())
- return False
-
- def draw(self, cr, rect):
+ self.our_width, self.our_height = self.chart_width, self.chart_height
+
+ self.hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj.connect('value-changed', self.on_adjustments_changed)
+ self.hadj.connect('value-changed', self.on_adjustments_changed)
+
+ def bound_vals(self):
+ self.x = max(0, self.x)
+ self.y = max(0, self.y)
+ self.x = min(self.chart_width - self.our_width, self.x)
+ self.y = min(self.chart_height - self.our_height, self.y)
+
+ def on_draw(self, darea, cr):
+ # set a clip region
+ #cr.rectangle(
+ # self.x, self.y,
+ # self.chart_width, self.chart_height
+ #)
+ #cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
@@ -84,7 +103,7 @@ class PyBootchartWidget(gtk.DrawingArea):
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
- self._set_scroll_adjustments (self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
def zoom_to_rect (self, rect):
@@ -122,126 +141,101 @@ class PyBootchartWidget(gtk.DrawingArea):
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self._set_scroll_adjustments(self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
- if event.keyval == gtk.keysyms.Left:
+ if event.keyval == Gdk.keyval_from_name("Left"):
self.x -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Right:
+ elif event.keyval == Gdk.keyval_from_name("Right"):
self.x += self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Up:
+ elif event.keyval == Gdk.keyval_from_name("Up"):
self.y -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Down:
+ elif event.keyval == Gdk.keyval_from_name("Down"):
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
+ self.bound_vals()
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
- if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
+ if event.type not in (Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
- if event.state & gtk.gdk.CONTROL_MASK:
- if event.direction == gtk.gdk.SCROLL_UP:
+ if event.state & Gdk.CONTROL_MASK:
+ if event.direction == Gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
- if event.direction == gtk.gdk.SCROLL_DOWN:
+ if event.direction == Gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
- if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ if state & Gdk.ModifierType.BUTTON2_MASK or state & Gdk.ModifierType.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
+ self.bound_vals()
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
- def on_set_scroll_adjustments(self, area, hadj, vadj):
- self._set_scroll_adjustments (hadj, vadj)
-
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
+ self.our_width = allocation.width
+ if self.chart_width < self.our_width:
+ self.our_width = self.chart_width
+ self.our_height = allocation.height
+ if self.chart_height < self.our_height:
+ self.our_height = self.chart_height
+ self._set_scroll_adjustments()
def _set_adj_upper(self, adj, upper):
- changed = False
- value_changed = False
-
- if adj.upper != upper:
- adj.upper = upper
- changed = True
-
- max_value = max(0.0, upper - adj.page_size)
- if adj.value > max_value:
- adj.value = max_value
- value_changed = True
-
- if changed:
- adj.changed()
- if value_changed:
- adj.value_changed()
-
- def _set_scroll_adjustments(self, hadj, vadj):
- if hadj == None:
- hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
- if vadj == None:
- vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
-
- if self.hadj_changed_signal_id != None and \
- self.hadj != None and hadj != self.hadj:
- self.hadj.disconnect (self.hadj_changed_signal_id)
- if self.vadj_changed_signal_id != None and \
- self.vadj != None and vadj != self.vadj:
- self.vadj.disconnect (self.vadj_changed_signal_id)
-
- if hadj != None:
- self.hadj = hadj
- self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
- self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
-
- if vadj != None:
- self.vadj = vadj
- self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
- self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
+
+ if adj.get_upper() != upper:
+ adj.set_upper(upper)
+
+ def _set_scroll_adjustments(self):
+ self._set_adj_upper (self.hadj, self.zoom_ratio * (self.chart_width - self.our_width))
+ self._set_adj_upper (self.vadj, self.zoom_ratio * (self.chart_height - self.our_height))
def on_adjustments_changed(self, adj):
- self.x = self.hadj.value / self.zoom_ratio
- self.y = self.vadj.value / self.zoom_ratio
+ self.x = self.hadj.get_value() / self.zoom_ratio
+ self.y = self.vadj.get_value() / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
- self.hadj.value = x * self.zoom_ratio
- self.vadj.value = y * self.zoom_ratio
-
-PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
+ self.hadj.set_value(x * self.zoom_ratio)
+ #self.hadj.value_changed()
+ self.vadj.set_value(y * self.zoom_ratio)
class PyBootchartShell(gtk.VBox):
ui = '''
@@ -260,7 +254,7 @@ class PyBootchartShell(gtk.VBox):
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
- self.widget = PyBootchartWidget(trace, options, xscale)
+ self.widget2 = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
@@ -275,12 +269,12 @@ class PyBootchartShell(gtk.VBox):
# Create actions
actiongroup.add_actions((
- ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
- ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
- ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
- ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
- ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
- ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ('Expand', gtk.STOCK_ADD, None, None, None, self.widget2.on_expand),
+ ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget2.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget2.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget2.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget2.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget2.on_zoom_100),
))
# Add the actiongroup to the uimanager
@@ -290,29 +284,33 @@ class PyBootchartShell(gtk.VBox):
uimanager.add_ui_from_string(self.ui)
# Scrolled window
- scrolled = gtk.ScrolledWindow()
- scrolled.add(self.widget)
+ scrolled = gtk.ScrolledWindow(self.widget2.hadj, self.widget2.vadj)
+ scrolled.add(self.widget2)
+
+ #scrolled.set_hadjustment()
+ #scrolled.set_vadjustment(self.widget2.vadj)
+ scrolled.set_policy(gtk.PolicyType.ALWAYS, gtk.PolicyType.ALWAYS)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
- hbox.pack_start(toolbar, True, True)
+ hbox.pack_start(toolbar, True, True, 0)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
- button.connect ('toggled', self.widget.show_toggled)
+ button.connect ('toggled', self.widget2.show_toggled)
button.set_active(options.app_options.show_all)
- hbox.pack_start (button, False, True)
+ hbox.pack_start (button, False, True, 0)
- self.pack_start(hbox, False)
- self.pack_start(scrolled)
+ self.pack_start(hbox, False, True, 0)
+ self.pack_start(scrolled, True, True, 0)
self.show_all()
def grab_focus(self, window):
- window.set_focus(self.widget)
+ window.set_focus(self.widget2)
class PyBootchartWindow(gtk.Window):
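
The gui.py changes port the widget from PyGTK to GTK 3 via PyGObject: rendering moves from the expose-event handler to the draw signal, and scrolling is provided by implementing Gtk.Scrollable through four GObject properties instead of the removed set-scroll-adjustments signal. A minimal, self-contained sketch of that pattern (an illustrative widget, not the poky one):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject

class ScrollableArea(Gtk.DrawingArea, Gtk.Scrollable):
    # Gtk.Scrollable is satisfied by exposing these four properties;
    # a ScrolledWindow then sets the adjustments on the widget itself.
    hadjustment = GObject.Property(type=Gtk.Adjustment)
    vadjustment = GObject.Property(type=Gtk.Adjustment)
    hscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                      default=Gtk.ScrollablePolicy.MINIMUM)
    vscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                      default=Gtk.ScrollablePolicy.MINIMUM)

    def __init__(self):
        Gtk.DrawingArea.__init__(self)
        self.connect("draw", self.on_draw)     # GTK 3 replaces expose-event

    def on_draw(self, widget, cr):
        cr.set_source_rgb(1.0, 1.0, 1.0)       # cr is a cairo context
        cr.paint()
        return False

window = Gtk.Window()
window.set_default_size(400, 300)
scrolled = Gtk.ScrolledWindow()
scrolled.add(ScrollableArea())
window.add(scrolled)
window.connect("destroy", Gtk.main_quit)
window.show_all()
Gtk.main()
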
diff --git a/external/poky/scripts/pybootchartgui/pybootchartgui/parsing.py b/external/poky/scripts/pybootchartgui/pybootchartgui/parsing.py
index bcfb2da5..b42dac6b 100644
--- a/external/poky/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/external/poky/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -18,7 +18,7 @@ import string
import re
import sys
import tarfile
-from time import clock
+import time
from collections import defaultdict
from functools import reduce
@@ -267,7 +267,7 @@ def _parse_headers(file):
value = line.strip()
headers[last] += value
return headers, last
- return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
+ return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0]
def _parse_timed_blocks(file):
"""Parses (ie., splits) a file into so-called timed-blocks. A
@@ -281,7 +281,7 @@ def _parse_timed_blocks(file):
return (int(lines[0]), lines[1:])
except ValueError:
raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
- blocks = file.read().decode('utf-8').split('\n\n')
+ blocks = file.read().split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
def _parse_proc_ps_log(writer, file):
@@ -577,7 +577,7 @@ def _parse_dmesg(writer, file):
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
t = timestamp_re.match (line)
if t is None:
# print "duff timestamp " + line
@@ -665,7 +665,7 @@ def _parse_pacct(writer, file):
def _parse_paternity_log(writer, file):
parent_map = {}
parent_map[0] = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
if not line:
continue
elems = line.split(' ') # <Child> <Parent>
@@ -678,7 +678,7 @@ def _parse_paternity_log(writer, file):
def _parse_cmdline_log(writer, file):
cmdLines = {}
- for block in file.read().decode('utf-8').split('\n\n'):
+ for block in file.read().split('\n\n'):
lines = block.split('\n')
if len (lines) >= 3:
# print "Lines '%s'" % (lines[0])
@@ -723,7 +723,7 @@ def get_num_cpus(headers):
def _do_parse(writer, state, filename, file):
writer.info("parsing '%s'" % filename)
- t1 = clock()
+ t1 = time.process_time()
name = os.path.basename(filename)
if name == "proc_diskstats.log":
state.disk_stats = _parse_proc_disk_stat_log(file)
@@ -743,7 +743,7 @@ def _do_parse(writer, state, filename, file):
state.monitor_disk = _parse_monitor_disk_log(file)
elif not filename.endswith('.log'):
_parse_bitbake_buildstats(writer, state, filename, file)
- t2 = clock()
+ t2 = time.process_time()
writer.info(" %s seconds" % str(t2-t1))
return state
@@ -751,7 +751,7 @@ def parse_file(writer, state, filename):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
- with open(filename, "rb") as file:
+ with open(filename, "r") as file:
return _do_parse(writer, state, filename, file)
def parse_paths(writer, state, paths):
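
parsing.py is updated for Python 3: the log files are opened in text mode (so the explicit .decode('utf-8') calls go away) and time.clock(), removed in Python 3.8, is replaced by time.process_time(). A small sketch of the timing change; the helper name is made up for illustration:

import time

def timed_call(report, fn, *args):
    # Mirrors _do_parse's timing: process_time() measures CPU time,
    # much as time.clock() did on Linux before its removal in 3.8.
    t1 = time.process_time()
    result = fn(*args)
    t2 = time.process_time()
    report(" %s seconds" % str(t2 - t1))
    return result

timed_call(print, sum, range(1000000))
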
diff --git a/external/poky/scripts/pythondeps b/external/poky/scripts/pythondeps
index 3e13a587..be21dd84 100755
--- a/external/poky/scripts/pythondeps
+++ b/external/poky/scripts/pythondeps
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Determine dependencies of python scripts or available python modules in a search path.
#
# Given the -d argument and a filename/filenames, returns the modules imported by those files.
diff --git a/external/poky/scripts/recipetool b/external/poky/scripts/recipetool
index 3a3c9b74..e2d585d2 100755
--- a/external/poky/scripts/recipetool
+++ b/external/poky/scripts/recipetool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/external/poky/scripts/relocate_sdk.py b/external/poky/scripts/relocate_sdk.py
index c752fa2c..8c0fdb98 100755
--- a/external/poky/scripts/relocate_sdk.py
+++ b/external/poky/scripts/relocate_sdk.py
@@ -2,18 +2,7 @@
#
# Copyright (c) 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This script is called by the SDK installer script. It replaces the dynamic
diff --git a/external/poky/scripts/resulttool b/external/poky/scripts/resulttool
index 9477667a..fc282bda 100755
--- a/external/poky/scripts/resulttool
+++ b/external/poky/scripts/resulttool
@@ -25,14 +25,7 @@
#
# Copyright (c) 2019, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
import os
diff --git a/external/poky/scripts/rpm2cpio.sh b/external/poky/scripts/rpm2cpio.sh
index cf23472b..7cd771bb 100755
--- a/external/poky/scripts/rpm2cpio.sh
+++ b/external/poky/scripts/rpm2cpio.sh
@@ -1,5 +1,4 @@
#!/bin/sh -efu
-
# This file comes from rpm 4.x distribution
fatal() {
@@ -23,7 +22,8 @@ calcsize() {
i=0
while [ $i -lt 8 ]; do
- b="$(_dd $(($offset + $i)) bs=1 count=1)"
+ b=$(_dd $(($offset + $i)) bs=1 count=1; echo X)
+ b=${b%X}
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
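
The rpm2cpio.sh change works around POSIX command substitution stripping trailing newlines: if the single byte read by dd happens to be a newline, $(...) would return an empty string, so the script appends a sentinel X via echo and strips it afterwards with ${b%X}. A small Python sketch that only emulates that stripping to show why the sentinel is needed (the helper is illustrative, not part of the script):

def command_substitution(output):
    # What $(...) does to captured output: strip all trailing newlines.
    return output.rstrip('\n')

byte = '\n'                                  # the byte dd happened to read
assert command_substitution(byte) == ''      # the newline would be lost outright
b = command_substitution(byte + 'X\n')       # b=$(_dd ...; echo X)
assert b == '\nX'
assert b[:-1] == '\n'                        # b=${b%X} recovers the byte
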
diff --git a/external/poky/scripts/runqemu b/external/poky/scripts/runqemu
index f83e0572..310d79fd 100755
--- a/external/poky/scripts/runqemu
+++ b/external/poky/scripts/runqemu
@@ -5,18 +5,8 @@
# Copyright (C) 2006-2011 Linux Foundation
# Copyright (c) 2016 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
@@ -69,26 +59,32 @@ def print_usage():
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
KERNEL - the kernel image file to use
+ BIOS - the bios image file to use
ROOTFS - the rootfs image file or nfsroot directory to use
DEVICE_TREE - the device tree blob to use
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+ sdl - choose the SDL UI frontend
+ gtk - choose the Gtk UI frontend
+ gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
+ gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
+ egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
serial - enable a serial console on /dev/ttyS0
+ serialstdio - enable a serial console on the console (regardless of graphics mode)
slirp - enable user networking, no root privileges are required
+    snapshot - don't write changes back to images
kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
- biosdir=<dir> - specify custom bios dir
- biosfilename=<filename> - specify bios filename
qemuparams=<xyz> - specify custom parameters to QEMU
bootparams=<xyz> - specify custom kernel parameters during boot
help, -h, --help: print this text
-d, --debug: Enable debug output
- -q, --quite: Hide most output except error messages
+ -q, --quiet: Hide most output except error messages
Examples:
runqemu
@@ -114,39 +110,6 @@ def check_tun():
if not os.access(dev_tun, os.W_OK):
raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-def check_libgl(qemu_bin):
- cmd = ('ldd', qemu_bin)
- logger.debug('Running %s...' % str(cmd))
- need_gl = subprocess.check_output(cmd).decode('utf-8')
- if re.search('libGLU', need_gl):
- # We can't run without a libGL.so
- libgl = False
- check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
- ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
- ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
-
- for (f1, f2) in check_files:
- if re.search('\*', f1):
- for g1 in glob.glob(f1):
- if libgl:
- break
- if os.path.exists(g1):
- for g2 in glob.glob(f2):
- if os.path.exists(g2):
- libgl = True
- break
- if libgl:
- break
- else:
- if os.path.exists(f1) and os.path.exists(f2):
- libgl = True
- break
- if not libgl:
- logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
- logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
- logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
- raise RunQemuError('%s requires libGLU, but not found' % qemu_bin)
-
def get_first_file(cmds):
"""Return first file found in wildcard cmds"""
for cmd in cmds:
@@ -157,19 +120,6 @@ def get_first_file(cmds):
return f
return ''
-def check_free_port(host, port):
- """ Check whether the port is free or not """
- import socket
- from contextlib import closing
-
- with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
- if sock.connect_ex((host, port)) == 0:
- # Port is open, so not free
- return False
- else:
- # Port is not open, so free
- return True
-
class BaseConfig(object):
def __init__(self):
# The self.d saved vars from self.set(), part of them are from qemuboot.conf
@@ -180,10 +130,12 @@ class BaseConfig(object):
self.env_vars = ('MACHINE',
'ROOTFS',
'KERNEL',
+ 'BIOS',
'DEVICE_TREE',
'DEPLOY_DIR_IMAGE',
'OE_TMPDIR',
'OECORE_NATIVE_SYSROOT',
+ 'MULTICONFIG',
)
self.qemu_opt = ''
@@ -196,12 +148,15 @@ class BaseConfig(object):
# to be added with -drive if=pflash.
# Found in the same places as the rootfs, with or without one of
# these suffices: qcow2, bin.
- # Setting one also adds "-vga std" because that is all that
- # OVMF supports.
self.ovmf_bios = []
+ # When enrolling default Secure Boot keys, the hypervisor
+ # must provide the Platform Key and the first Key Exchange Key
+ # certificate in the Type 11 SMBIOS table.
+ self.ovmf_secboot_pkkek1 = ''
self.qemuboot = ''
self.qbconfload = False
self.kernel = ''
+ self.bios = ''
self.kernel_cmdline = ''
self.kernel_cmdline_script = ''
self.bootparams = ''
@@ -210,23 +165,28 @@ class BaseConfig(object):
self.kvm_enabled = False
self.vhost_enabled = False
self.slirp_enabled = False
+ self.net_bridge = None
self.nfs_instance = 0
self.nfs_running = False
+ self.serialconsole = False
self.serialstdio = False
self.cleantap = False
self.saved_stty = ''
self.audio_enabled = False
self.tcpserial_portnum = ''
- self.custombiosdir = ''
- self.lock = ''
- self.lock_descriptor = None
+ self.taplock = ''
+ self.taplock_descriptor = None
+ self.portlocks = {}
self.bitbake_e = ''
self.snapshot = False
+ self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi')
self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
- self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'wic.vmdk',
- 'wic.qcow2', 'wic.vdi', 'iso')
+ self.vmtypes = ('hddimg', 'iso')
+ self.fsinfo = {}
self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+ self.cmdline_ip_slirp = "ip=dhcp"
+ self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
# Use different mac section for tap and slirp to avoid
# conflicts, e.g., when one is running with tap, the other is
# running with slirp.
@@ -240,30 +200,78 @@ class BaseConfig(object):
# avoid cleanup twice
self.cleaned = False
- def acquire_lock(self, error=True):
- logger.debug("Acquiring lockfile %s..." % self.lock)
+ def acquire_taplock(self, error=True):
+ logger.debug("Acquiring lockfile %s..." % self.taplock)
try:
- self.lock_descriptor = open(self.lock, 'w')
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ self.taplock_descriptor = open(self.taplock, 'w')
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
except Exception as e:
- msg = "Acquiring lockfile %s failed: %s" % (self.lock, e)
+ msg = "Acquiring lockfile %s failed: %s" % (self.taplock, e)
if error:
logger.error(msg)
else:
logger.info(msg)
- if self.lock_descriptor:
- self.lock_descriptor.close()
- self.lock_descriptor = None
+ if self.taplock_descriptor:
+ self.taplock_descriptor.close()
+ self.taplock_descriptor = None
return False
return True
- def release_lock(self):
- if self.lock_descriptor:
+ def release_taplock(self):
+ if self.taplock_descriptor:
logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
- self.lock_descriptor.close()
- os.remove(self.lock)
- self.lock_descriptor = None
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
+ self.taplock_descriptor.close()
+ os.remove(self.taplock)
+ self.taplock_descriptor = None
+
+ def check_free_port(self, host, port, lockdir):
+ """ Check whether the port is free or not """
+ import socket
+ from contextlib import closing
+
+ lockfile = os.path.join(lockdir, str(port) + '.lock')
+ if self.acquire_portlock(lockfile):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ if sock.connect_ex((host, port)) == 0:
+ # Port is open, so not free
+ self.release_portlock(lockfile)
+ return False
+ else:
+ # Port is not open, so free
+ return True
+ else:
+ return False
+
+ def acquire_portlock(self, lockfile):
+ logger.debug("Acquiring lockfile %s..." % lockfile)
+ try:
+ portlock_descriptor = open(lockfile, 'w')
+ self.portlocks.update({lockfile: portlock_descriptor})
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ msg = "Acquiring lockfile %s failed: %s" % (lockfile, e)
+ logger.info(msg)
+ if lockfile in self.portlocks.keys() and self.portlocks[lockfile]:
+ self.portlocks[lockfile].close()
+ del self.portlocks[lockfile]
+ return False
+ return True
+
+ def release_portlock(self, lockfile=None):
+ if lockfile != None:
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
+ self.portlocks[lockfile].close()
+ os.remove(lockfile)
+ del self.portlocks[lockfile]
+ elif len(self.portlocks):
+ for lockfile, descriptor in self.portlocks.items():
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ fcntl.flock(descriptor, fcntl.LOCK_UN)
+ descriptor.close()
+ os.remove(lockfile)
+ self.portlocks = {}
def get(self, key):
if key in self.d:
@@ -290,7 +298,7 @@ class BaseConfig(object):
def check_arg_fstype(self, fst):
"""Check and set FSTYPE"""
- if fst not in self.fstypes + self.vmtypes:
+ if fst not in self.fstypes + self.vmtypes + self.wictypes:
logger.warning("Maybe unsupported FSTYPE: %s" % fst)
if not self.fstype or self.fstype == fst:
if fst == 'ramfs':
@@ -397,9 +405,7 @@ class BaseConfig(object):
self.set("MACHINE", arg)
return
- cmd = 'MACHINE=%s bitbake -e' % arg
- logger.info('Running %s...' % cmd)
- self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ self.bitbake_e = self.run_bitbake_env(arg)
# bitbake -e doesn't report invalid MACHINE as an error, so
# let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
# MACHINE.
@@ -427,13 +433,50 @@ class BaseConfig(object):
unknown_arg = ""
for arg in sys.argv[1:]:
- if arg in self.fstypes + self.vmtypes:
+ if arg in self.fstypes + self.vmtypes + self.wictypes:
self.check_arg_fstype(arg)
elif arg == 'nographic':
self.qemu_opt_script += ' -nographic'
self.kernel_cmdline_script += ' console=ttyS0'
+ elif arg == 'sdl':
+ if 'gl' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display sdl,gl=on'
+ elif 'gl-es' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display sdl,gl=es'
+ else:
+ self.qemu_opt_script += ' -display sdl'
+ elif arg == 'gtk':
+ if 'gl' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display gtk,gl=on'
+ elif 'gl-es' in sys.argv[1:]:
+ self.qemu_opt_script += ' -vga virtio -display gtk,gl=es'
+ else:
+ self.qemu_opt_script += ' -display gtk'
+ elif arg == 'gl' or arg == 'gl-es':
+ # These args are handled inside sdl or gtk blocks above
+ pass
+ elif arg == 'egl-headless':
+ self.qemu_opt_script += ' -vga virtio -display egl-headless'
+ # As runqemu can be run within bitbake (when using testimage, for example),
+ # we need to ensure that we run host pkg-config, and that it does not
+ # get mis-directed to native build paths set by bitbake.
+ try:
+ del os.environ['PKG_CONFIG_PATH']
+ del os.environ['PKG_CONFIG_DIR']
+ del os.environ['PKG_CONFIG_LIBDIR']
+ del os.environ['PKG_CONFIG_SYSROOT_DIR']
+ except KeyError:
+ pass
+ try:
+ dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
+ except subprocess.CalledProcessError as e:
+ raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
+ os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
elif arg == 'serial':
self.kernel_cmdline_script += ' console=ttyS0'
+ self.serialconsole = True
+ elif arg == "serialstdio":
+ self.kernel_cmdline_script += ' console=ttyS0'
self.serialstdio = True
elif arg == 'audio':
logger.info("Enabling audio in qemu")
@@ -445,16 +488,14 @@ class BaseConfig(object):
self.vhost_enabled = True
elif arg == 'slirp':
self.slirp_enabled = True
+ elif arg.startswith('bridge='):
+ self.net_bridge = '%s' % arg[len('bridge='):]
elif arg == 'snapshot':
self.snapshot = True
elif arg == 'publicvnc':
self.qemu_opt_script += ' -vnc :0'
elif arg.startswith('tcpserial='):
- self.tcpserial_portnum = arg[len('tcpserial='):]
- elif arg.startswith('biosdir='):
- self.custombiosdir = arg[len('biosdir='):]
- elif arg.startswith('biosfilename='):
- self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
self.qemuparams = ' %s' % arg[len('qemuparams='):]
elif arg.startswith('bootparams='):
@@ -503,12 +544,13 @@ class BaseConfig(object):
yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
dev_kvm = '/dev/kvm'
dev_vhost = '/dev/vhost-net'
- with open('/proc/cpuinfo', 'r') as f:
- kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
- if not kvm_cap:
- logger.error("You are trying to enable KVM on a cpu without VT support.")
- logger.error("Remove kvm from the command-line, or refer:")
- raise RunQemuError(yocto_kvm_wiki)
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise RunQemuError(yocto_kvm_wiki)
if not os.path.exists(dev_kvm):
logger.error("Missing KVM device. Have you inserted kvm modules?")
@@ -547,6 +589,40 @@ class BaseConfig(object):
else:
raise RunQemuError("FSTYPE is NULL!")
+ # parse QB_FSINFO into dict, e.g. { 'wic': ['no-kernel-in-fs', 'a-flag'], 'ext4': ['another-flag']}
+ wic_fs = False
+ qb_fsinfo = self.get('QB_FSINFO')
+ if qb_fsinfo:
+ qb_fsinfo = qb_fsinfo.split()
+ for fsinfo in qb_fsinfo:
+ try:
+ fstype, fsflag = fsinfo.split(':')
+
+ if fstype == 'wic':
+ if fsflag == 'no-kernel-in-fs':
+ wic_fs = True
+ elif fsflag == 'kernel-in-fs':
+ wic_fs = False
+ else:
+ logger.warn('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ continue
+ else:
+ logger.warn('QB_FSINFO is not supported for image type "%s"', fstype)
+ continue
+
+ if fstype in self.fsinfo:
+ self.fsinfo[fstype].append(fsflag)
+ else:
+ self.fsinfo[fstype] = [fsflag]
+ except Exception:
+ logger.error('Invalid parameter "%s" in QB_FSINFO', fsinfo)
+
+ # treat wic images as vmimages (with kernel) or as fsimages (rootfs only)
+ if wic_fs:
+ self.fstypes = self.fstypes + self.wictypes
+ else:
+ self.vmtypes = self.vmtypes + self.wictypes
+
def check_rootfs(self):
"""Check and set rootfs"""
@@ -578,6 +654,23 @@ class BaseConfig(object):
if not os.path.exists(self.rootfs):
raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
+ def setup_pkkek1(self):
+ """
+ Extract from PEM certificate the Platform Key and first Key
+ Exchange Key certificate string. The hypervisor needs to provide
+ it in the Type 11 SMBIOS table
+ """
+ pemcert = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), 'OvmfPkKek1.pem')
+ try:
+ with open(pemcert, 'r') as pemfile:
+ key = pemfile.read().replace('\n', ''). \
+ replace('-----BEGIN CERTIFICATE-----', ''). \
+ replace('-----END CERTIFICATE-----', '')
+ self.ovmf_secboot_pkkek1 = key
+
+ except FileNotFoundError:
+ raise RunQemuError("Can't open PEM certificate %s " % pemcert)
+
def check_ovmf(self):
"""Check and set full path for OVMF firmware and variable file(s)."""
@@ -588,6 +681,8 @@ class BaseConfig(object):
path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
if os.path.exists(path):
self.ovmf_bios[index] = path
+ if ovmf.endswith('secboot'):
+ self.setup_pkkek1()
break
else:
raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
@@ -642,25 +737,30 @@ class BaseConfig(object):
if not os.path.exists(self.dtb):
raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
- def check_biosdir(self):
- """Check custombiosdir"""
- if not self.custombiosdir:
+ def check_bios(self):
+ """Check and set bios"""
+
+ # See if the user supplied a BIOS option
+ if self.get('BIOS'):
+ self.bios = self.get('BIOS')
+
+ # QB_DEFAULT_BIOS is always a full file path
+ bios_name = os.path.basename(self.get('QB_DEFAULT_BIOS'))
+
+ # The user didn't want a bios to be loaded
+ if (bios_name == "" or bios_name == "none") and not self.bios:
return
- biosdir = ""
- biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
- biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
- for i in (self.custombiosdir, biosdir_native, biosdir_host):
- if os.path.isdir(i):
- biosdir = i
- break
+ if not self.bios:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ self.bios = "%s/%s" % (deploy_dir_image, bios_name)
+
+ if not self.bios:
+ raise RunQemuError('BIOS not found: %s' % bios_name)
+
+ if not os.path.exists(self.bios):
+ raise RunQemuError("KERNEL %s not found" % self.bios)
- if biosdir:
- logger.debug("Assuming biosdir is: %s" % biosdir)
- self.qemu_opt_script += ' -L %s' % biosdir
- else:
- logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
- raise RunQemuError("Invalid custombiosdir: %s" % self.custombiosdir)
def check_mem(self):
"""
@@ -671,8 +771,8 @@ class BaseConfig(object):
if s:
self.set('QB_MEM', '-m %s' % s.group(1))
elif not self.get('QB_MEM'):
- logger.info('QB_MEM is not set, use 512M by default')
- self.set('QB_MEM', '-m 512')
+ logger.info('QB_MEM is not set, use 256M by default')
+ self.set('QB_MEM', '-m 256')
# Check and remove M or m suffix
qb_mem = self.get('QB_MEM')
@@ -693,15 +793,21 @@ class BaseConfig(object):
def check_tcpserial(self):
if self.tcpserial_portnum:
+ ports = self.tcpserial_portnum.split(':')
+ port = ports[0]
if self.get('QB_TCPSERIAL_OPT'):
- self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+
+ if len(ports) > 1:
+ for port in ports[1:]:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
def check_and_set(self):
"""Check configs sanity and set when needed"""
self.validate_paths()
- if not self.slirp_enabled:
+ if not self.slirp_enabled and not self.net_bridge:
check_tun()
# Check audio
if self.audio_enabled:
@@ -715,13 +821,14 @@ class BaseConfig(object):
else:
os.putenv('QEMU_AUDIO_DRV', 'none')
+ self.check_qemu_system()
self.check_kvm()
self.check_fstype()
self.check_rootfs()
self.check_ovmf()
self.check_kernel()
self.check_dtb()
- self.check_biosdir()
+ self.check_bios()
self.check_mem()
self.check_tcpserial()
@@ -830,21 +937,30 @@ class BaseConfig(object):
self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))
def print_config(self):
- logger.info('Continuing with the following parameters:\n')
+ logoutput = ['Continuing with the following parameters:']
if not self.fstype in self.vmtypes:
- print('KERNEL: [%s]' % self.kernel)
+ logoutput.append('KERNEL: [%s]' % self.kernel)
+ if self.bios:
+ logoutput.append('BIOS: [%s]' % self.bios)
if self.dtb:
- print('DTB: [%s]' % self.dtb)
- print('MACHINE: [%s]' % self.get('MACHINE'))
- print('FSTYPE: [%s]' % self.fstype)
+ logoutput.append('DTB: [%s]' % self.dtb)
+ logoutput.append('MACHINE: [%s]' % self.get('MACHINE'))
+ try:
+ fstype_flags = ' (' + ', '.join(self.fsinfo[self.fstype]) + ')'
+ except KeyError:
+ fstype_flags = ''
+ logoutput.append('FSTYPE: [%s%s]' % (self.fstype, fstype_flags))
if self.fstype == 'nfs':
- print('NFS_DIR: [%s]' % self.rootfs)
+ logoutput.append('NFS_DIR: [%s]' % self.rootfs)
else:
- print('ROOTFS: [%s]' % self.rootfs)
+ logoutput.append('ROOTFS: [%s]' % self.rootfs)
if self.ovmf_bios:
- print('OVMF: %s' % self.ovmf_bios)
- print('CONFFILE: [%s]' % self.qemuboot)
- print('')
+ logoutput.append('OVMF: %s' % self.ovmf_bios)
+ if (self.ovmf_secboot_pkkek1):
+ logoutput.append('SECBOOT PKKEK1: [%s...]' % self.ovmf_secboot_pkkek1[0:100])
+ logoutput.append('CONFFILE: [%s]' % self.qemuboot)
+ logoutput.append('')
+ logger.info('\n'.join(logoutput))
def setup_nfs(self):
if not self.nfs_server:
@@ -874,7 +990,7 @@ class BaseConfig(object):
# Use '%s' since they are integers
os.putenv(k, '%s' % v)
- self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port)
+ self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s" % (nfsd_port, mountd_port)
# Extract .tar.bz2 or .tar.bz if no nfs dir
if not (self.rootfs and os.path.isdir(self.rootfs)):
@@ -909,12 +1025,18 @@ class BaseConfig(object):
self.nfs_running = True
+ def setup_net_bridge(self):
+ self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
+ self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+
def setup_slirp(self):
"""Setup user networking"""
if self.fstype == 'nfs':
self.setup_nfs()
- self.kernel_cmdline_script += ' ip=dhcp'
+ netconf = " " + self.cmdline_ip_slirp
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
# Port mapping
hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
@@ -923,10 +1045,21 @@ class BaseConfig(object):
ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
ports = [int(i) for i in ports]
mac = 2
+
+ lockdir = "/tmp/qemu-port-locks"
+ if not os.path.exists(lockdir):
+ # There might be a race issue when multi runqemu processess are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+
# Find a free port to avoid conflicts
for p in ports[:]:
p_new = p
- while not check_free_port('localhost', p_new):
+ while not self.check_free_port('localhost', p_new, lockdir):
p_new += 1
mac += 1
while p_new in ports:
@@ -981,8 +1114,8 @@ class BaseConfig(object):
if os.path.exists('%s.skip' % lockfile):
logger.info('Found %s.skip, skipping %s' % (lockfile, p))
continue
- self.lock = lockfile + '.lock'
- if self.acquire_lock(error=False):
+ self.taplock = lockfile + '.lock'
+ if self.acquire_taplock(error=False):
tap = p
logger.info("Using preconfigured tap device %s" % tap)
logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
@@ -998,25 +1131,31 @@ class BaseConfig(object):
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
- tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ try:
+ tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ except subprocess.CalledProcessError as e:
+ logger.error('Setting up tap device failed:\n%s\nRun runqemu-gen-tapdevs to manually create one.' % str(e))
+ sys.exit(1)
lockfile = os.path.join(lockdir, tap)
- self.lock = lockfile + '.lock'
- self.acquire_lock()
+ self.taplock = lockfile + '.lock'
+ self.acquire_taplock()
self.cleantap = True
logger.debug('Created tap: %s' % tap)
if not tap:
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
- return 1
+ sys.exit(1)
self.tap = tap
tapnum = int(tap[3:])
gateway = tapnum * 2 + 1
client = gateway + 1
if self.fstype == 'nfs':
self.setup_nfs()
- netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
- logger.info("Network configuration: %s", netconf)
- self.kernel_cmdline_script += " ip=%s" % netconf
+ netconf = " " + self.cmdline_ip_tap
+ netconf = netconf.replace('@CLIENT@', str(client))
+ netconf = netconf.replace('@GATEWAY@', str(gateway))
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
mac = "%s%02x" % (self.mac_tap, client)
qb_tap_opt = self.get('QB_TAP_OPT')
if qb_tap_opt:
@@ -1035,9 +1174,13 @@ class BaseConfig(object):
if sys.stdin.isatty():
self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
- if self.slirp_enabled:
+ if self.net_bridge:
+ self.setup_net_bridge()
+ elif self.slirp_enabled:
+ self.cmdline_ip_slirp = self.get('QB_CMDLINE_IP_SLIRP') or self.cmdline_ip_slirp
self.setup_slirp()
else:
+ self.cmdline_ip_tap = self.get('QB_CMDLINE_IP_TAP') or self.cmdline_ip_tap
self.setup_tap()
def setup_rootfs(self):
@@ -1053,6 +1196,10 @@ class BaseConfig(object):
else:
self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
+ qb_rootfs_extra_opt = self.get("QB_ROOTFS_EXTRA_OPT")
+ if qb_rootfs_extra_opt and not qb_rootfs_extra_opt.startswith(","):
+ qb_rootfs_extra_opt = "," + qb_rootfs_extra_opt
+
if self.fstype in ('cpio.gz', 'cpio'):
self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
self.rootfs_options = '-initrd %s' % self.rootfs
@@ -1065,11 +1212,15 @@ class BaseConfig(object):
drive_type = self.get('QB_DRIVE_TYPE')
if drive_type.startswith("/dev/sd"):
logger.info('Using scsi drive')
- vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
- % (self.rootfs, rootfs_format)
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd%s' \
+ % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
elif drive_type.startswith("/dev/hd"):
logger.info('Using ide drive')
vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
+ elif drive_type.startswith("/dev/vdb"):
+ logger.info('Using block virtio drive')
+ vm_drive = '-drive id=disk0,file=%s,if=none,format=%s -device virtio-blk-device,drive=disk0%s' \
+ % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
else:
# virtio might have been selected explicitly (just use it), or
# is used as fallback (then warn about that).
@@ -1081,12 +1232,12 @@ class BaseConfig(object):
# All branches above set vm_drive.
self.rootfs_options = '%s -no-reboot' % vm_drive
- self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+ self.kernel_cmdline = 'root=%s rw' % (self.get('QB_KERNEL_ROOT'))
if self.fstype == 'nfs':
self.rootfs_options = ''
k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
- self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+ self.kernel_cmdline = 'root=%s rw' % k_root
if self.fstype == 'none':
self.rootfs_options = ''
@@ -1140,21 +1291,23 @@ class BaseConfig(object):
return 'qemu-system-%s' % qbsys
- def setup_final(self):
+ def check_qemu_system(self):
qemu_system = self.get('QB_SYSTEM_NAME')
if not qemu_system:
qemu_system = self.guess_qb_system()
if not qemu_system:
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
+ self.qemu_system = qemu_system
- qemu_bin = os.path.join(self.bindir_native, qemu_system)
+ def setup_final(self):
+ qemu_bin = os.path.join(self.bindir_native, self.qemu_system)
# It is possible to have qemu-native in ASSUME_PROVIDED, and it won't
# find QEMU in sysroot, it needs to use host's qemu.
if not os.path.exists(qemu_bin):
logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
for path in (os.environ['PATH'] or '').split(':'):
- qemu_bin_tmp = os.path.join(path, qemu_system)
+ qemu_bin_tmp = os.path.join(path, self.qemu_system)
logger.info("Trying: %s" % qemu_bin_tmp)
if os.path.exists(qemu_bin_tmp):
qemu_bin = qemu_bin_tmp
@@ -1166,20 +1319,21 @@ class BaseConfig(object):
if not os.access(qemu_bin, os.X_OK):
raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
- check_libgl(qemu_bin)
-
self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
for ovmf in self.ovmf_bios:
format = ovmf.rsplit('.', 1)[-1]
self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
- if self.ovmf_bios:
- # OVMF only supports normal VGA, i.e. we need to override a -vga vmware
- # that gets added for example for normal qemux86.
- self.qemu_opt += ' -vga std'
self.qemu_opt += ' ' + self.qemu_opt_script
+ if self.ovmf_secboot_pkkek1:
+ # Provide the Platform Key and first Key Exchange Key certificate as an
+ # OEM string in the SMBIOS Type 11 table. Prepend the certificate string
+ # with "application prefix" of the EnrollDefaultKeys.efi application
+ self.qemu_opt += ' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' \
+ + self.ovmf_secboot_pkkek1
+
# Append qemuparams to override previous settings
if self.qemuparams:
self.qemu_opt += ' ' + self.qemuparams
@@ -1187,7 +1341,7 @@ class BaseConfig(object):
if self.snapshot:
self.qemu_opt += " -snapshot"
- if self.serialstdio:
+ if self.serialconsole:
if sys.stdin.isatty():
subprocess.check_call(("stty", "intr", "^]"))
logger.info("Interrupt character is '^]'")
@@ -1214,7 +1368,7 @@ class BaseConfig(object):
# INIT: Id "S1" respawning too fast: disabled for 5 minutes
serial_num = len(re.findall("-serial", self.qemu_opt))
if serial_num == 0:
- if re.search("-nographic", self.qemu_opt):
+ if re.search("-nographic", self.qemu_opt) or self.serialstdio:
self.qemu_opt += " -serial mon:stdio -serial null"
else:
self.qemu_opt += " -serial mon:vc -serial null"
@@ -1225,6 +1379,8 @@ class BaseConfig(object):
kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
self.bootparams)
+ if self.bios:
+ kernel_opts += " -bios %s" % self.bios
if self.dtb:
kernel_opts += " -dtb %s" % self.dtb
else:
@@ -1233,8 +1389,11 @@ class BaseConfig(object):
cmds = shlex.split(cmd)
logger.info('Running %s\n' % cmd)
pass_fds = []
- if self.lock_descriptor:
- pass_fds = [self.lock_descriptor.fileno()]
+ if self.taplock_descriptor:
+ pass_fds = [self.taplock_descriptor.fileno()]
+ if len(self.portlocks):
+ for descriptor in self.portlocks.values():
+ pass_fds.append(descriptor.fileno())
process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
self.qemupid = process.pid
retcode = process.wait()
@@ -1256,7 +1415,8 @@ class BaseConfig(object):
cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
- self.release_lock()
+ self.release_taplock()
+ self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
@@ -1274,10 +1434,7 @@ class BaseConfig(object):
self.cleaned = True
- def load_bitbake_env(self, mach=None):
- if self.bitbake_e:
- return
-
+ def run_bitbake_env(self, mach=None):
bitbake = shutil.which('bitbake')
if not bitbake:
return
@@ -1285,14 +1442,24 @@ class BaseConfig(object):
if not mach:
mach = self.get('MACHINE')
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ multiconfig = "mc:%s" % multiconfig
+
if mach:
- cmd = 'MACHINE=%s bitbake -e' % mach
+ cmd = 'MACHINE=%s bitbake -e %s' % (mach, multiconfig)
else:
- cmd = 'bitbake -e'
+ cmd = 'bitbake -e %s' % multiconfig
logger.info('Running %s...' % cmd)
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+
+ def load_bitbake_env(self, mach=None):
+ if self.bitbake_e:
+ return
+
try:
- self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ self.bitbake_e = self.run_bitbake_env(mach=mach)
except subprocess.CalledProcessError as err:
self.bitbake_e = ''
logger.warning("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
@@ -1307,7 +1474,13 @@ class BaseConfig(object):
if result and os.path.exists(result):
return result
- cmd = ('bitbake', 'qemu-helper-native', '-e')
+ cmd = ['bitbake', '-e']
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ cmd.append('mc:%s:qemu-helper-native' % multiconfig)
+ else:
+ cmd.append('qemu-helper-native')
+
logger.info('Running %s...' % str(cmd))
out = subprocess.check_output(cmd).decode('utf-8')
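
Taken together, the locking changes above replace the old module-level check_free_port() with a lockfile-guarded variant, so concurrent runqemu instances do not race for the same host-forwarded port. A rough standalone sketch of the idea (the function name and lock directory are illustrative, not the patch's API):

    import fcntl, os, socket
    from contextlib import closing

    def try_reserve_port(host, port, lockdir='/tmp/qemu-port-locks'):
        """Take a per-port lockfile, then verify nothing is listening on the port."""
        os.makedirs(lockdir, exist_ok=True)
        fd = open(os.path.join(lockdir, '%d.lock' % port), 'w')
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            fd.close()
            return None        # another process already holds this port's lock
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            if sock.connect_ex((host, port)) == 0:
                fd.close()     # something else is already listening on the port
                return None
        return fd              # keep the descriptor open to hold the lock
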
diff --git a/external/poky/scripts/runqemu-addptable2image b/external/poky/scripts/runqemu-addptable2image
index f0195ad8..ca294272 100755
--- a/external/poky/scripts/runqemu-addptable2image
+++ b/external/poky/scripts/runqemu-addptable2image
@@ -4,20 +4,8 @@
#
# Copyright (C) 2006-2007 OpenedHand Ltd.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
IMAGE=$1
IMAGEOUT=$2
diff --git a/external/poky/scripts/runqemu-export-rootfs b/external/poky/scripts/runqemu-export-rootfs
index 70cdcdbb..384c0917 100755
--- a/external/poky/scripts/runqemu-export-rootfs
+++ b/external/poky/scripts/runqemu-export-rootfs
@@ -2,18 +2,8 @@
#
# Copyright (c) 2005-2009 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
usage() {
echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
diff --git a/external/poky/scripts/runqemu-extract-sdk b/external/poky/scripts/runqemu-extract-sdk
index 4da3eb10..9bc0c07f 100755
--- a/external/poky/scripts/runqemu-extract-sdk
+++ b/external/poky/scripts/runqemu-extract-sdk
@@ -7,18 +7,8 @@
#
# Copyright (c) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
function usage() {
echo "Usage: $0 <image-tarball> <extract-dir>"
@@ -79,7 +69,7 @@ fi
pseudo_state_dir="$SDK_ROOTFS_DIR/../$(basename "$SDK_ROOTFS_DIR").pseudo_state"
pseudo_state_dir="$(readlink -f $pseudo_state_dir)"
-debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.tar\.'`"
+debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.rootfs\.tar'`"
if [ -e "$pseudo_state_dir" -a -z "$debug_image" ]; then
echo "Error: $pseudo_state_dir already exists!"
diff --git a/external/poky/scripts/runqemu-gen-tapdevs b/external/poky/scripts/runqemu-gen-tapdevs
index 869fee26..a6ee4517 100755
--- a/external/poky/scripts/runqemu-gen-tapdevs
+++ b/external/poky/scripts/runqemu-gen-tapdevs
@@ -9,18 +9,8 @@
#
# Copyright (C) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
uid=`id -u`
gid=`id -g`
diff --git a/external/poky/scripts/runqemu-ifdown b/external/poky/scripts/runqemu-ifdown
index 24869685..a104c37b 100755
--- a/external/poky/scripts/runqemu-ifdown
+++ b/external/poky/scripts/runqemu-ifdown
@@ -13,18 +13,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
diff --git a/external/poky/scripts/runqemu-ifup b/external/poky/scripts/runqemu-ifup
index 59a15eaa..bb661740 100755
--- a/external/poky/scripts/runqemu-ifup
+++ b/external/poky/scripts/runqemu-ifup
@@ -20,18 +20,8 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
diff --git a/external/poky/scripts/send-error-report b/external/poky/scripts/send-error-report
index 0ed7cc90..cfbcaa52 100755
--- a/external/poky/scripts/send-error-report
+++ b/external/poky/scripts/send-error-report
@@ -6,6 +6,9 @@
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Proca <andreea.b.proca@intel.com>
# Author: Michael Wood <michael.g.wood@intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import urllib.request, urllib.error
import sys
diff --git a/external/poky/scripts/send-pull-request b/external/poky/scripts/send-pull-request
index 883deacb..70b5a5cf 100755
--- a/external/poky/scripts/send-pull-request
+++ b/external/poky/scripts/send-pull-request
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2010-2011, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
diff --git a/external/poky/scripts/sstate-cache-management.sh b/external/poky/scripts/sstate-cache-management.sh
index 2ab450ab..f1706a22 100755
--- a/external/poky/scripts/sstate-cache-management.sh
+++ b/external/poky/scripts/sstate-cache-management.sh
@@ -2,18 +2,7 @@
# Copyright (c) 2012 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# Global vars
diff --git a/external/poky/scripts/sstate-diff-machines.sh b/external/poky/scripts/sstate-diff-machines.sh
index 27c6a330..1d721eb8 100755
--- a/external/poky/scripts/sstate-diff-machines.sh
+++ b/external/poky/scripts/sstate-diff-machines.sh
@@ -1,5 +1,7 @@
#!/bin/bash
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to compare sstate checksums between MACHINES.
# Execute script and compare generated list.M files.
# Using bash to have PIPESTATUS variable.
diff --git a/external/poky/scripts/sstate-sysroot-cruft.sh b/external/poky/scripts/sstate-sysroot-cruft.sh
index d9917f51..fbf1ca3c 100755
--- a/external/poky/scripts/sstate-sysroot-cruft.sh
+++ b/external/poky/scripts/sstate-sysroot-cruft.sh
@@ -1,5 +1,7 @@
#!/bin/sh
-
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to find files installed in sysroot which are not tracked by sstate manifest
# Global vars
diff --git a/external/poky/scripts/sysroot-relativelinks.py b/external/poky/scripts/sysroot-relativelinks.py
index ffe25472..56e36f3a 100755
--- a/external/poky/scripts/sysroot-relativelinks.py
+++ b/external/poky/scripts/sysroot-relativelinks.py
@@ -1,4 +1,8 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import os
diff --git a/external/poky/scripts/task-time b/external/poky/scripts/task-time
index e58040a9..bcd1e258 100755
--- a/external/poky/scripts/task-time
+++ b/external/poky/scripts/task-time
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import argparse
import os
diff --git a/external/poky/scripts/test-reexec b/external/poky/scripts/test-reexec
index 30e792c7..fccdac4d 100755
--- a/external/poky/scripts/test-reexec
+++ b/external/poky/scripts/test-reexec
@@ -3,21 +3,8 @@
# Test Script for task re-execution
#
# Copyright 2012 Intel Corporation
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to address issues for re-execution of
diff --git a/external/poky/scripts/test-remote-image b/external/poky/scripts/test-remote-image
index 27b1cae3..d209d228 100755
--- a/external/poky/scripts/test-remote-image
+++ b/external/poky/scripts/test-remote-image
@@ -1,19 +1,9 @@
#!/usr/bin/env python3
-
-# Copyright (c) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (c) 2014 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script is used to test public autobuilder images on remote hardware.
diff --git a/external/poky/scripts/tiny/dirsize.py b/external/poky/scripts/tiny/dirsize.py
index ddccc5a8..501516b0 100755
--- a/external/poky/scripts/tiny/dirsize.py
+++ b/external/poky/scripts/tiny/dirsize.py
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Display details of the root filesystem size, broken up by directory.
# Allows for limiting by size to focus on the larger files.
diff --git a/external/poky/scripts/tiny/ksize.py b/external/poky/scripts/tiny/ksize.py
index ea1ca7ff..db2b9ec3 100755
--- a/external/poky/scripts/tiny/ksize.py
+++ b/external/poky/scripts/tiny/ksize.py
@@ -1,24 +1,10 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-#
-# Display details of the kernel build size, broken up by built-in.o. Sort
+# Display details of the kernel build size, broken up by built-in.[o,a]. Sort
# the objects by size. Run from the top level kernel build directory.
#
# Author: Darren Hart <dvhart@linux.intel.com>
@@ -41,7 +27,7 @@ def usage():
class Sizes:
def __init__(self, glob):
self.title = glob
- p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
output = p.communicate()[0].splitlines()
if len(output) > 2:
sizes = output[-1].split()[0:4]
@@ -63,17 +49,17 @@ class Report:
path = os.path.dirname(filename)
p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
- shell=True, stdout=PIPE, stderr=PIPE)
+ shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
glob = ' '.join(p.communicate()[0].splitlines())
oreport = Report(glob, str(path) + "/*.o")
oreport.sizes.title = str(path) + "/*.o"
r.parts.append(oreport)
if subglob:
- p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
for f in p.communicate()[0].splitlines():
path = os.path.dirname(f)
- r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
+ r.parts.append(Report.create(f, path, str(path) + "/*/built-in.[o,a]"))
r.parts.sort(reverse=True)
for b in r.parts:
@@ -153,7 +139,7 @@ def main():
else:
assert False, "unhandled option"
- glob = "arch/*/built-in.o */built-in.o"
+ glob = "arch/*/built-in.[o,a] */built-in.[o,a]"
vmlinux = Report.create("vmlinux", "Linux Kernel", glob)
vmlinux.show()
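
The ksize.py change passes universal_newlines=True so that Popen returns str rather than bytes under Python 3, which is what the later splitlines()/split() handling assumes. A small illustration (the command is just an example binary):

    from subprocess import Popen, PIPE

    # Text mode: communicate() returns str, so splitlines()/split() yield str tokens.
    p = Popen('size -t /bin/true', shell=True, stdout=PIPE, stderr=PIPE,
              universal_newlines=True)
    output = p.communicate()[0].splitlines()
    if len(output) > 2:
        print(output[-1].split()[0:4])   # text, data, bss, dec columns of the totals line
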
diff --git a/external/poky/scripts/tiny/ksum.py b/external/poky/scripts/tiny/ksum.py
index d4f38921..8f0e4c05 100755
--- a/external/poky/scripts/tiny/ksum.py
+++ b/external/poky/scripts/tiny/ksum.py
@@ -1,22 +1,8 @@
-#!/usr/bin/env python
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
# module sizes for a built kernel, as a quick tool for comparing the
@@ -76,7 +62,7 @@ def is_ko_file(filename):
return False
def collect_object_files():
- print "Collecting object files recursively from %s..." % os.getcwd()
+ print("Collecting object files recursively from %s..." % os.getcwd())
for dirpath, dirs, files in os.walk(os.getcwd()):
for filename in files:
if is_ko_file(filename):
@@ -84,7 +70,7 @@ def collect_object_files():
elif is_vmlinux_file(filename):
global vmlinux_file
vmlinux_file = os.path.join(dirpath, filename)
- print "Collecting object files [DONE]"
+ print("Collecting object files [DONE]")
def add_ko_file(filename):
p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
@@ -92,9 +78,9 @@ def add_ko_file(filename):
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % filename[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % filename[len(os.getcwd()) + 1:])
global n_ko_files, ko_text, ko_data, ko_bss, ko_total
ko_text += int(sizes[0])
ko_data += int(sizes[1])
@@ -108,9 +94,9 @@ def get_vmlinux_totals():
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % vmlinux_file[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % vmlinux_file[len(os.getcwd()) + 1:])
global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
vmlinux_text += int(sizes[0])
vmlinux_data += int(sizes[1])
@@ -143,20 +129,20 @@ def main():
sum_ko_files()
get_vmlinux_totals()
- print "\nTotals:"
- print "\nvmlinux:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total)
- print "\nmodules (%d):" % n_ko_files
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (ko_text, ko_data, ko_bss, ko_total)
- print "\nvmlinux + modules:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ print("\nTotals:")
+ print("\nvmlinux:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total))
+ print("\nmodules (%d):" % n_ko_files)
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (ko_text, ko_data, ko_bss, ko_total))
+ print("\nvmlinux + modules:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
(vmlinux_text + ko_text, vmlinux_data + ko_data, \
- vmlinux_bss + ko_bss, vmlinux_total + ko_total)
+ vmlinux_bss + ko_bss, vmlinux_total + ko_total))
if __name__ == "__main__":
try:
diff --git a/external/poky/scripts/verify-bashisms b/external/poky/scripts/verify-bashisms
index a979bd29..fb0cc719 100755
--- a/external/poky/scripts/verify-bashisms
+++ b/external/poky/scripts/verify-bashisms
@@ -1,4 +1,7 @@
#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys, os, subprocess, re, shutil
diff --git a/external/poky/scripts/wic b/external/poky/scripts/wic
index a3c0f731..24700f38 100755
--- a/external/poky/scripts/wic
+++ b/external/poky/scripts/wic
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'wic' is the OpenEmbedded Image Creator that users can
# use to generate bootable images. Invoking it without any arguments
@@ -406,9 +392,9 @@ def imgpathtype(arg):
def wic_init_parser_cp(subparser):
subparser.add_argument("src",
- help="source spec")
- subparser.add_argument("dest", type=imgpathtype,
- help="image spec: <image>:<vfat partition>[<path>]")
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
+ subparser.add_argument("dest",
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
@@ -417,6 +403,9 @@ def wic_init_parser_rm(subparser):
help="path: <image>:<vfat partition><path>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
+ subparser.add_argument("-r", dest="recursive_delete", action="store_true", default=False,
+ help="remove directories and their contents recursively, "
+ " this only applies to ext* partition")
def expandtype(rules):
"""
@@ -501,31 +490,48 @@ subcommands = {
def init_parser(parser):
parser.add_argument("--version", action="version",
version="%(prog)s {version}".format(version=__version__))
+ parser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+
subparsers = parser.add_subparsers(dest='command', help=hlp.wic_usage)
for subcmd in subcommands:
subparser = subparsers.add_parser(subcmd, help=subcommands[subcmd][2])
subcommands[subcmd][3](subparser)
+class WicArgumentParser(argparse.ArgumentParser):
+ def format_help(self):
+ return hlp.wic_help
def main(argv):
- parser = argparse.ArgumentParser(
+ parser = WicArgumentParser(
description="wic version %s" % __version__)
init_parser(parser)
args = parser.parse_args(argv)
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
if "command" in vars(args):
if args.command == "help":
if args.help_topic is None:
parser.print_help()
- print()
- print("Please specify a help topic")
elif args.help_topic in helptopics:
hlpt = helptopics[args.help_topic]
hlpt[0](hlpt[1], hlpt[2])
return 0
+ # validate wic cp src and dest parameter to identify which one of it is
+ # image and cast it into imgtype
+ if args.command == "cp":
+ if ":" in args.dest:
+ args.dest = imgtype(args.dest)
+ elif ":" in args.src:
+ args.src = imgtype(args.src)
+ else:
+ raise argparse.ArgumentTypeError("no image or partition number specified.")
+
return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
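
With the wic cp change above, either src or dest may be the <image>:<partition> spec; main() applies the imgtype validation to whichever side contains ':'. A sketch of that selection logic using a hypothetical helper (not part of wic itself):

    def pick_image_argument(src, dest, imgtype):
        """Validate the side that names an image partition; leave the plain file alone."""
        if ':' in dest:
            return src, imgtype(dest)
        if ':' in src:
            return imgtype(src), dest
        raise ValueError('no image or partition number specified.')
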
diff --git a/external/poky/scripts/yocto-check-layer b/external/poky/scripts/yocto-check-layer
index 106c9552..b7c83c8b 100755
--- a/external/poky/scripts/yocto-check-layer
+++ b/external/poky/scripts/yocto-check-layer
@@ -3,7 +3,9 @@
# Yocto Project layer checking tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
@@ -22,7 +24,7 @@ import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
-from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures, check_bblayers
from oeqa.utils.commands import get_bb_vars
PROGNAME = 'yocto-check-layer'
@@ -82,7 +84,7 @@ def main():
logger.setLevel(logging.ERROR)
if not 'BUILDDIR' in os.environ:
- logger.error("You must source the environment before run this script.")
+ logger.error("You must source the environment before running this script.")
logger.error("$ source oe-init-build-env")
return 1
builddir = os.environ['BUILDDIR']
@@ -90,7 +92,7 @@ def main():
layers = detect_layers(args.layers, args.no_auto)
if not layers:
- logger.error("Fail to detect layers")
+ logger.error("Failed to detect layers")
return 1
if args.additional_layers:
additional_layers = detect_layers(args.additional_layers, args.no_auto)
@@ -106,7 +108,7 @@ def main():
for layer in layers:
if layer['type'] == LayerType.ERROR_BSP_DISTRO:
logger.error("%s: Can't be DISTRO and BSP type at the same time."\
- " The conf/distro and conf/machine folders was found."\
+ " Both conf/distro and conf/machine folders were found."\
% layer['name'])
layers.remove(layer)
elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
@@ -136,6 +138,13 @@ def main():
layer['type'] == LayerType.ERROR_BSP_DISTRO:
continue
+ if check_bblayers(bblayersconf, layer['path'], logger):
+ logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
+ "in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
+ results[layer['name']] = None
+ results_status[layer['name']] = 'SKIPPED (Layer under test should not be present in BBLAYERS)'
+ continue
+
logger.info('')
logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
layer['path']))
diff --git a/external/poky/scripts/yocto-check-layer-wrapper b/external/poky/scripts/yocto-check-layer-wrapper
index b5df9ce9..2e3b6990 100755
--- a/external/poky/scripts/yocto-check-layer-wrapper
+++ b/external/poky/scripts/yocto-check-layer-wrapper
@@ -6,7 +6,9 @@
# script to avoid a contaminated environment.
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
if [ -z "$BUILDDIR" ]; then
echo "Please source oe-init-build-env before run this script."