author     Jacobo Aragunde Pérez <jaragunde@igalia.com>        2019-06-17 13:12:00 +0200
committer  Jan-Simon Moeller <jsmoeller@linuxfoundation.org>   2019-06-20 12:57:47 +0000
commit     e6cc8d4e0977b4a3dda28c70caea3515f9d07edd (patch)
tree       37493398c6f35728ca73cc36c64e91e9bed33c1a /meta-html5-framework/recipes-wam
parent     13e42025cf8c2b6f5b8d10666353e60da7d52370 (diff)
chromium68: custom toolchain, target v8 snapshot
Modify the chromium68 recipe to make it more like the one available in
meta-browser: https://github.com/OSSystems/meta-browser
Instead of passing Yocto's build flags to the cros (=ChromeOS)
toolchain, generate a toolchain definition file specifically for this
build.
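
Illustration only (not part of this commit): the sketch below mirrors, very
loosely, what such a generated toolchain definition file looks like. The
authoritative generator is write_toolchain_file() in gn-utils.inc, whose
changes appear in the diff below; the gcc_toolchain import path, argument
names and the helper's name here are assumptions made for the sketch.

    # Hypothetical sketch; the real file is written by write_toolchain_file()
    # in gn-utils.inc (see the diff below).
    def sketch_write_toolchain_file(d, file_path):
        """Write a BUILD.gn declaring a 'yocto_target' GN toolchain that reuses
        the cross compiler and flags exported by Yocto instead of cros_* vars."""
        values = {
            'cc': d.expand('${CC}'),
            'cxx': d.expand('${CXX}'),
            'ar': d.expand('${AR}'),
            'extra_cflags': d.expand('${CFLAGS}'),
            'extra_cxxflags': d.expand('${CXXFLAGS}'),
            'extra_ldflags': d.expand('${LDFLAGS}'),
        }
        template = (
            'import("//build/toolchain/gcc_toolchain.gni")\n\n'
            'gcc_toolchain("yocto_target") {\n'
            '  cc = "%(cc)s"\n'
            '  cxx = "%(cxx)s"\n'
            '  ar = "%(ar)s"\n'
            '  ld = cxx  # GN expects a compiler driver here, not the linker.\n'
            '  extra_cflags = "%(extra_cflags)s"\n'
            '  extra_cxxflags = "%(extra_cxxflags)s"\n'
            '  extra_ldflags = "%(extra_ldflags)s"\n'
            '  toolchain_args = {\n'
            '    current_os = "linux"\n'
            '    is_clang = false\n'
            '  }\n'
            '}\n')
        with open(file_path, 'w') as f:
            f.write(template % values)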
For the generation of the V8 memory snapshot, build the required
binaries (mksnapshot, torque) for the target architecture, then use
qemu-native to run them on the build host.
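
As a rough sketch of the mechanism (the recipe's actual
do_create_v8_qemu_wrapper task and the v8-qemu-wrapper.patch that prepends
the wrapper to the mksnapshot/torque command lines are both in the diff
below; qemu_wrapper_cmdline() is provided by Yocto's qemu.bbclass and is
passed in as a plain string here only to keep the sketch self-contained):

    import os

    def sketch_create_v8_qemu_wrapper(build_dir, qemu_cmd):
        """Write v8-qemu-wrapper.sh, a tiny shell script that runs a target
        binary (mksnapshot, torque, ...) under user-mode QEMU on the host.

        qemu_cmd is the QEMU invocation for the target sysroot, e.g. the
        string returned by qemu_wrapper_cmdline() when the recipe inherits
        the qemu class.
        """
        wrapper_path = os.path.join(build_dir, 'v8-qemu-wrapper.sh')
        with open(wrapper_path, 'w') as wrapper_file:
            wrapper_file.write('#!/bin/sh\n'
                               '# Runs a target binary under QEMU during the build.\n'
                               '%s "$@"\n' % qemu_cmd)
        os.chmod(wrapper_path, 0o755)
        return wrapper_path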
Upstream Chromium addresses this problem by downloading a specific
native toolchain for every target platform, which we cannot do in this
context.
The toolchain changes triggered an issue with ARMv7 builds; one patch
has been backported to address that specific issue.
These changes also triggered a problem with 32-bit targets: the
generated binaries were too big. DEBUG_FLAGS is therefore forced to
use -g1 (or -g0 otherwise).
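
The effect on the compiler flags can be sketched as follows (hypothetical
helper for illustration only; the recipe below achieves the same thing
declaratively with DEBUG_FLAGS_remove_arm/_append_arm, the x86 equivalents,
and symbol_level=0 in GN_ARGS):

    def sketch_cap_debug_info(d):
        """Downgrade -g (an alias for -g2) to -g1 in DEBUG_FLAGS so that
        32-bit chrome binaries stay small enough for binutils to read and
        strip."""
        flags = (d.getVar('DEBUG_FLAGS') or '').split()
        flags = ['-g1' if flag == '-g' else flag for flag in flags]
        d.setVar('DEBUG_FLAGS', ' '.join(flags))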
Bug-AGL: SPEC-2514
Change-Id: Ib18431b628415c58a3c29595bfff10057e355a4b
Signed-off-by: Jacobo Aragunde Pérez <jaragunde@igalia.com>
Diffstat (limited to 'meta-html5-framework/recipes-wam')
4 files changed, 298 insertions, 74 deletions
diff --git a/meta-html5-framework/recipes-wam/chromium/chromium68/0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch b/meta-html5-framework/recipes-wam/chromium/chromium68/0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch
new file mode 100644
index 00000000..22f50266
--- /dev/null
+++ b/meta-html5-framework/recipes-wam/chromium/chromium68/0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch
@@ -0,0 +1,60 @@
+From a01fb357a1ce755834779c905a14c3376e1a6239 Mon Sep 17 00:00:00 2001
+From: Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
+Date: Tue, 17 Jul 2018 11:56:52 +0000
+Subject: [PATCH] skia: Build skcms with -mfp16-format=ieee on GCC ARM builds
+
+skcms' Transform_inl.h assumes support for 16-bit floating point in the
+__ARM_FP macro means the __fp16 (and corresponding SIMD data type) is
+present.
+
+While this is currently true for LLVM, which always sets the equivalent of
+GCC's -mfp16-format=ieee internally on ARM builds, GCC explicitly needs that
+option to be specified in order to enable support for __fp16. Doing so
+allows GCC ARM builds to proceed without the following error:
+
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h: In function 'F_from_Half_':
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h:101:72: error: 'float16x4_t' undeclared (first use in this function); did you mean 'float32x4_t'?
+   SI ATTR F NS(F_from_Half_(U16 half)) { return vcvt_f32_f16((float16x4_t)half); }
+                                                               ^~~~~~~~~~~
+                                                               float32x4_t
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h:101:72: note: each undeclared identifier is reported only once for each function it appears in
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h:101:84: error: expected ')' before 'half'
+   SI ATTR F NS(F_from_Half_(U16 half)) { return vcvt_f32_f16((float16x4_t)half); }
+                                                                           ^~~~
+                                                                           )
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h: In function 'Half_from_F_':
+  ../../third_party/skia/third_party/skcms/src/Transform_inl.h:102:5: error: can't convert a value of type 'int' to vector type '__vector(4) short unsigned int' which has different size
+   SI ATTR U16 NS(Half_from_F_(F f)) { return (U16)vcvt_f16_f32( f); }
+     ^~
+
+Bug: 819294
+Change-Id: Ib7417fb9bdc6bd93553084053ba69f9d3409b112
+Reviewed-on: https://chromium-review.googlesource.com/1138251
+Reviewed-by: Florin Malita <fmalita@chromium.org>
+Commit-Queue: Raphael Kubo da Costa (CET) <raphael.kubo.da.costa@intel.com>
+Cr-Commit-Position: refs/heads/master@{#575608}
+---
+ src/skia/BUILD.gn | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/src/skia/BUILD.gn b/src/skia/BUILD.gn
+index 294b9ac16..6efada326 100644
+--- a/src/skia/BUILD.gn
++++ b/src/skia/BUILD.gn
+@@ -242,6 +242,13 @@ source_set("skcms") {
+     ]
+   }
+
++  # LLVM automatically sets the equivalent of GCC's -mfp16-format=ieee on ARM
++  # builds by default, while GCC itself does not. We need it to enable support
++  # for half-precision floating point data types used by SKCMS on ARM.
++  if (is_linux && !is_clang && current_cpu == "arm") {
++    cflags += [ "-mfp16-format=ieee" ]
++  }
++
+   public = [
+     "//third_party/skia/third_party/skcms/skcms.h",
+   ]
+--
+2.11.0
+
diff --git a/meta-html5-framework/recipes-wam/chromium/chromium68/v8-qemu-wrapper.patch b/meta-html5-framework/recipes-wam/chromium/chromium68/v8-qemu-wrapper.patch
new file mode 100644
index 00000000..485766b0
--- /dev/null
+++ b/meta-html5-framework/recipes-wam/chromium/chromium68/v8-qemu-wrapper.patch
@@ -0,0 +1,40 @@
+Upstream-Status: Inappropriate [embedder specific]
+
+The patch below makes the V8 binaries run during the build be invoked through
+QEMU, as they are built for the target.
+
+Signed-off-by: Raphael Kubo da Costa <raphael.kubo.da.costa@intel.com>
+Signed-off-by: Maksim Sisov <msisov@igalia.com>
+
+Index: git/src/tools/v8_context_snapshot/BUILD.gn
+===================================================================
+--- git.orig/src/tools/v8_context_snapshot/BUILD.gn
++++ git/src/tools/v8_context_snapshot/BUILD.gn
+@@ -62,6 +62,7 @@ if (use_v8_context_snapshot) {
+     output_path = rebase_path(output_file, root_build_dir)
+
+     args = [
++      "./v8-qemu-wrapper.sh",
+       "./" + rebase_path(
+               get_label_info(
+                   ":v8_context_snapshot_generator($v8_snapshot_toolchain)",
+Index: git/src/v8/BUILD.gn
+===================================================================
+--- git.orig/src/v8/BUILD.gn
++++ git/src/v8/BUILD.gn
+@@ -900,6 +900,7 @@ action("run_torque") {
+   }
+
+   args = [
++    "./v8-qemu-wrapper.sh",
+     "./" + rebase_path(get_label_info(":torque($v8_torque_toolchain)",
+                                       "root_out_dir") + "/torque",
+                        root_build_dir),
+@@ -977,6 +978,7 @@ template("run_mksnapshot") {
+     data = []
+
+     args = [
++      "./v8-qemu-wrapper.sh",
+       "./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
+                                         "root_out_dir") + "/mksnapshot",
+                          root_build_dir),
diff --git a/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb b/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
index c028226e..5a94e3c3 100644
--- a/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
+++ b/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
@@ -12,19 +12,24 @@ LIC_FILES_CHKSUM = "\
 
 require gn-utils.inc
 
-inherit gettext
+inherit gettext qemu
 
-DEPENDS = "virtual/gettext wayland wayland-native pixman freetype fontconfig openssl pango cairo icu libxkbcommon libexif dbus pciutils udev libcap alsa-lib virtual/egl elfutils-native libdrm atk gperf-native gconf nss nss-native nspr nspr-native bison-native"
+DEPENDS = "virtual/gettext wayland wayland-native pixman freetype fontconfig openssl pango cairo icu libxkbcommon libexif dbus pciutils udev libcap alsa-lib virtual/egl elfutils-native libdrm atk gperf-native gconf nss nss-native nspr nspr-native bison-native qemu-native"
 
 PROVIDES = "${BROWSER_APPLICATION}"
 
 SRC_URI = "\
     git://github.com/webosose/${PN};branch=@39.agl.guppy;protocol=https;rev=${SRCREV_chromium68} \
     git://github.com/webosose/v8;destsuffix=git/src/v8;rev=${SRCREV_v8} \
+    file://v8-qemu-wrapper.patch \
 "
 
 SRCREV_chromium68 = "259a8b56bc833956f3acd318b7f19b028277beda"
 SRCREV_v8 = "1e3af71f1ff3735e8a5b639c48dfca63a7b8a647"
 
+# Backport of https://chromium-review.googlesource.com/c/chromium/src/+/1138251/
+SRC_URI_append_armv7a = " file://0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch"
+SRC_URI_append_armv7ve = " file://0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch"
+
 # we don't include SRCPV in PV, so we have to manually include SRCREVs in do_fetch vardeps
 do_fetch[vardeps] += "SRCREV_v8"
 SRCREV_FORMAT = "main_v8"
@@ -35,6 +40,8 @@ SRC_DIR = "${S}/src"
 OUT_DIR = "${WORKDIR}/build"
 BUILD_TYPE = "Release"
 
+B = "${OUT_DIR}/${BUILD_TYPE}"
+
 WEBRUNTIME_BUILD_TARGET = "webos:weboswebruntime"
 BROWSER_APP_BUILD_TARGET = "chrome"
 BROWSER_APPLICATION = "chromium68-browser"
@@ -47,11 +54,10 @@ WEBOS_SYSTEM_BUS_FILES_LOCATION = "${S}/files/sysbus"
 
 PACKAGECONFIG ?= "jumbo use-upstream-wayland"
 
-# Options to enable debug/debug-webcore build.
+# Options to enable debug-webcore build.
 # Add the following line to local.conf (or local.dev.inc) to enable them:
-# PACKAGECONFIG_append_pn-chromium68 = " debug debug-webcore"
-# By default debug is completely disabled to speed up build
-PACKAGECONFIG[debug] = "is_debug=false is_component_build=false symbol_level=2, is_debug=false symbol_level=0"
+# PACKAGECONFIG_append_pn-chromium68 = " debug-webcore"
+# Other debug options are controlled by sections later in this file
 PACKAGECONFIG[debug-webcore] = "remove_webcore_debug_symbols=false,remove_webcore_debug_symbols=true"
 
 # Set a default value for jumbo file merge of 8. This should be good for build
@@ -75,22 +81,12 @@ PACKAGECONFIG[use-upstream-wayland] = " \
 "
 
 GN_ARGS = "\
-    cros_host_ar=\"${BUILD_AR}\"\
-    cros_host_cc=\"${BUILD_CC}\"\
-    cros_host_cxx=\"${BUILD_CXX}\"\
-    cros_host_extra_ldflags=\"${BUILD_LDFLAGS}\"\
-    cros_target_ar=\"${AR}\"\
-    cros_target_cc=\"${CC}\"\
-    cros_target_cxx=\"${CXX}\"\
     enable_memorymanager_webapi=false\
     ffmpeg_branding=\"Chrome\"\
     host_os=\"linux\"\
-    is_cross_linux_build=true\
-    is_clang=false\
     ozone_auto_platforms=false\
     proprietary_codecs=true\
     target_os=\"linux\"\
-    target_sysroot=\"${STAGING_DIR_HOST}\"\
     treat_warnings_as_errors=false\
     is_agl=true\
     use_cbe=true\
@@ -102,35 +98,139 @@ GN_ARGS = "\
     use_ozone=true\
     use_xkbcommon=true\
     use_pmlog=false\
-    use_sysroot=false\
     use_system_debugger_abort=true\
     use_webos_gpu_info_collector=false\
     ${PACKAGECONFIG_CONFARGS}\
 "
 
-# We need this for cross to 32 bit architectures, as we do not have a way
-# to retrieve a host gcc for 32 bits in yocto
-GN_ARGS_TOOLCHAIN = "\
-    is_host_clang=true\
-    host_toolchain=\"//build/toolchain/yocto:clang_yocto_native\" \
-    fatal_linker_warnings=false\
-    use_custom_libcxx_for_host=true\
+# From Chromium's BUILDCONFIG.gn:
+# Set to enable the official build level of optimization. This has nothing
+# to do with branding, but enables an additional level of optimization above
+# release (!is_debug). This might be better expressed as a tri-state
+# (debug, release, official) but for historical reasons there are two
+# separate flags.
+# See also: https://groups.google.com/a/chromium.org/d/msg/chromium-dev/hkcb6AOX5gE/PPT1ukWoBwAJ
+GN_ARGS += "is_debug=false is_official_build=true"
+
+# is_cfi default value is true for x86-64 builds with is_official_build=true.
+# As of M63, we explicitly need to set it to false, otherwise we fail the
+# following assertion in //build/config/sanitizers/sanitizers.gni:
+#     assert(!is_cfi || is_clang,
+#            "is_cfi requires setting is_clang = true in 'gn args'")
+GN_ARGS += "is_cfi=false"
+
+# By default, passing is_official_build=true to GN causes its symbol_level
+# variable to be set to "2". This means the compiler will be passed "-g2" and
+# we will end up with a very large chrome binary (around 5Gb as of M58)
+# regardless of whether DEBUG_BUILD has been set or not. In addition, binutils,
+# file and other utilities are unable to read a 32-bit binary this size, which
+# causes it not to be stripped.
+# The solution is two-fold:
+# 1. Make sure -g is not passed on 32-bit architectures via DEBUG_FLAGS. -g is
+#    the same as -g2. -g1 generates an 800MB binary, which is a lot more
+#    manageable.
+# 2. Explicitly pass symbol_level=0 to GN. This causes -g0 to be passed
+#    instead, so that if DEBUG_BUILD is not set GN will not create a huge debug
+#    binary anyway. Since our compiler flags are passed after GN's, -g0 does
+#    not cause any issues if DEBUG_BUILD is set, as -g1 will be passed later.
+DEBUG_FLAGS_remove_arm = "-g"
+DEBUG_FLAGS_append_arm = "-g1"
+DEBUG_FLAGS_remove_x86 = "-g"
+DEBUG_FLAGS_append_x86 = "-g1"
+GN_ARGS += "symbol_level=0"
+
+# We do not want to use Chromium's own Debian-based sysroots, it is easier to
+# just let Chromium's build system assume we are not using a sysroot at all and
+# let Yocto handle everything.
+GN_ARGS += "use_sysroot=false"
+
+# Toolchains we will use for the build. We need to point to the toolchain file
+# we've created, set the right target architecture and make sure we are not
+# using Chromium's toolchain (bundled clang, bundled binutils etc).
+GN_ARGS += "\
+    custom_toolchain=\"//build/toolchain/yocto:yocto_target\" \
+    gold_path=\"\" \
+    host_toolchain=\"//build/toolchain/yocto:yocto_native\" \
+    is_clang=${@is_default_cc_clang(d)} \
+    clang_base_path=\"${@clang_install_path(d)}\" \
+    clang_use_chrome_plugins=false \
+    linux_use_bundled_binutils=false \
+    target_cpu=\"${@gn_target_arch_name(d)}\" \
+    v8_snapshot_toolchain=\"//build/toolchain/yocto:yocto_target\" \
 "
 
-# But for x86-64 previous setting fails in torque, so this makes build use
-# gcc on host, and use host toolchain for v8 snapshot and torque
-GN_ARGS_TOOLCHAIN_x86-64 = "\
-    is_host_clang=false\
-    use_custom_libcxx_for_host=false\
-    v8_snapshot_toolchain=\"//build/toolchain/cros:host\" \
-    cros_v8_snapshot_is_clang=false\
-    cros_v8_snapshot_ar=\"${BUILD_AR}\"\
-    cros_v8_snapshot_cc=\"${BUILD_CC}\"\
-    cros_v8_snapshot_cxx=\"${BUILD_CXX}\"\
-    cros_v8_snapshot_extra_ldflags=\"${BUILD_LDFLAGS}\"\
+# ARM builds need special additional flags (see ${S}/build/config/arm.gni).
+# If we do not pass |arm_arch| and friends to GN, it will deduce a value that
+# will then conflict with TUNE_CCARGS and CC.
+# Note that as of M61 in some corner cases parts of the build system disable
+# the "compiler_arm_fpu" GN config, whereas -mfpu is always passed via ${CC}.
+# We might want to rework that if there are issues in the future.
+def get_compiler_flag(params, param_name, d):
+    """Given a sequence of compiler arguments in |params|, returns the value of
+    an option |param_name| or an empty string if the option is not present."""
+    for param in params:
+        if param.startswith(param_name):
+            return param.split('=')[1]
+    return ''
+
+ARM_FLOAT_ABI = "${@bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hard', 'softfp', d)}"
+ARM_FPU = "${@get_compiler_flag(d.getVar('TUNE_CCARGS').split(), '-mfpu', d)}"
+ARM_TUNE = "${@get_compiler_flag(d.getVar('TUNE_CCARGS').split(), '-mcpu', d)}"
+ARM_VERSION_aarch64 = "8"
+ARM_VERSION_armv7a = "7"
+ARM_VERSION_armv7ve = "7"
+ARM_VERSION_armv6 = "6"
+
+# GN computes and defaults to it automatically where needed
+# forcing it from cmdline breaks build on places where it ends up
+# overriding what GN wants
+TUNE_CCARGS_remove = "-mthumb"
+
+GN_ARGS_append_arm = " \
+    arm_float_abi=\"${ARM_FLOAT_ABI}\" \
+    arm_fpu=\"${ARM_FPU}\" \
+    arm_tune=\"${ARM_TUNE}\" \
+    arm_version=${ARM_VERSION} \
 "
+
+# tcmalloc's atomicops-internals-arm-v6plus.h uses the "dmb" instruction that
+# is not available on (some?) ARMv6 models, which causes the build to fail.
+GN_ARGS_append_armv6 += 'use_allocator="none"'
+# The WebRTC code fails to build on ARMv6 when NEON is enabled.
+# https://bugs.chromium.org/p/webrtc/issues/detail?id=6574
+GN_ARGS_append_armv6 += 'arm_use_neon=false'
 
-GN_ARGS += "${GN_ARGS_TOOLCHAIN}"
+# Disable glibc shims on musl
+# tcmalloc does not play well with musl as of M62 (and possibly earlier).
+# https://github.com/gperftools/gperftools/issues/693
+GN_ARGS_append_libc-musl = ' use_allocator_shim=false'
+
+# V8's JIT infrastructure requires binaries such as mksnapshot and
+# mkpeephole to be run in the host during the build. However, these
+# binaries must have the same bit-width as the target (e.g. a x86_64
+# host targeting ARMv6 needs to produce a 32-bit binary). Instead of
+# depending on a third Yocto toolchain, we just build those binaries
+# for the target and run them on the host with QEMU.
+python do_create_v8_qemu_wrapper () {
+    """Creates a small wrapper that invokes QEMU to run some target V8 binaries
+    on the host."""
+    qemu_libdirs = [d.expand('${STAGING_DIR_HOST}${libdir}'),
+                    d.expand('${STAGING_DIR_HOST}${base_libdir}')]
+    qemu_cmd = qemu_wrapper_cmdline(d, d.getVar('STAGING_DIR_HOST', True),
+                                    qemu_libdirs)
+    wrapper_path = d.expand('${B}/v8-qemu-wrapper.sh')
+    with open(wrapper_path, 'w') as wrapper_file:
+        wrapper_file.write("""#!/bin/sh
+
+# This file has been generated automatically.
+# It invokes QEMU to run binaries built for the target in the host during the
+# build process.
+
+%s "$@"
+""" % qemu_cmd)
+    os.chmod(wrapper_path, 0o755)
+}
+do_create_v8_qemu_wrapper[dirs] = "${B}"
+addtask create_v8_qemu_wrapper after do_patch before do_configure
 
 python do_write_toolchain_file () {
     """Writes a BUILD.gn file for Yocto detailing its toolchains."""
@@ -140,7 +240,6 @@ python do_write_toolchain_file () {
     write_toolchain_file(d, toolchain_file)
 }
 addtask write_toolchain_file after do_patch before do_configure
-# end TODO: drop this after we migrate to ubuntu 16.04 or above
 
 # More options to speed up the build
 GN_ARGS += "\
@@ -153,34 +252,12 @@ GN_ARGS += "\
     use_pulseaudio=false\
 "
 
-# Following options help build with icecc
-GN_ARGS += "\
-    linux_use_bundled_binutils=false\
-    use_debug_fission=false\
-"
-
 # Respect ld-is-gold in DISTRO_FEATURES when enabling gold
 # Similar patch applied in meta-browser
 # http://patchwork.openembedded.org/patch/77755/
 EXTRA_OEGN_GOLD = "${@bb.utils.contains('DISTRO_FEATURES', 'ld-is-gold', 'use_gold=true', 'use_gold=false', d)}"
 GN_ARGS += "${EXTRA_OEGN_GOLD}"
 
-GN_ARGS_append_arm = " target_cpu=\"arm\""
-GN_ARGS_append_qemux86 = " target_cpu=\"x86\""
-GN_ARGS_append_aarch64 = " target_cpu=\"arm64\""
-
-# ARM builds need special additional flags (see ${S}/build/config/arm.gni).
-ARM_FLOAT_ABI = "${@bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hard', 'softfp', d)}"
-GN_ARGS_append_armv6 = " arm_arch=\"armv6\" arm_version=6 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-GN_ARGS_append_armv7a = " arm_arch=\"armv7-a\" arm_version=7 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-GN_ARGS_append_armv7ve = " arm_arch=\"armv7ve\" arm_version=7 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-# tcmalloc's atomicops-internals-arm-v6plus.h uses the "dmb" instruction that
-# is not available on (some?) ARMv6 models, which causes the build to fail.
-GN_ARGS_append_armv6 += 'use_allocator="none"'
-# The WebRTC code fails to build on ARMv6 when NEON is enabled.
-# https://bugs.chromium.org/p/webrtc/issues/detail?id=6574
-GN_ARGS_append_armv6 += 'arm_use_neon=false'
-
 # Doesn't build for armv[45]*
 COMPATIBLE_MACHINE = "(-)"
 COMPATIBLE_MACHINE_aarch64 = "(.*)"
@@ -206,7 +283,7 @@ do_compile() {
     fi
 
     export PATH="${S}/depot_tools:$PATH"
-    ${S}/depot_tools/ninja -C ${OUT_DIR}/${BUILD_TYPE} ${TARGET}
+    ${S}/depot_tools/ninja -v -C ${OUT_DIR}/${BUILD_TYPE} ${TARGET}
 }
 
 do_configure() {
diff --git a/meta-html5-framework/recipes-wam/chromium/gn-utils.inc b/meta-html5-framework/recipes-wam/chromium/gn-utils.inc
index bca7af8d..0fd55a63 100644
--- a/meta-html5-framework/recipes-wam/chromium/gn-utils.inc
+++ b/meta-html5-framework/recipes-wam/chromium/gn-utils.inc
@@ -16,20 +16,67 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 
-def gn_arch_name(yocto_arch):
-    """Translates between Yocto's architecture values and the corresponding
-    ones used by GN."""
-    translation_table = {
-        'aarch64': 'arm64',
-        'arm': 'arm',
-        'i586': 'x86',
-        'x86_64': 'x64',
+# GN host architecture helpers.
+#
+# BUILD_ARCH's value corresponds to what uname returns as the machine name.
+# The mapping in gn_host_arch_name() tries to match several possible values
+# returned by the Linux kernel in uname(2) into the corresponding values GN
+# understands.
+def gn_host_arch_name(d):
+    """Returns a GN architecture name corresponding to the build host's machine
+    architecture."""
+    import re
+    arch_translations = {
+        r'aarch64.*': 'arm64',
+        r'arm.*': 'arm',
+        r'i[3456]86$': 'x86',
+        r'x86_64$': 'x64',
     }
-    try:
-        return translation_table[yocto_arch]
-    except KeyError:
-        bb.msg.fatal('"%s" is not a supported architecture.' % yocto_arch)
+    build_arch = d.getVar("BUILD_ARCH")
+    for arch_regexp, gn_arch_name in arch_translations.items():
+        if re.match(arch_regexp, build_arch):
+            return gn_arch_name
+    bb.fatal('Unsuported BUILD_ARCH value: "%s"' % build_arch)
+
+# GN target architecture helpers.
+#
+# Determining the target architecture is more difficult, as there are many
+# different values we can use on the Yocto side (e.g. TUNE_ARCH, TARGET_ARCH,
+# MACHINEOVERRIDES etc). What we do is define the mapping with regular,
+# non-Python variables with overrides that are generic enough (i.e. "x86"
+# instead of "i586") and then use gn_target_arch_name() to return the right
+# value with some validation.
+GN_TARGET_ARCH_NAME_aarch64 = "arm64"
+GN_TARGET_ARCH_NAME_arm = "arm"
+GN_TARGET_ARCH_NAME_x86 = "x86"
+GN_TARGET_ARCH_NAME_x86-64 = "x64"
+
+BUILD_CC_toolchain-clang = "clang"
+BUILD_CXX_toolchain-clang = "clang++"
+BUILD_LD_toolchain-clang = "clang"
+
+# knob for clang, when using meta-clang to provide clang and case where
+# clang happens to be default compiler for OE we should let it use clang
+def is_default_cc_clang(d):
+    """Return true if clang is default cross compiler."""
+    toolchain = d.getVar("TOOLCHAIN")
+    overrides = d.getVar("OVERRIDES")
+    if toolchain == "clang" and "toolchain-clang" in overrides.split(":"):
+        return "true"
+    return "false"
+
+def clang_install_path(d):
+    """Return clang compiler install path."""
+    return d.getVar("STAGING_BINDIR_NATIVE")
+
+def gn_target_arch_name(d):
+    """Returns a GN architecture name corresponding to the target machine's
+    architecture."""
+    name = d.getVar("GN_TARGET_ARCH_NAME")
+    if name is None:
+        bb.fatal('Unsupported target architecture. A valid override for the '
+                 'GN_TARGET_ARCH_NAME variable could not be found.')
+    return name
 
 def write_toolchain_file(d, file_path):
     """Creates a complete GN toolchain file in |file_path|."""
@@ -70,7 +117,7 @@ def write_toolchain_file(d, file_path):
 
     native_toolchain = {
         'toolchain_name': 'yocto_native',
-        'current_cpu': gn_arch_name(d.getVar('BUILD_ARCH', True)),
+        'current_cpu': gn_host_arch_name(d),
         'cc': d.expand('${BUILD_CC}'),
         'cxx': d.expand('${BUILD_CXX}'),
         'ar': d.expand('${BUILD_AR}'),
@@ -83,7 +130,7 @@ def write_toolchain_file(d, file_path):
     }
     target_toolchain = {
         'toolchain_name': 'yocto_target',
-        'current_cpu': gn_arch_name(d.getVar('TUNE_ARCH', True)),
+        'current_cpu': gn_target_arch_name(d),
         'cc': d.expand('${CC}'),
         'cxx': d.expand('${CXX}'),
         'ar': d.expand('${AR}'),