author     Jacobo Aragunde Pérez <jaragunde@igalia.com>        2019-06-17 13:12:00 +0200
committer  Jan-Simon Moeller <jsmoeller@linuxfoundation.org>   2019-06-20 12:57:47 +0000
commit     e6cc8d4e0977b4a3dda28c70caea3515f9d07edd (patch)
tree       37493398c6f35728ca73cc36c64e91e9bed33c1a /meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
parent     13e42025cf8c2b6f5b8d10666353e60da7d52370 (diff)
chromium68: custom toolchain, target v8 snapshot
Modify the chromium68 recipe to make it more like the one available in
meta-browser: https://github.com/OSSystems/meta-browser

Instead of passing Yocto's build flags to the cros (=ChromeOS) toolchain,
generate a toolchain definition file specifically for this build.

For the generation of the V8 memory snapshot, build the required files for
the target architecture, then use qemu-native to run them. Upstream chromium
addresses this problem by downloading a specific native toolchain for every
target platform, which we cannot do in this context.

The toolchain changes trigger an issue with ARMv7 builds; backported one
patch to address that specific issue.

These changes also triggered a problem with 32-bit targets: the generated
binaries were too big. Forced DEBUG_FLAGS to use -g1 (or -g0 otherwise).

Bug-AGL: SPEC-2514
Change-Id: Ib18431b628415c58a3c29595bfff10057e355a4b
Signed-off-by: Jacobo Aragunde Pérez <jaragunde@igalia.com>
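For illustration, a minimal sketch of what the generated v8-qemu-wrapper.sh
ends up containing for an ARMv7 target. The qemu command line is produced by
qemu_wrapper_cmdline() from qemu.bbclass; the kernel version and sysroot
paths below are hypothetical:

    #!/bin/sh
    # Runs target-architecture V8 build tools (e.g. mksnapshot) on the build
    # host under QEMU user-mode emulation.
    qemu-arm -r 4.14 -L /path/to/recipe-sysroot \
        -E LD_LIBRARY_PATH=/path/to/recipe-sysroot/usr/lib:/path/to/recipe-sysroot/lib \
        "$@"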
Diffstat (limited to 'meta-html5-framework/recipes-wam/chromium/chromium68_git.bb')
-rw-r--r--  meta-html5-framework/recipes-wam/chromium/chromium68_git.bb  197
1 file changed, 137 insertions(+), 60 deletions(-)
diff --git a/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb b/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
index c028226e..5a94e3c3 100644
--- a/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
+++ b/meta-html5-framework/recipes-wam/chromium/chromium68_git.bb
@@ -12,19 +12,24 @@ LIC_FILES_CHKSUM = "\
require gn-utils.inc
-inherit gettext
+inherit gettext qemu
-DEPENDS = "virtual/gettext wayland wayland-native pixman freetype fontconfig openssl pango cairo icu libxkbcommon libexif dbus pciutils udev libcap alsa-lib virtual/egl elfutils-native libdrm atk gperf-native gconf nss nss-native nspr nspr-native bison-native"
+DEPENDS = "virtual/gettext wayland wayland-native pixman freetype fontconfig openssl pango cairo icu libxkbcommon libexif dbus pciutils udev libcap alsa-lib virtual/egl elfutils-native libdrm atk gperf-native gconf nss nss-native nspr nspr-native bison-native qemu-native"
PROVIDES = "${BROWSER_APPLICATION}"
SRC_URI = "\
git://github.com/webosose/${PN};branch=@39.agl.guppy;protocol=https;rev=${SRCREV_chromium68} \
git://github.com/webosose/v8;destsuffix=git/src/v8;rev=${SRCREV_v8} \
+ file://v8-qemu-wrapper.patch \
"
SRCREV_chromium68 = "259a8b56bc833956f3acd318b7f19b028277beda"
SRCREV_v8 = "1e3af71f1ff3735e8a5b639c48dfca63a7b8a647"
+# Backport of https://chromium-review.googlesource.com/c/chromium/src/+/1138251/
+SRC_URI_append_armv7a = " file://0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch"
+SRC_URI_append_armv7ve = " file://0001-skia-Build-skcms-with-mfp16-format-ieee-on-GCC-ARM-b.patch"
+
# we don't include SRCPV in PV, so we have to manually include SRCREVs in do_fetch vardeps
do_fetch[vardeps] += "SRCREV_v8"
SRCREV_FORMAT = "main_v8"
@@ -35,6 +40,8 @@ SRC_DIR = "${S}/src"
OUT_DIR = "${WORKDIR}/build"
BUILD_TYPE = "Release"
+B = "${OUT_DIR}/${BUILD_TYPE}"
+
WEBRUNTIME_BUILD_TARGET = "webos:weboswebruntime"
BROWSER_APP_BUILD_TARGET = "chrome"
BROWSER_APPLICATION = "chromium68-browser"
@@ -47,11 +54,10 @@ WEBOS_SYSTEM_BUS_FILES_LOCATION = "${S}/files/sysbus"
PACKAGECONFIG ?= "jumbo use-upstream-wayland"
-# Options to enable debug/debug-webcore build.
+# Options to enable debug-webcore build.
# Add the following line to local.conf (or local.dev.inc) to enable them:
-# PACKAGECONFIG_append_pn-chromium68 = " debug debug-webcore"
-# By default debug is completely disabled to speed up build
-PACKAGECONFIG[debug] = "is_debug=false is_component_build=false symbol_level=2, is_debug=false symbol_level=0"
+# PACKAGECONFIG_append_pn-chromium68 = " debug-webcore"
+# Other debug options are controlled by sections later in this file
PACKAGECONFIG[debug-webcore] = "remove_webcore_debug_symbols=false,remove_webcore_debug_symbols=true"
# Set a default value for jumbo file merge of 8. This should be good for build
@@ -75,22 +81,12 @@ PACKAGECONFIG[use-upstream-wayland] = " \
"
GN_ARGS = "\
- cros_host_ar=\"${BUILD_AR}\"\
- cros_host_cc=\"${BUILD_CC}\"\
- cros_host_cxx=\"${BUILD_CXX}\"\
- cros_host_extra_ldflags=\"${BUILD_LDFLAGS}\"\
- cros_target_ar=\"${AR}\"\
- cros_target_cc=\"${CC}\"\
- cros_target_cxx=\"${CXX}\"\
enable_memorymanager_webapi=false\
ffmpeg_branding=\"Chrome\"\
host_os=\"linux\"\
- is_cross_linux_build=true\
- is_clang=false\
ozone_auto_platforms=false\
proprietary_codecs=true\
target_os=\"linux\"\
- target_sysroot=\"${STAGING_DIR_HOST}\"\
treat_warnings_as_errors=false\
is_agl=true\
use_cbe=true\
@@ -102,35 +98,139 @@ GN_ARGS = "\
use_ozone=true\
use_xkbcommon=true\
use_pmlog=false\
- use_sysroot=false\
use_system_debugger_abort=true\
use_webos_gpu_info_collector=false\
${PACKAGECONFIG_CONFARGS}\
"
-# We need this for cross to 32 bit architectures, as we do not have a way
-# to retrieve a host gcc for 32 bits in yocto
-GN_ARGS_TOOLCHAIN = "\
- is_host_clang=true\
- host_toolchain=\"//build/toolchain/yocto:clang_yocto_native\" \
- fatal_linker_warnings=false\
- use_custom_libcxx_for_host=true\
+# From Chromium's BUILDCONFIG.gn:
+# Set to enable the official build level of optimization. This has nothing
+# to do with branding, but enables an additional level of optimization above
+# release (!is_debug). This might be better expressed as a tri-state
+# (debug, release, official) but for historical reasons there are two
+# separate flags.
+# See also: https://groups.google.com/a/chromium.org/d/msg/chromium-dev/hkcb6AOX5gE/PPT1ukWoBwAJ
+GN_ARGS += "is_debug=false is_official_build=true"
+
+# is_cfi default value is true for x86-64 builds with is_official_build=true.
+# As of M63, we explicitly need to set it to false, otherwise we fail the
+# following assertion in //build/config/sanitizers/sanitizers.gni:
+# assert(!is_cfi || is_clang,
+# "is_cfi requires setting is_clang = true in 'gn args'")
+GN_ARGS += "is_cfi=false"
+
+# By default, passing is_official_build=true to GN causes its symbol_level
+# variable to be set to "2". This means the compiler will be passed "-g2" and
+# we will end up with a very large chrome binary (around 5Gb as of M58)
+# regardless of whether DEBUG_BUILD has been set or not. In addition, binutils,
+# file and other utilities are unable to read a 32-bit binary this size, which
+# causes it not to be stripped.
+# The solution is two-fold:
+# 1. Make sure -g is not passed on 32-bit architectures via DEBUG_FLAGS. -g is
+# the same as -g2. -g1 generates an 800MB binary, which is a lot more
+# manageable.
+# 2. Explicitly pass symbol_level=0 to GN. This causes -g0 to be passed
+# instead, so that if DEBUG_BUILD is not set GN will not create a huge debug
+# binary anyway. Since our compiler flags are passed after GN's, -g0 does
+# not cause any issues if DEBUG_BUILD is set, as -g1 will be passed later.
+DEBUG_FLAGS_remove_arm = "-g"
+DEBUG_FLAGS_append_arm = " -g1"
+DEBUG_FLAGS_remove_x86 = "-g"
+DEBUG_FLAGS_append_x86 = " -g1"
+GN_ARGS += "symbol_level=0"
+
+# We do not want to use Chromium's own Debian-based sysroots; it is easier to
+# just let Chromium's build system assume we are not using a sysroot at all
+# and let Yocto handle everything.
+GN_ARGS += "use_sysroot=false"
+
+# Toolchains we will use for the build. We need to point to the toolchain file
+# we've created, set the right target architecture and make sure we are not
+# using Chromium's toolchain (bundled clang, bundled binutils etc).
+GN_ARGS += "\
+ custom_toolchain=\"//build/toolchain/yocto:yocto_target\" \
+ gold_path=\"\" \
+ host_toolchain=\"//build/toolchain/yocto:yocto_native\" \
+ is_clang=${@is_default_cc_clang(d)} \
+ clang_base_path=\"${@clang_install_path(d)}\" \
+ clang_use_chrome_plugins=false \
+ linux_use_bundled_binutils=false \
+ target_cpu=\"${@gn_target_arch_name(d)}\" \
+ v8_snapshot_toolchain=\"//build/toolchain/yocto:yocto_target\" \
"
-# But for x86-64 previous setting fails in torque, so this makes build use
-# gcc on host, and use host toolchain for v8 snapshot and torque
-GN_ARGS_TOOLCHAIN_x86-64 = "\
- is_host_clang=false\
- use_custom_libcxx_for_host=false\
- v8_snapshot_toolchain=\"//build/toolchain/cros:host\" \
- cros_v8_snapshot_is_clang=false\
- cros_v8_snapshot_ar=\"${BUILD_AR}\"\
- cros_v8_snapshot_cc=\"${BUILD_CC}\"\
- cros_v8_snapshot_cxx=\"${BUILD_CXX}\"\
- cros_v8_snapshot_extra_ldflags=\"${BUILD_LDFLAGS}\"\
+# ARM builds need special additional flags (see ${S}/build/config/arm.gni).
+# If we do not pass |arm_arch| and friends to GN, it will deduce a value that
+# will then conflict with TUNE_CCARGS and CC.
+# Note that as of M61 in some corner cases parts of the build system disable
+# the "compiler_arm_fpu" GN config, whereas -mfpu is always passed via ${CC}.
+# We might want to rework that if there are issues in the future.
+def get_compiler_flag(params, param_name, d):
+    """Given a sequence of compiler arguments in |params|, returns the value of
+    an option |param_name| or an empty string if the option is not present."""
+    for param in params:
+        if param.startswith(param_name):
+            return param.split('=')[1]
+    return ''
+
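+# For example (values hypothetical): if TUNE_CCARGS contains "-mfpu=neon",
+# get_compiler_flag(d.getVar('TUNE_CCARGS').split(), '-mfpu', d) returns
+# "neon", and an empty string when no -mfpu option is present.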
+ARM_FLOAT_ABI = "${@bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hard', 'softfp', d)}"
+ARM_FPU = "${@get_compiler_flag(d.getVar('TUNE_CCARGS').split(), '-mfpu', d)}"
+ARM_TUNE = "${@get_compiler_flag(d.getVar('TUNE_CCARGS').split(), '-mcpu', d)}"
+ARM_VERSION_aarch64 = "8"
+ARM_VERSION_armv7a = "7"
+ARM_VERSION_armv7ve = "7"
+ARM_VERSION_armv6 = "6"
+
+# GN computes and defaults to -mthumb automatically where needed; forcing it
+# from the command line breaks the build in places where it ends up
+# overriding what GN wants.
+TUNE_CCARGS_remove = "-mthumb"
+
+GN_ARGS_append_arm = " \
+ arm_float_abi=\"${ARM_FLOAT_ABI}\" \
+ arm_fpu=\"${ARM_FPU}\" \
+ arm_tune=\"${ARM_TUNE}\" \
+ arm_version=${ARM_VERSION} \
"
+# tcmalloc's atomicops-internals-arm-v6plus.h uses the "dmb" instruction that
+# is not available on (some?) ARMv6 models, which causes the build to fail.
+GN_ARGS_append_armv6 += 'use_allocator="none"'
+# The WebRTC code fails to build on ARMv6 when NEON is enabled.
+# https://bugs.chromium.org/p/webrtc/issues/detail?id=6574
+GN_ARGS_append_armv6 += 'arm_use_neon=false'
-GN_ARGS += "${GN_ARGS_TOOLCHAIN}"
+# Disable glibc shims on musl
+# tcmalloc does not play well with musl as of M62 (and possibly earlier).
+# https://github.com/gperftools/gperftools/issues/693
+GN_ARGS_append_libc-musl = ' use_allocator_shim=false'
+
+# V8's JIT infrastructure requires binaries such as mksnapshot and
+# mkpeephole to be run on the host during the build. However, these
+# binaries must have the same bit-width as the target (e.g. an x86_64
+# host targeting ARMv6 needs to produce a 32-bit binary). Instead of
+# depending on a third Yocto toolchain, we just build those binaries
+# for the target and run them on the host with QEMU.
+python do_create_v8_qemu_wrapper () {
+    """Creates a small wrapper that invokes QEMU to run some target V8 binaries
+    on the host."""
+    qemu_libdirs = [d.expand('${STAGING_DIR_HOST}${libdir}'),
+                    d.expand('${STAGING_DIR_HOST}${base_libdir}')]
+    qemu_cmd = qemu_wrapper_cmdline(d, d.getVar('STAGING_DIR_HOST', True),
+                                    qemu_libdirs)
+    wrapper_path = d.expand('${B}/v8-qemu-wrapper.sh')
+    with open(wrapper_path, 'w') as wrapper_file:
+        wrapper_file.write("""#!/bin/sh
+
+# This file has been generated automatically.
+# It invokes QEMU to run binaries built for the target on the host during the
+# build process.
+
+%s "$@"
+""" % qemu_cmd)
+    os.chmod(wrapper_path, 0o755)
+}
+do_create_v8_qemu_wrapper[dirs] = "${B}"
+addtask create_v8_qemu_wrapper after do_patch before do_configure
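+# Sanity check (hypothetical invocation): once the build has produced the
+# target-architecture binaries in ${B}, the wrapper can be exercised by hand,
+# e.g.:
+#   ./v8-qemu-wrapper.sh ./mksnapshot --help
+# should run the target mksnapshot under QEMU on the build host.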
python do_write_toolchain_file () {
"""Writes a BUILD.gn file for Yocto detailing its toolchains."""
@@ -140,7 +240,6 @@ python do_write_toolchain_file () {
    write_toolchain_file(d, toolchain_file)
}
addtask write_toolchain_file after do_patch before do_configure
-# end TODO: drop this after we migrate to ubuntu 16.04 or above
# More options to speed up the build
GN_ARGS += "\
@@ -153,34 +252,12 @@ GN_ARGS += "\
use_pulseaudio=false\
"
-# Following options help build with icecc
-GN_ARGS += "\
- linux_use_bundled_binutils=false\
- use_debug_fission=false\
-"
-
# Respect ld-is-gold in DISTRO_FEATURES when enabling gold
# Similar patch applied in meta-browser
# http://patchwork.openembedded.org/patch/77755/
EXTRA_OEGN_GOLD = "${@bb.utils.contains('DISTRO_FEATURES', 'ld-is-gold', 'use_gold=true', 'use_gold=false', d)}"
GN_ARGS += "${EXTRA_OEGN_GOLD}"
-GN_ARGS_append_arm = " target_cpu=\"arm\""
-GN_ARGS_append_qemux86 = " target_cpu=\"x86\""
-GN_ARGS_append_aarch64 = " target_cpu=\"arm64\""
-
-# ARM builds need special additional flags (see ${S}/build/config/arm.gni).
-ARM_FLOAT_ABI = "${@bb.utils.contains('TUNE_FEATURES', 'callconvention-hard', 'hard', 'softfp', d)}"
-GN_ARGS_append_armv6 = " arm_arch=\"armv6\" arm_version=6 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-GN_ARGS_append_armv7a = " arm_arch=\"armv7-a\" arm_version=7 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-GN_ARGS_append_armv7ve = " arm_arch=\"armv7ve\" arm_version=7 arm_float_abi=\"${ARM_FLOAT_ABI}\""
-# tcmalloc's atomicops-internals-arm-v6plus.h uses the "dmb" instruction that
-# is not available on (some?) ARMv6 models, which causes the build to fail.
-GN_ARGS_append_armv6 += 'use_allocator="none"'
-# The WebRTC code fails to build on ARMv6 when NEON is enabled.
-# https://bugs.chromium.org/p/webrtc/issues/detail?id=6574
-GN_ARGS_append_armv6 += 'arm_use_neon=false'
-
# Doesn't build for armv[45]*
COMPATIBLE_MACHINE = "(-)"
COMPATIBLE_MACHINE_aarch64 = "(.*)"
@@ -206,7 +283,7 @@ do_compile() {
    fi
    export PATH="${S}/depot_tools:$PATH"
-    ${S}/depot_tools/ninja -C ${OUT_DIR}/${BUILD_TYPE} ${TARGET}
+    ${S}/depot_tools/ninja -v -C ${OUT_DIR}/${BUILD_TYPE} ${TARGET}
}
do_configure() {