From c4a6287185179732dfc1e903c195ff90c19f1065 Mon Sep 17 00:00:00 2001
From: Frode Isaksen
Date: Tue, 19 Dec 2017 11:15:35 +0000
Subject: This layer provides Energy Aware Scheduling (EAS) patches

For the moment only for Renesas R-Car Gen3 SoCs. Can be expanded to
other SoCs by setting the machine feature biglittle and providing the
relevant EAS patches.

Bug-AGL: SPEC-813

Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen
---
 ...SD_ASYM_CPUCAPACITY-topology-flag-when-cp.patch | 151 +++++++++++++++++++++
 1 file changed, 151 insertions(+)
 create mode 100644 meta-eas/recipes-kernel/linux/linux-renesas/0043-arm-Update-SD_ASYM_CPUCAPACITY-topology-flag-when-cp.patch

diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0043-arm-Update-SD_ASYM_CPUCAPACITY-topology-flag-when-cp.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0043-arm-Update-SD_ASYM_CPUCAPACITY-topology-flag-when-cp.patch
new file mode 100644
index 0000000..6781716
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0043-arm-Update-SD_ASYM_CPUCAPACITY-topology-flag-when-cp.patch
@@ -0,0 +1,151 @@
+From 82ae3c283c44fb74536b33dfeb3f23e228af869f Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen
+Date: Wed, 22 Jun 2016 10:58:17 +0100
+Subject: [PATCH 43/92] arm: Update SD_ASYM_CPUCAPACITY topology flag when cpu
+ capacity is changed
+
+When cpu capacity information is provided through DT or sysfs we might
+not know if the system has asymmetric cpu capacities when the system
+topology information is initially passed to the scheduler. We therefore
+have to enforce a rebuild of the sched_domain hierarchy if we later
+discover that the flag was initially set wrong.
+
+It is mainly an issue for systems with the same type of cores (dmips/mhz)
+but different max frequencies, and asymmetric cpu capacity systems where
+cpu capacity is not provided by DT but set through sysfs later.
+
+cc: Russell King
+
+Signed-off-by: Morten Rasmussen
+(cherry picked from commit d02bb26e525bcbeb3c91345c4ee2a1fe1ca11e48)
+Signed-off-by: Gaku Inami
+---
+ arch/arm/kernel/topology.c | 39 +++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 37 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
+index 69fb4b6..4d94639 100644
+--- a/arch/arm/kernel/topology.c
++++ b/arch/arm/kernel/topology.c
+@@ -22,6 +22,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -43,6 +44,8 @@
+  */
+ static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+ static DEFINE_MUTEX(cpu_scale_mutex);
++static bool asym_cpucap;
++static bool update_flags;
+
+ unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
+@@ -54,6 +57,14 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+         per_cpu(cpu_scale, cpu) = capacity;
+ }
+
++static void update_sched_flags(void)
++{
++        update_flags = true;
++        rebuild_sched_domains();
++        update_flags = false;
++        pr_debug("cpu_capacity: Rebuilt sched_domain hierarchy.\n");
++}
++
+ #ifdef CONFIG_PROC_SYSCTL
+ #include
+ #include
+@@ -83,6 +94,7 @@ static ssize_t store_cpu_capacity(struct device *dev,
+
+         if (count) {
+                 char *p = (char *) buf;
++                bool asym = false;
+
+                 ret = kstrtoul(p, 0, &new_capacity);
+                 if (ret)
+@@ -93,6 +105,17 @@ static ssize_t store_cpu_capacity(struct device *dev,
+                 mutex_lock(&cpu_scale_mutex);
+                 for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+                         set_capacity_scale(i, new_capacity);
++
++                for_each_possible_cpu(i) {
++                        if (per_cpu(cpu_scale, i) != new_capacity)
++                                asym = true;
++                }
++
++                if (asym != asym_cpucap) {
++                        asym_cpucap = asym;
++                        update_sched_flags();
++                }
++
+                 mutex_unlock(&cpu_scale_mutex);
+         }
+
+@@ -154,7 +177,6 @@ struct cpu_efficiency {
+ static u32 *raw_capacity;
+ static bool cap_parsing_failed;
+ static u32 capacity_scale;
+-static bool asym_cpucap;
+
+ static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+ {
+@@ -199,6 +221,7 @@ static void normalize_cpu_capacity(void)
+ {
+         u64 capacity;
+         int cpu;
++        bool asym = false;
+
+         if (!raw_capacity || cap_parsing_failed)
+                 return;
+@@ -212,9 +235,12 @@ static void normalize_cpu_capacity(void)
+                 pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+                         cpu, arch_scale_cpu_capacity(NULL, cpu));
+                 if (capacity < capacity_scale)
+-                        asym_cpucap = true;
++                        asym = true;
+         }
+         mutex_unlock(&cpu_scale_mutex);
++
++        if (asym != asym_cpucap)
++                asym_cpucap = asym;
+ }
+
+ #ifdef CONFIG_CPU_FREQ
+@@ -232,6 +258,7 @@ static void normalize_cpu_capacity(void)
+ {
+         struct cpufreq_policy *policy = data;
+         int cpu;
++        bool asym;
+
+         if (cap_parsing_done)
+                 return 0;
+@@ -254,7 +281,10 @@ static void normalize_cpu_capacity(void)
+         }
+         if (cpumask_empty(cpus_to_visit)) {
+                 if (!cap_parsing_failed) {
++                        asym = asym_cpucap;
+                         normalize_cpu_capacity();
++                        if (asym != asym_cpucap)
++                                update_sched_flags();
+                         kfree(raw_capacity);
+                         pr_debug("cpu_capacity: parsing done");
+                 } else {
+@@ -502,6 +532,11 @@ static void update_siblings_masks(unsigned int cpuid)
+         smp_wmb();
+ }
+
++int arch_update_cpu_topology(void)
++{
++        return update_flags ? 1 : 0;
++}
++
+ /*
+  * store_cpu_topology is called at boot when only one cpu is running
+  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
+--
+1.9.1
+
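As a rough illustration of what the kernel patch above does, the stand-alone C sketch below mimics its "rebuild only when the asymmetry state flips" rule in user space. It is not kernel code and not part of the patch: NR_CPUS_SIM, cpu_capacity[], fake_rebuild_sched_domains() and set_capacity() are hypothetical stand-ins for per_cpu(cpu_scale, cpu), rebuild_sched_domains() and store_cpu_capacity().

/*
 * Illustrative user-space sketch (not kernel code): a capacity write
 * recomputes the asymmetry flag and triggers a "rebuild" only when
 * that flag changes, mirroring the logic added by the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_SIM 8

static unsigned long cpu_capacity[NR_CPUS_SIM];
static bool asym_cpucap;   /* mirrors the flag introduced by the patch */
static bool update_flags;  /* what arch_update_cpu_topology() would report */

/* Stand-in for rebuild_sched_domains(): just report that a rebuild ran. */
static void fake_rebuild_sched_domains(void)
{
        printf("sched_domain hierarchy rebuilt (asym=%d)\n", asym_cpucap);
}

/* Mirrors update_sched_flags() from the patch. */
static void update_sched_flags(void)
{
        update_flags = true;
        fake_rebuild_sched_domains();
        update_flags = false;
}

/* Set one CPU's capacity; rebuild only if the asymmetry state changes. */
static void set_capacity(int cpu, unsigned long capacity)
{
        bool asym = false;
        int i;

        cpu_capacity[cpu] = capacity;

        /* Same test as the patch: compare every CPU against the new value. */
        for (i = 0; i < NR_CPUS_SIM; i++)
                if (cpu_capacity[i] != capacity)
                        asym = true;

        if (asym != asym_cpucap) {
                asym_cpucap = asym;
                update_sched_flags();
        }
}

int main(void)
{
        int i;

        for (i = 0; i < NR_CPUS_SIM; i++)
                cpu_capacity[i] = 1024;         /* symmetric system */

        set_capacity(0, 1024);  /* still symmetric: no rebuild     */
        set_capacity(4, 512);   /* becomes asymmetric: one rebuild */
        set_capacity(4, 1024);  /* symmetric again: one rebuild    */

        return 0;
}

Keeping the rebuild behind the flag comparison is the point of the patch: capacity writes that do not change whether the system is asymmetric never trigger the relatively expensive sched_domain rebuild, while a flip in either direction makes arch_update_cpu_topology() report a change so the hierarchy is rebuilt with the correct SD_ASYM_CPUCAPACITY flag.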