Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0044-arm64-Set-SD_ASYM_CPUCAPACITY-topology-flag-for-asym.patch')
-rw-r--r-- meta-eas/recipes-kernel/linux/linux-renesas/0044-arm64-Set-SD_ASYM_CPUCAPACITY-topology-flag-for-asym.patch | 165
1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0044-arm64-Set-SD_ASYM_CPUCAPACITY-topology-flag-for-asym.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0044-arm64-Set-SD_ASYM_CPUCAPACITY-topology-flag-for-asym.patch
new file mode 100644
index 0000000..819a5cf
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0044-arm64-Set-SD_ASYM_CPUCAPACITY-topology-flag-for-asym.patch
@@ -0,0 +1,165 @@
+From bacbde41338ca493a4557fb33faf4ea4ab788bb5 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Wed, 22 Jun 2016 14:51:17 +0100
+Subject: [PATCH 44/92] arm64: Set SD_ASYM_CPUCAPACITY topology flag for
+ asymmetric cpu capacity systems
+
+In addition to adjusting the cpu capacities, asymmetric cpu capacity
+systems also have to set the SD_ASYM_CPUCAPACITY topology flag to inform
+the scheduler to look harder when load balancing. This patch sets the
+topology flag at DIE level for such systems, based on the cpu capacities
+provided through DT or sysfs.
+
+cc: Catalin Marinas <catalin.marinas@arm.com>
+cc: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+(cherry picked from commit 8a8992543ec08e26f8a8ab3cdb4bb556283ce9ea)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ arch/arm64/kernel/topology.c | 52 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 52 insertions(+)
+
+diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
+index 280319b..eb2ad75 100644
+--- a/arch/arm64/kernel/topology.c
++++ b/arch/arm64/kernel/topology.c
+@@ -21,12 +21,15 @@
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/cpufreq.h>
++#include <linux/cpuset.h>
+
+ #include <asm/cputype.h>
+ #include <asm/topology.h>
+
+ static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+ static DEFINE_MUTEX(cpu_scale_mutex);
++static bool asym_cpucap;
++static bool update_flags;
+
+ unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
+@@ -38,6 +41,14 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+ per_cpu(cpu_scale, cpu) = capacity;
+ }
+
++static void update_sched_flags(void)
++{
++ update_flags = true;
++ rebuild_sched_domains();
++ update_flags = false;
++ pr_debug("cpu_capacity: Rebuilt sched_domain hierarchy.\n");
++}
++
+ #ifdef CONFIG_PROC_SYSCTL
+ #include <asm/cpu.h>
+ #include <linux/string.h>
+@@ -67,6 +78,7 @@ static ssize_t store_cpu_capacity(struct device *dev,
+
+ if (count) {
+ char *p = (char *) buf;
++ bool asym = false;
+
+ ret = kstrtoul(p, 0, &new_capacity);
+ if (ret)
+@@ -77,6 +89,16 @@ static ssize_t store_cpu_capacity(struct device *dev,
+ mutex_lock(&cpu_scale_mutex);
+ for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+ set_capacity_scale(i, new_capacity);
++
++ for_each_possible_cpu(i) {
++ if (per_cpu(cpu_scale, i) != new_capacity)
++ asym = true;
++ }
++
++ if (asym != asym_cpucap) {
++ asym_cpucap = asym;
++ update_sched_flags();
++ }
+ mutex_unlock(&cpu_scale_mutex);
+ }
+
+@@ -153,6 +175,7 @@ static void normalize_cpu_capacity(void)
+ {
+ u64 capacity;
+ int cpu;
++ bool asym = false;
+
+ if (!raw_capacity || cap_parsing_failed)
+ return;
+@@ -167,8 +190,13 @@ static void normalize_cpu_capacity(void)
+ set_capacity_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, arch_scale_cpu_capacity(NULL, cpu));
++ if (capacity < capacity_scale)
++ asym = true;
+ }
+ mutex_unlock(&cpu_scale_mutex);
++
++ if (asym != asym_cpucap)
++ asym_cpucap = asym;
+ }
+
+ #ifdef CONFIG_CPU_FREQ
+@@ -186,6 +214,7 @@ static void normalize_cpu_capacity(void)
+ {
+ struct cpufreq_policy *policy = data;
+ int cpu;
++ bool asym;
+
+ if (cap_parsing_done)
+ return 0;
+@@ -208,7 +237,10 @@ static void normalize_cpu_capacity(void)
+ }
+ if (cpumask_empty(cpus_to_visit)) {
+ if (!cap_parsing_failed) {
++ asym = asym_cpucap;
+ normalize_cpu_capacity();
++ if (asym != asym_cpucap)
++ update_sched_flags();
+ kfree(raw_capacity);
+ pr_debug("cpu_capacity: parsing done\n");
+ } else {
+@@ -488,6 +520,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
+ return &cpu_topology[cpu].core_sibling;
+ }
+
++static int cpu_cpu_flags(void)
++{
++ return asym_cpucap ? SD_ASYM_CPUCAPACITY : 0;
++}
++
+ static void update_siblings_masks(unsigned int cpuid)
+ {
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+@@ -569,6 +606,19 @@ static void __init reset_cpu_topology(void)
+ }
+ }
+
++static struct sched_domain_topology_level arm64_topology[] = {
++#ifdef CONFIG_SCHED_MC
++ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
++#endif
++ { cpu_cpu_mask, cpu_cpu_flags, SD_INIT_NAME(DIE) },
++ { NULL, }
++};
++
++int arch_update_cpu_topology(void)
++{
++ return update_flags ? 1 : 0;
++}
++
+ void __init init_cpu_topology(void)
+ {
+ reset_cpu_topology();
+@@ -579,4 +629,6 @@ void __init init_cpu_topology(void)
+ */
+ if (of_have_populated_dt() && parse_dt_topology())
+ reset_cpu_topology();
++ else
++ set_sched_topology(arm64_topology);
+ }
+--
+1.9.1
+
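The behaviour this patch adds can be exercised from userspace through the per-cpu cpu_capacity attribute introduced earlier in this series: writing a capacity triggers store_cpu_capacity(), which re-evaluates asym_cpucap and, when the result changes, rebuilds the scheduler domains so that cpu_cpu_flags() can report SD_ASYM_CPUCAPACITY at DIE level. As a rough illustration, here is a minimal userspace sketch (not part of the patch; it assumes the read/write cpu_capacity sysfs attribute from earlier patches in this series is present) that reproduces the kernel's asymmetry check by comparing each CPU's capacity against the maximum:

/*
 * Hypothetical userspace sketch: detect an asymmetric cpu capacity
 * system the way the patch's normalize_cpu_capacity() loop does, by
 * checking whether any CPU's capacity falls below the maximum.
 * Assumes /sys/devices/system/cpu/cpuN/cpu_capacity exists.
 */
#include <stdio.h>

int main(void)
{
	char path[64];
	unsigned long cap, max_cap = 0;
	int cpu, ncpus = 0, asym = 0;
	FILE *f;

	/* First pass: find the maximum capacity across all CPUs. */
	for (cpu = 0; ; cpu++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
		f = fopen(path, "r");
		if (!f)
			break;	/* no more CPUs with the attribute */
		if (fscanf(f, "%lu", &cap) == 1) {
			if (cap > max_cap)
				max_cap = cap;
			ncpus++;
		}
		fclose(f);
	}

	/* Second pass, mirroring the kernel check: any CPU below the
	 * maximum capacity makes the system asymmetric. */
	for (cpu = 0; cpu < ncpus; cpu++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu%d/cpu_capacity", cpu);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%lu", &cap) == 1 && cap < max_cap)
			asym = 1;
		fclose(f);
	}

	printf("asymmetric cpu capacities: %s\n", asym ? "yes" : "no");
	return 0;
}

On a big.LITTLE system such as the Renesas R-Car platforms this meta-eas layer targets, the sketch should report "yes" once the DT-provided capacities have been parsed and normalized.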