From 82ae3c283c44fb74536b33dfeb3f23e228af869f Mon Sep 17 00:00:00 2001
From: Morten Rasmussen
Date: Wed, 22 Jun 2016 10:58:17 +0100
Subject: [PATCH 43/92] arm: Update SD_ASYM_CPUCAPACITY topology flag when cpu
 capacity is changed

When cpu capacity information is provided through DT or sysfs we might
not yet know whether the system has asymmetric cpu capacities when the
system topology information is initially passed to the scheduler. We
therefore have to enforce a rebuild of the sched_domain hierarchy if we
later discover that the flag was initially set wrong. This is mainly an
issue for systems with the same type of cores (same dmips/mhz) but
different max frequencies, and for asymmetric cpu capacity systems
where cpu capacity is not provided by DT but is set later through
sysfs.
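
To illustrate how a rebuilt hierarchy picks the flag up again: the
per-level sd_flags callback in the arch's sched_domain topology table
would typically report it based on asym_cpucap. The cpu_cpu_flags()
name below is only a sketch, not code added by this patch:

	static inline int cpu_cpu_flags(void)
	{
		/* Report asymmetry to the scheduler once it is known. */
		return asym_cpucap ? SD_ASYM_CPUCAPACITY : 0;
	}

rebuild_sched_domains() ends up calling arch_update_cpu_topology(),
and the non-zero return value added below makes the scheduler destroy
and rebuild its sched_domains, re-evaluating per-level topology flags
such as the one above.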
cc: Russell King
Signed-off-by: Morten Rasmussen
(cherry picked from commit d02bb26e525bcbeb3c91345c4ee2a1fe1ca11e48)
Signed-off-by: Gaku Inami
---
 arch/arm/kernel/topology.c | 39 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 69fb4b6..4d94639 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <linux/cpuset.h>
 
 #include
 #include
@@ -43,6 +44,8 @@
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 static DEFINE_MUTEX(cpu_scale_mutex);
+static bool asym_cpucap;
+static bool update_flags;
 
 unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -54,6 +57,14 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
 	per_cpu(cpu_scale, cpu) = capacity;
 }
 
+static void update_sched_flags(void)
+{
+	update_flags = true;
+	rebuild_sched_domains();
+	update_flags = false;
+	pr_debug("cpu_capacity: Rebuilt sched_domain hierarchy.\n");
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 #include
 #include
@@ -83,6 +94,7 @@ static ssize_t store_cpu_capacity(struct device *dev,
 
 	if (count) {
 		char *p = (char *) buf;
+		bool asym = false;
 
 		ret = kstrtoul(p, 0, &new_capacity);
 		if (ret)
@@ -93,6 +105,17 @@ static ssize_t store_cpu_capacity(struct device *dev,
 		mutex_lock(&cpu_scale_mutex);
 		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
 			set_capacity_scale(i, new_capacity);
+
+		for_each_possible_cpu(i) {
+			if (per_cpu(cpu_scale, i) != new_capacity)
+				asym = true;
+		}
+
+		if (asym != asym_cpucap) {
+			asym_cpucap = asym;
+			update_sched_flags();
+		}
+
 		mutex_unlock(&cpu_scale_mutex);
 	}
 
@@ -154,7 +177,6 @@ struct cpu_efficiency {
 static u32 *raw_capacity;
 static bool cap_parsing_failed;
 static u32 capacity_scale;
-static bool asym_cpucap;
 
 static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
@@ -199,6 +221,7 @@ static void normalize_cpu_capacity(void)
 {
 	u64 capacity;
 	int cpu;
+	bool asym = false;
 
 	if (!raw_capacity || cap_parsing_failed)
 		return;
@@ -212,9 +235,12 @@ static void normalize_cpu_capacity(void)
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
 			 cpu, arch_scale_cpu_capacity(NULL, cpu));
 		if (capacity < capacity_scale)
-			asym_cpucap = true;
+			asym = true;
 	}
 	mutex_unlock(&cpu_scale_mutex);
+
+	if (asym != asym_cpucap)
+		asym_cpucap = asym;
 }
 
 #ifdef CONFIG_CPU_FREQ
@@ -232,6 +258,7 @@ static void normalize_cpu_capacity(void)
 {
 	struct cpufreq_policy *policy = data;
 	int cpu;
+	bool asym;
 
 	if (cap_parsing_done)
 		return 0;
@@ -254,7 +281,10 @@ static void normalize_cpu_capacity(void)
 		}
 		if (cpumask_empty(cpus_to_visit)) {
 			if (!cap_parsing_failed) {
+				asym = asym_cpucap;
 				normalize_cpu_capacity();
+				if (asym != asym_cpucap)
+					update_sched_flags();
 				kfree(raw_capacity);
 				pr_debug("cpu_capacity: parsing done");
 			} else {
@@ -502,6 +532,11 @@ static void update_siblings_masks(unsigned int cpuid)
 	smp_wmb();
 }
 
+int arch_update_cpu_topology(void)
+{
+	return update_flags ? 1 : 0;
+}
+
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
-- 
1.9.1