From b1238cc2a12dba910ff1f6af592196a2743bf594 Mon Sep 17 00:00:00 2001
From: Morten Rasmussen
Date: Fri, 12 Aug 2016 11:29:04 +0100
Subject: [PATCH 64/92] arm: Set SD_SHARE_CAP_STATES sched_domain flag when applicable

Energy-aware scheduling relies on the SD_SHARE_CAP_STATES sched_domain
flag to identify cpus that share the same clock source/frequency
domain. The assumption is that a sched_domain exists that matches the
clock/frequency domain, i.e. policy->related_cpus in cpufreq terms. The
flag is not set for systems without frequency scaling or for systems
with per-cpu frequency scaling.

cc: Russell King
Signed-off-by: Morten Rasmussen
(cherry picked from commit cca07e1a18915b6990c1cf464686bcb24d4d1472)
Signed-off-by: Gaku Inami
---
 arch/arm/kernel/topology.c | 30 +++++++++++++++++++++++++++---
 1 file changed, 27 insertions(+), 3 deletions(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 4d94639..c77f39f 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -45,6 +45,7 @@
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 static DEFINE_MUTEX(cpu_scale_mutex);
 static bool asym_cpucap;
+static bool sd_mc_share_cap, sd_die_share_cap;
 static bool update_flags;
 
 unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -268,6 +269,14 @@ static void normalize_cpu_capacity(void)
 		pr_debug("cpu_capacity: calling %s for CPUs [%*pbl] (to_visit=%*pbl)\n",
 			 __func__, cpumask_pr_args(policy->related_cpus),
 			 cpumask_pr_args(cpus_to_visit));
+
+		cpu = cpumask_first(policy->related_cpus);
+
+		if (cpumask_subset(cpu_coregroup_mask(cpu), policy->related_cpus))
+			sd_mc_share_cap = true;
+		else if (cpumask_subset(cpu_cpu_mask(cpu), policy->related_cpus))
+			sd_die_share_cap = true;
+
 		cpumask_andnot(cpus_to_visit,
 			       cpus_to_visit,
 			       policy->related_cpus);
@@ -279,11 +288,13 @@ static void normalize_cpu_capacity(void)
 				policy->cpuinfo.max_freq / 1000UL;
 			capacity_scale = max(raw_capacity[cpu], capacity_scale);
 		}
+
 		if (cpumask_empty(cpus_to_visit)) {
 			if (!cap_parsing_failed) {
 				asym = asym_cpucap;
 				normalize_cpu_capacity();
-				if (asym != asym_cpucap)
+				if (asym != asym_cpucap ||
+				    sd_mc_share_cap || sd_die_share_cap)
 					update_sched_flags();
 				kfree(raw_capacity);
 				pr_debug("cpu_capacity: parsing done");
@@ -594,12 +605,25 @@ void store_cpu_topology(unsigned int cpuid)
 
 static inline int cpu_corepower_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+	int mc_flags = SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+
+	if (sd_mc_share_cap)
+		mc_flags |= SD_SHARE_CAP_STATES;
+
+	return mc_flags;
 }
 
 static inline int arm_cpu_cpu_flags(void)
 {
-	return asym_cpucap ? SD_ASYM_CPUCAPACITY : 0;
+	int die_flags = 0;
+
+	if (asym_cpucap)
+		die_flags |= SD_ASYM_CPUCAPACITY;
+
+	if (sd_die_share_cap)
+		die_flags |= SD_SHARE_CAP_STATES;
+
+	return die_flags;
 }
 
 static struct sched_domain_topology_level arm_topology[] = {
-- 
1.9.1
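
For context, the detection added above reduces to two cpumask_subset()
tests against policy->related_cpus: if the MC-level span
(cpu_coregroup_mask()) fits inside the frequency domain, the MC
sched_domain shares capacity states; otherwise, if the DIE-level span
(cpu_cpu_mask()) fits, the DIE domain does. Below is a minimal
standalone sketch of that decision, modelling cpumasks as plain
bitmasks; the 2+2 big.LITTLE topology, the per-cluster frequency
domain, and all names here are illustrative assumptions, not part of
the patch.

#include <stdio.h>
#include <stdbool.h>

/* Model a cpumask as one bit per cpu; four cpus total. */
typedef unsigned int cpumask_t;

/* Like the kernel's cpumask_subset(a, b): a has no cpus outside b. */
static bool mask_subset(cpumask_t a, cpumask_t b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	/* Assumed 2+2 big.LITTLE: cpus 0-1 in one cluster, 2-3 in the other. */
	cpumask_t coregroup = 0x3;	/* cpu_coregroup_mask(0): MC span  */
	cpumask_t all_cpus  = 0xf;	/* cpu_cpu_mask(0): DIE span       */
	cpumask_t related   = 0x3;	/* policy->related_cpus: cluster 0 */

	if (mask_subset(coregroup, related))
		printf("MC level fits the freq domain -> sd_mc_share_cap\n");
	else if (mask_subset(all_cpus, related))
		printf("DIE level fits the freq domain -> sd_die_share_cap\n");
	else
		printf("no matching level; SD_SHARE_CAP_STATES stays unset\n");

	return 0;
}

With per-cpu frequency scaling (related = 0x1) neither subset test
passes and no flag is set, matching the changelog; with a single
system-wide domain (related = 0xf) the first test passes, so the flag
lands at the MC level.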