author    | Frode Isaksen <fisaksen@baylibre.com> | 2017-12-19 11:15:35 +0000
committer | Jan-Simon Moeller <jsmoeller@linuxfoundation.org> | 2018-02-07 11:47:29 +0000
commit    | c4a6287185179732dfc1e903c195ff90c19f1065 (patch)
tree      | d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch
parent    | 109dea1d5c5a38807b098b588584636ae636a302 (diff)
This layer provides Energy Aware Scheduling (EAS) patches.
For the moment only for the Renesas R-Car Gen3 SoCs.
It can be expanded to other SoCs by setting the machine
feature "biglittle" and providing the relevant EAS patches.
Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch')
-rw-r--r-- | meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch | 208
1 file changed, 208 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch
new file mode 100644
index 0000000..6d2b9de
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0015-sched-Extend-scheduler-s-asym-packing.patch
@@ -0,0 +1,208 @@
+From e467f39a4c96c4a6f96be66a9224807fe9b7346e Mon Sep 17 00:00:00 2001
+From: Tim Chen <tim.c.chen@linux.intel.com>
+Date: Tue, 22 Nov 2016 12:23:53 -0800
+Subject: [PATCH 15/92] sched: Extend scheduler's asym packing
+
+We generalize the scheduler's asym packing to provide an ordering
+of the cpu beyond just the cpu number. This allows the use of the
+ASYM_PACKING scheduler machinery to move loads to preferred CPU in a
+sched domain. The preference is defined with the cpu priority
+given by arch_asym_cpu_priority(cpu).
+
+We also record the most preferred cpu in a sched group when
+we build the cpu's capacity for fast lookup of preferred cpu
+during load balancing.
+
+Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: linux-pm@vger.kernel.org
+Cc: jolsa@redhat.com
+Cc: rjw@rjwysocki.net
+Cc: linux-acpi@vger.kernel.org
+Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: bp@suse.de
+Link: http://lkml.kernel.org/r/0e73ae12737dfaafa46c07066cc7c5d3f1675e46.1479844244.git.tim.c.chen@linux.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+(cherry picked from commit afe06efdf07c12fd9370d5cce5383398cedf6c90)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ include/linux/sched.h |  2 ++
+ kernel/sched/core.c   | 15 +++++++++++++++
+ kernel/sched/fair.c   | 53 ++++++++++++++++++++++++++++++++++-----------------
+ kernel/sched/sched.h  |  6 ++++++
+ 4 files changed, 59 insertions(+), 17 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index e9c009d..2e3c2a1 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1057,6 +1057,8 @@ static inline int cpu_numa_flags(void)
+ }
+ #endif
+ 
++extern int arch_asym_cpu_priority(int cpu);
++
+ struct sched_domain_attr {
+         int relax_domain_level;
+ };
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 391d6c9..209d2ea 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6302,7 +6302,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
+         WARN_ON(!sg);
+ 
+         do {
++                int cpu, max_cpu = -1;
++
+                 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
++
++                if (!(sd->flags & SD_ASYM_PACKING))
++                        goto next;
++
++                for_each_cpu(cpu, sched_group_cpus(sg)) {
++                        if (max_cpu < 0)
++                                max_cpu = cpu;
++                        else if (sched_asym_prefer(cpu, max_cpu))
++                                max_cpu = cpu;
++                }
++                sg->asym_prefer_cpu = max_cpu;
++
++next:
+                 sg = sg->next;
+         } while (sg != sd->groups);
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index aa47589..18d9e75 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -97,6 +97,16 @@
+ 
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+ 
++#ifdef CONFIG_SMP
++/*
++ * For asym packing, by default the lower numbered cpu has higher priority.
++ */
++int __weak arch_asym_cpu_priority(int cpu)
++{
++        return -cpu;
++}
++#endif
++
+ #ifdef CONFIG_CFS_BANDWIDTH
+ /*
+  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
+@@ -7388,16 +7398,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
+         if (env->idle == CPU_NOT_IDLE)
+                 return true;
+         /*
+-         * ASYM_PACKING needs to move all the work to the lowest
+-         * numbered CPUs in the group, therefore mark all groups
+-         * higher than ourself as busy.
++         * ASYM_PACKING needs to move all the work to the highest
++         * prority CPUs in the group, therefore mark all groups
++         * of lower priority than ourself as busy.
+          */
+-        if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
++        if (sgs->sum_nr_running &&
++            sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
+                 if (!sds->busiest)
+                         return true;
+ 
+-                /* Prefer to move from highest possible cpu's work */
+-                if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
++                /* Prefer to move from lowest priority cpu's work */
++                if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
++                                      sg->asym_prefer_cpu))
+                         return true;
+         }
+ 
+@@ -7549,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
+         if (!sds->busiest)
+                 return 0;
+ 
+-        busiest_cpu = group_first_cpu(sds->busiest);
+-        if (env->dst_cpu > busiest_cpu)
++        busiest_cpu = sds->busiest->asym_prefer_cpu;
++        if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
+                 return 0;
+ 
+         env->imbalance = DIV_ROUND_CLOSEST(
+@@ -7888,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
+ 
+                 /*
+                  * ASYM_PACKING needs to force migrate tasks from busy but
+-                 * higher numbered CPUs in order to pack all tasks in the
+-                 * lowest numbered CPUs.
++                 * lower priority CPUs in order to pack all tasks in the
++                 * highest priority CPUs.
+                  */
+-                if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
++                if ((sd->flags & SD_ASYM_PACKING) &&
++                    sched_asym_prefer(env->dst_cpu, env->src_cpu))
+                         return 1;
+         }
+ 
+@@ -8740,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
+         unsigned long now = jiffies;
+         struct sched_domain_shared *sds;
+         struct sched_domain *sd;
+-        int nr_busy, cpu = rq->cpu;
++        int nr_busy, i, cpu = rq->cpu;
+         bool kick = false;
+ 
+         if (unlikely(rq->idle_balance))
+@@ -8791,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
+         }
+ 
+         sd = rcu_dereference(per_cpu(sd_asym, cpu));
+-        if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
+-                                  sched_domain_span(sd)) < cpu)) {
+-                kick = true;
+-                goto unlock;
+-        }
++        if (sd) {
++                for_each_cpu(i, sched_domain_span(sd)) {
++                        if (i == cpu ||
++                            !cpumask_test_cpu(i, nohz.idle_cpus_mask))
++                                continue;
+ 
++                        if (sched_asym_prefer(i, cpu)) {
++                                kick = true;
++                                goto unlock;
++                        }
++                }
++        }
+ unlock:
+         rcu_read_unlock();
+         return kick;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index d7e3931..7b34c78 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -540,6 +540,11 @@ struct dl_rq {
+ 
+ #ifdef CONFIG_SMP
+ 
++static inline bool sched_asym_prefer(int a, int b)
++{
++        return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
++}
++
+ /*
+  * We add the notion of a root-domain which will be used to define per-domain
+  * variables. Each exclusive cpuset essentially defines an island domain by
+@@ -908,6 +913,7 @@ struct sched_group {
+ 
+         unsigned int group_weight;
+         struct sched_group_capacity *sgc;
++        int asym_prefer_cpu;            /* cpu of highest priority in group */
+ 
+         /*
+          * The CPUs this group covers.
+-- 
+1.9.1
+
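
For reference, the CPU ordering the patch introduces can be illustrated outside the kernel. The sketch below is a standalone userspace C program, not part of the patch: it re-implements arch_asym_cpu_priority(), sched_asym_prefer() and the asym_prefer_cpu selection loop from init_sched_groups_capacity() over a plain array of CPU ids. The override_prio[] table is purely hypothetical and only stands in for an architecture that overrides the weak default hook (which returns -cpu, so lower CPU numbers win unless overridden).

/*
 * Standalone userspace sketch (not kernel code). It mirrors the helpers the
 * patch adds: sched_asym_prefer() compares per-CPU priorities returned by
 * arch_asym_cpu_priority(), and the highest-priority CPU of a group becomes
 * asym_prefer_cpu. The override_prio[] table is hypothetical; the patch only
 * defines the weak default that returns -cpu.
 */
#include <stdio.h>

static int use_override;
/* Hypothetical priorities an architecture might report for CPUs 0..3. */
static const int override_prio[] = { 10, 10, 30, 30 };

static int arch_asym_cpu_priority(int cpu)
{
        if (use_override)
                return override_prio[cpu];
        return -cpu;    /* weak default from the patch: lower CPU id wins */
}

/* Mirrors the new helper added to kernel/sched/sched.h. */
static int sched_asym_prefer(int a, int b)
{
        return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/* Mirrors the asym_prefer_cpu selection loop in init_sched_groups_capacity(). */
static int pick_asym_prefer_cpu(const int *cpus, int n)
{
        int i, max_cpu = -1;

        for (i = 0; i < n; i++) {
                if (max_cpu < 0)
                        max_cpu = cpus[i];
                else if (sched_asym_prefer(cpus[i], max_cpu))
                        max_cpu = cpus[i];
        }
        return max_cpu;
}

int main(void)
{
        int group[] = { 0, 1, 2, 3 };

        use_override = 0;
        printf("default priority:  prefer CPU %d\n",
               pick_asym_prefer_cpu(group, 4));  /* CPU 0 */

        use_override = 1;
        printf("override priority: prefer CPU %d\n",
               pick_asym_prefer_cpu(group, 4));  /* CPU 2 */
        return 0;
}

Compiled and run, it reports CPU 0 as the preferred CPU of the group under the default priority and CPU 2 under the hypothetical override, which is the ordering the load balancer then packs work toward.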