path: root/meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch
author     Frode Isaksen <fisaksen@baylibre.com>                2017-12-19 11:15:35 +0000
committer  Jan-Simon Moeller <jsmoeller@linuxfoundation.org>    2018-02-07 11:47:29 +0000
commit     c4a6287185179732dfc1e903c195ff90c19f1065 (patch)
tree       d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch
parent     109dea1d5c5a38807b098b588584636ae636a302 (diff)
This layer provides Energy Aware Scheduling (EAS) patches (tags: eel_5.1.0, eel_5.0.3, eel_5.0.2, eel/5.1.0, eel/5.0.3, eel/5.0.2, 5.1.0, 5.0.3, 5.0.2; branch: eel)
For the moment, only for Renesas R-Car Gen3 SoCs. Can be expanded to other SoCs by setting the biglittle machine feature and providing the relevant EAS patches.

Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
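The biglittle machine feature mentioned above is what gates these patches. As a minimal sketch of how another big.LITTLE SoC could opt in (the machine file name below is hypothetical; only the standard MACHINE_FEATURES variable is assumed), the enablement would look like:

    # conf/machine/<your-machine>.conf  (hypothetical machine configuration file)
    # Advertise a big.LITTLE topology so meta-eas applies its EAS kernel patches;
    # the SoC-specific patch files still need to be provided alongside it.
    MACHINE_FEATURES += "biglittle"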
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch')
-rw-r--r--   meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch   140
1 file changed, 140 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch
new file mode 100644
index 0000000..674ebb9
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0003-sched-fair-Consider-spare-capacity-in-find_idlest_gr.patch
@@ -0,0 +1,140 @@
+From 575af3de702dae80c40bf510aaf7755374accb88 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Fri, 14 Oct 2016 14:41:08 +0100
+Subject: [PATCH 03/92] sched/fair: Consider spare capacity in
+ find_idlest_group()
+
+In low-utilization scenarios, comparing relative loads in
+find_idlest_group() doesn't always lead to the optimal choice.
+Systems with groups containing different numbers of CPUs and/or CPUs of
+different compute capacity are significantly better off when considering
+spare capacity rather than relative load in those scenarios.
+
+In addition to the existing load-based search, an alternative spare-capacity
+based candidate sched_group is found and selected instead if sufficient
+spare capacity exists. If not, the existing behaviour is preserved.
+
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dietmar.eggemann@arm.com
+Cc: freedom.tan@mediatek.com
+Cc: keita.kobayashi.ym@renesas.com
+Cc: mgalbraith@suse.de
+Cc: sgurrappadi@nvidia.com
+Cc: vincent.guittot@linaro.org
+Cc: yuyang.du@intel.com
+Link: http://lkml.kernel.org/r/1476452472-24740-3-git-send-email-morten.rasmussen@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit 6a0b19c0f39a7a7b7fb77d3867a733136ff059a3)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 50 +++++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 45 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b05d691..1ad3706 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5202,6 +5202,14 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ return 1;
+ }
+
++static inline int task_util(struct task_struct *p);
++static int cpu_util_wake(int cpu, struct task_struct *p);
++
++static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
++{
++ return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
++}
++
+ /*
+ * find_idlest_group finds and returns the least busy CPU group within the
+ * domain.
+@@ -5211,7 +5219,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ int this_cpu, int sd_flag)
+ {
+ struct sched_group *idlest = NULL, *group = sd->groups;
++ struct sched_group *most_spare_sg = NULL;
+ unsigned long min_load = ULONG_MAX, this_load = 0;
++ unsigned long most_spare = 0, this_spare = 0;
+ int load_idx = sd->forkexec_idx;
+ int imbalance = 100 + (sd->imbalance_pct-100)/2;
+
+@@ -5219,7 +5229,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ load_idx = sd->wake_idx;
+
+ do {
+- unsigned long load, avg_load;
++ unsigned long load, avg_load, spare_cap, max_spare_cap;
+ int local_group;
+ int i;
+
+@@ -5231,8 +5241,12 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ local_group = cpumask_test_cpu(this_cpu,
+ sched_group_cpus(group));
+
+- /* Tally up the load of all CPUs in the group */
++ /*
++ * Tally up the load of all CPUs in the group and find
++ * the group containing the CPU with most spare capacity.
++ */
+ avg_load = 0;
++ max_spare_cap = 0;
+
+ for_each_cpu(i, sched_group_cpus(group)) {
+ /* Bias balancing toward cpus of our domain */
+@@ -5242,6 +5256,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ load = target_load(i, load_idx);
+
+ avg_load += load;
++
++ spare_cap = capacity_spare_wake(i, p);
++
++ if (spare_cap > max_spare_cap)
++ max_spare_cap = spare_cap;
+ }
+
+ /* Adjust by relative CPU capacity of the group */
+@@ -5249,12 +5268,33 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+
+ if (local_group) {
+ this_load = avg_load;
+- } else if (avg_load < min_load) {
+- min_load = avg_load;
+- idlest = group;
++ this_spare = max_spare_cap;
++ } else {
++ if (avg_load < min_load) {
++ min_load = avg_load;
++ idlest = group;
++ }
++
++ if (most_spare < max_spare_cap) {
++ most_spare = max_spare_cap;
++ most_spare_sg = group;
++ }
+ }
+ } while (group = group->next, group != sd->groups);
+
++ /*
++ * The cross-over point between using spare capacity or least load
++ * is too conservative for high utilization tasks on partially
++ * utilized systems if we require spare_capacity > task_util(p),
++ * so we allow for some task stuffing by using
++ * spare_capacity > task_util(p)/2.
++ */
++ if (this_spare > task_util(p) / 2 &&
++ imbalance*this_spare > 100*most_spare)
++ return NULL;
++ else if (most_spare > task_util(p) / 2)
++ return most_spare_sg;
++
+ if (!idlest || 100*this_load < imbalance*min_load)
+ return NULL;
+ return idlest;
+--
+1.9.1
+
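To make the crossover heuristic above easier to follow, here is a minimal standalone user-space C sketch of the selection policy this patch introduces. It is illustrative only: the struct, the pick_group() helper and the sample numbers below do not exist in the kernel; they merely mirror, with simplified types, the logic shown in the hunk above.

#include <stdio.h>

struct group_stats {
        const char *name;
        unsigned long avg_load;      /* load metric, lower is better */
        unsigned long max_spare_cap; /* largest spare capacity of any CPU in the group */
        int local;                   /* 1 if this is the waking CPU's own group */
};

static const char *pick_group(const struct group_stats *g, int n,
                              unsigned long task_util, int imbalance_pct)
{
        unsigned long min_load = ~0UL, this_load = 0;
        unsigned long most_spare = 0, this_spare = 0;
        const char *idlest = NULL, *most_spare_grp = NULL;
        int imbalance = 100 + (imbalance_pct - 100) / 2;

        for (int i = 0; i < n; i++) {
                if (g[i].local) {
                        this_load = g[i].avg_load;
                        this_spare = g[i].max_spare_cap;
                        continue;
                }
                if (g[i].avg_load < min_load) {
                        min_load = g[i].avg_load;
                        idlest = g[i].name;
                }
                if (g[i].max_spare_cap > most_spare) {
                        most_spare = g[i].max_spare_cap;
                        most_spare_grp = g[i].name;
                }
        }

        /*
         * Crossover heuristic from the patch: require spare capacity above
         * task_util/2 (not task_util) to allow some task stuffing.
         */
        if (this_spare > task_util / 2 &&
            imbalance * this_spare > 100 * most_spare)
                return "local group";           /* the kernel returns NULL here */
        if (most_spare > task_util / 2)
                return most_spare_grp;

        /* Fall back to the pre-existing least-load comparison. */
        if (!idlest || 100 * this_load < imbalance * min_load)
                return "local group";           /* NULL in the kernel */
        return idlest;
}

int main(void)
{
        /* Hypothetical big.LITTLE numbers; capacities and loads are made up. */
        const struct group_stats groups[] = {
                { "LITTLE cluster", 400, 300, 1 },      /* local group */
                { "big cluster",    600, 700, 0 },
        };

        printf("chosen: %s\n",
               pick_group(groups, 2, 250 /* task_util */, 125 /* imbalance_pct */));
        return 0;
}

With these made-up numbers (task_util = 250, imbalance_pct = 125, so imbalance = 112), the pre-existing load-only comparison would keep the task in the local LITTLE group (100*400 < 112*600), whereas the spare-capacity path picks the big cluster because it offers more than task_util/2 of spare capacity; the program prints "chosen: big cluster". That switch is exactly the behavioural change described in the patch.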