author     Frode Isaksen <fisaksen@baylibre.com>  2017-12-19 11:15:35 +0000
committer  Jan-Simon Moeller <jsmoeller@linuxfoundation.org>  2018-02-07 11:47:29 +0000
commit     c4a6287185179732dfc1e903c195ff90c19f1065 (patch)
tree       d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
parent     109dea1d5c5a38807b098b588584636ae636a302 (diff)
This layer provides Energy Aware Scheduling (EAS) patches

For the moment only for Renesas R-Car Gen3 SoCs. Can be expanded to other
SoCs by setting the biglittle machine feature and providing the relevant
EAS patches.

Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch')
-rw-r--r--  meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch  63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
new file mode 100644
index 0000000..64ad147
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
@@ -0,0 +1,63 @@
+From 67c41183d153dca79ae8073ce6f056b66ba384f2 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Thu, 2 Jul 2015 17:16:34 +0100
+Subject: [PATCH 21/92] sched: Prevent unnecessary active balance of single
+ task in sched group
+
+Scenarios where the busiest group has just one task and the local group
+is idle, on topologies whose sched groups have different numbers of
+cpus, manage to dodge all load-balance bailout conditions, resulting in
+the nr_balance_failed counter being incremented. This eventually causes
+a pointless active migration of the task. This patch prevents this by
+not incrementing the counter when the busiest group has only one task.
+ASYM_PACKING migrations and migrations due to reduced capacity should
+still take place as these are explicitly captured by
+need_active_balance().
+
+A better solution would be to not attempt the load-balance in the first
+place, but that requires significant changes to the order of bailout
+conditions and statistics gathering.
+
+cc: Ingo Molnar <mingo@redhat.com>
+cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+(cherry picked from commit b1a7e8eae736c483b9ffed01b4dd35747f448232)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 18d9e75..341fccd 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6509,6 +6509,7 @@ struct lb_env {
+ int new_dst_cpu;
+ enum cpu_idle_type idle;
+ long imbalance;
++ unsigned int src_grp_nr_running;
+ /* The set of CPUs under consideration for load-balancing */
+ struct cpumask *cpus;
+
+@@ -7517,6 +7518,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ if (env->sd->flags & SD_NUMA)
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
++ env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
++
+ if (!env->sd->parent) {
+ /* update overload indicator if we are at root domain */
+ if (env->dst_rq->rd->overload != overload)
+@@ -8133,7 +8136,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+- sd->nr_balance_failed++;
++ if (env.src_grp_nr_running > 1)
++ sd->nr_balance_failed++;
+
+ if (need_active_balance(&env)) {
+ raw_spin_lock_irqsave(&busiest->lock, flags);
+--
+1.9.1
+
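To make the behavioural change easier to see outside the diff, below is a
minimal standalone C sketch of the guard this patch adds. It is not kernel
code: record_balance_failure() is a hypothetical stand-in for the tail of
load_balance(), and the structs are trimmed to just the fields the patch
touches. The point it demonstrates is that nr_balance_failed only advances
when the busiest group is running more than one task, so a lone task can no
longer accumulate failures and trigger a pointless active migration.

#include <stdio.h>

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

/* Trimmed-down stand-ins for the kernel structs used in the patch. */
struct lb_env {
	enum cpu_idle_type idle;
	unsigned int src_grp_nr_running; /* busiest_stat.sum_nr_running */
};

struct sched_domain {
	unsigned int nr_balance_failed;
};

/* Hypothetical helper mirroring the patched tail of load_balance():
 * record a failed balance attempt only if the busiest group had more
 * than one runnable task. */
static void record_balance_failure(struct sched_domain *sd,
				   const struct lb_env *env)
{
	if (env->idle != CPU_NEWLY_IDLE)
		if (env->src_grp_nr_running > 1) /* the condition this patch adds */
			sd->nr_balance_failed++;
}

int main(void)
{
	struct sched_domain sd = { .nr_balance_failed = 0 };
	struct lb_env single = { .idle = CPU_NOT_IDLE, .src_grp_nr_running = 1 };
	struct lb_env multi  = { .idle = CPU_NOT_IDLE, .src_grp_nr_running = 3 };

	record_balance_failure(&sd, &single);
	printf("after single-task busiest group: %u\n", sd.nr_balance_failed); /* 0 */

	record_balance_failure(&sd, &multi);
	printf("after multi-task busiest group:  %u\n", sd.nr_balance_failed); /* 1 */
	return 0;
}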