Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch')
-rw-r--r--  meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch  63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
new file mode 100644
index 0000000..64ad147
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
@@ -0,0 +1,63 @@
+From 67c41183d153dca79ae8073ce6f056b66ba384f2 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Thu, 2 Jul 2015 17:16:34 +0100
+Subject: [PATCH 21/92] sched: Prevent unnecessary active balance of single
+ task in sched group
+
+Scenarios in which the busiest group has just one task and the local
+group is idle, on topologies whose sched groups contain different
+numbers of cpus, can dodge all load-balance bailout conditions, so
+the nr_balance_failed counter is incremented. This eventually causes
+a pointless active migration of the task. This patch prevents this by
+not incrementing the counter when the busiest group has only one task.
+ASYM_PACKING migrations and migrations due to reduced capacity should
+still take place as these are explicitly captured by
+need_active_balance().
+
+A better solution would be to not attempt the load-balance in the first
+place, but that requires significant changes to the order of bailout
+conditions and statistics gathering.
+
+cc: Ingo Molnar <mingo@redhat.com>
+cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+(cherry picked from commit b1a7e8eae736c483b9ffed01b4dd35747f448232)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 18d9e75..341fccd 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6509,6 +6509,7 @@ struct lb_env {
+ int new_dst_cpu;
+ enum cpu_idle_type idle;
+ long imbalance;
++ unsigned int src_grp_nr_running;
+ /* The set of CPUs under consideration for load-balancing */
+ struct cpumask *cpus;
+
+@@ -7517,6 +7518,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
+ if (env->sd->flags & SD_NUMA)
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
++ env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
++
+ if (!env->sd->parent) {
+ /* update overload indicator if we are at root domain */
+ if (env->dst_rq->rd->overload != overload)
+@@ -8133,7 +8136,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+- sd->nr_balance_failed++;
++ if (env.src_grp_nr_running > 1)
++ sd->nr_balance_failed++;
+
+ if (need_active_balance(&env)) {
+ raw_spin_lock_irqsave(&busiest->lock, flags);
+--
+1.9.1
+
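For reference, the accounting change above can be modelled in ordinary
user-space C. The sketch below is an illustration only: the reduced
structs and the account_failed_balance() helper are hypothetical
stand-ins for kernel/sched/fair.c internals, keeping just the fields
and names that appear in the hunks above.

#include <stdio.h>

/* Reduced stand-ins for the kernel structures touched by the patch. */
enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

struct lb_env {
	unsigned int src_grp_nr_running;  /* tasks in the busiest group */
};

struct sched_domain {
	unsigned int nr_balance_failed;   /* failed balance attempts */
};

/*
 * Hypothetical helper mirroring the patched bailout accounting in
 * load_balance(): a failed attempt is counted only when the busiest
 * group runs more than one task, so a lone task that cannot be moved
 * never pushes nr_balance_failed towards the active-balance threshold.
 */
static void account_failed_balance(struct sched_domain *sd,
				   const struct lb_env *env,
				   enum cpu_idle_type idle)
{
	if (idle != CPU_NEWLY_IDLE)
		if (env->src_grp_nr_running > 1)
			sd->nr_balance_failed++;
}

int main(void)
{
	struct sched_domain sd = { .nr_balance_failed = 0 };
	struct lb_env single = { .src_grp_nr_running = 1 };
	struct lb_env multi  = { .src_grp_nr_running = 2 };

	account_failed_balance(&sd, &single, CPU_NOT_IDLE);
	printf("single-task busiest group: %u\n", sd.nr_balance_failed); /* 0 */

	account_failed_balance(&sd, &multi, CPU_NOT_IDLE);
	printf("multi-task busiest group:  %u\n", sd.nr_balance_failed); /* 1 */

	return 0;
}

With the pre-patch behaviour, the first call would already have bumped
the counter, eventually letting need_active_balance() trigger a
pointless migration of a task with nowhere better to run; after the
patch, only the multi-task case counts.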