From c4a6287185179732dfc1e903c195ff90c19f1065 Mon Sep 17 00:00:00 2001
From: Frode Isaksen
Date: Tue, 19 Dec 2017 11:15:35 +0000
Subject: This layer provides Energy Aware Scheduling (EAS) patches

For the moment only for Renesas R-Car Gen3 SoCs.
Can be expanded to other SoCs by setting the machine feature
biglittle and providing the relevant EAS patches.

Bug-AGL: SPEC-813

Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen
---
 ...nt-unnecessary-active-balance-of-single-t.patch | 63 ++++++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch

diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
new file mode 100644
index 0000000..64ad147
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0021-sched-Prevent-unnecessary-active-balance-of-single-t.patch
@@ -0,0 +1,63 @@
+From 67c41183d153dca79ae8073ce6f056b66ba384f2 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen
+Date: Thu, 2 Jul 2015 17:16:34 +0100
+Subject: [PATCH 21/92] sched: Prevent unnecessary active balance of single
+ task in sched group
+
+Scenarios with the busiest group having just one task and the local
+group being idle on topologies with sched groups with different numbers
+of cpus manage to dodge all load-balance bailout conditions, resulting
+in the nr_balance_failed counter being incremented. This eventually
+causes a pointless active migration of the task. This patch prevents
+this by not incrementing the counter when the busiest group only has
+one task. ASYM_PACKING migrations and migrations due to reduced
+capacity should still take place as these are explicitly captured by
+need_active_balance().
+
+A better solution would be to not attempt the load-balance in the first
+place, but that requires significant changes to the order of bailout
+conditions and statistics gathering.
+
+cc: Ingo Molnar
+cc: Peter Zijlstra
+Signed-off-by: Morten Rasmussen
+(cherry picked from commit b1a7e8eae736c483b9ffed01b4dd35747f448232)
+Signed-off-by: Gaku Inami
+---
+ kernel/sched/fair.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 18d9e75..341fccd 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6509,6 +6509,7 @@ struct lb_env {
+ int new_dst_cpu;
+ enum cpu_idle_type idle;
+ long imbalance;
++ unsigned int src_grp_nr_running;
+ /* The set of CPUs under consideration for load-balancing */
+ struct cpumask *cpus;
+
+@@ -7517,6 +7518,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ if (env->sd->flags & SD_NUMA)
+ env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
++ env->src_grp_nr_running = sds->busiest_stat.sum_nr_running;
++
+ if (!env->sd->parent) {
+ /* update overload indicator if we are at root domain */
+ if (env->dst_rq->rd->overload != overload)
+@@ -8133,7 +8136,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+ * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+- sd->nr_balance_failed++;
++ if (env.src_grp_nr_running > 1)
++ sd->nr_balance_failed++;
+
+ if (need_active_balance(&env)) {
+ raw_spin_lock_irqsave(&busiest->lock, flags);
+--
+1.9.1
+
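For readers skimming this patch outside the kernel tree, the core of the change is the extra src_grp_nr_running guard around the failure counter in load_balance(). Below is a minimal standalone C sketch of just that guard; the lb_env_sketch and sched_domain_sketch types and the account_balance_failure() helper are simplified illustrative stand-ins, not the kernel's real structures or code path.

/*
 * Standalone sketch of the failure-counter guard added by this patch.
 * The types below are simplified stand-ins for the kernel's lb_env and
 * sched_domain; only the fields relevant to the guard are kept.
 */
#include <stdio.h>

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

struct lb_env_sketch {
	enum cpu_idle_type idle;
	unsigned int src_grp_nr_running;	/* sum_nr_running of the busiest group */
};

struct sched_domain_sketch {
	unsigned int nr_balance_failed;
};

/*
 * Mirror of the patched logic: only count a failed periodic balance,
 * and only when the busiest group has more than one runnable task, so
 * a lone task cannot push the counter toward an active migration.
 */
static void account_balance_failure(const struct lb_env_sketch *env,
				    struct sched_domain_sketch *sd)
{
	if (env->idle != CPU_NEWLY_IDLE)
		if (env->src_grp_nr_running > 1)
			sd->nr_balance_failed++;
}

int main(void)
{
	struct sched_domain_sketch sd = { .nr_balance_failed = 0 };
	struct lb_env_sketch single = { .idle = CPU_NOT_IDLE, .src_grp_nr_running = 1 };
	struct lb_env_sketch busy   = { .idle = CPU_NOT_IDLE, .src_grp_nr_running = 3 };

	account_balance_failure(&single, &sd);	/* not counted: busiest group has one task */
	account_balance_failure(&busy, &sd);	/* counted: busiest group has three tasks */

	printf("nr_balance_failed = %u\n", sd.nr_balance_failed);	/* prints 1 */
	return 0;
}

Compiled with a plain C compiler, the example prints nr_balance_failed = 1: the single-task case is skipped, so only the multi-task group contributes a failed periodic balance, which is exactly the behaviour the patch description argues for.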