path: root/meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch
author    Frode Isaksen <fisaksen@baylibre.com>    2017-12-19 11:15:35 +0000
committer Jan-Simon Moeller <jsmoeller@linuxfoundation.org>    2018-02-07 11:47:29 +0000
commit    c4a6287185179732dfc1e903c195ff90c19f1065 (patch)
tree      d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch
parent    109dea1d5c5a38807b098b588584636ae636a302 (diff)
This layer provides Energy Aware Scheduling (EAS) patches
For the moment these are only for Renesas R-Car Gen3 SoCs. Coverage can be expanded to other SoCs by setting the "biglittle" machine feature and providing the relevant EAS patches.

Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch')
-rw-r--r--    meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch    72
1 file changed, 72 insertions(+), 0 deletions(-)
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch
new file mode 100644
index 0000000..3d7412a
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0011-sched-fair-Propagate-asynchrous-detach.patch
@@ -0,0 +1,72 @@
+From 02927c812e8cd4ebd635e567ccd935d9e87075b8 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Tue, 8 Nov 2016 10:53:46 +0100
+Subject: [PATCH 11/92] sched/fair: Propagate asynchrous detach
+
+A task can be asynchronously detached from cfs_rq when migrating
+between CPUs. The load of the migrated task is then removed from
+source cfs_rq during its next update. We use this event to set
+propagation flag.
+
+During the load balance, we take advantage of the update of blocked
+load to propagate any pending changes.
+
+The propagation relies on patch:
+
+ "sched: Fix hierarchical order in rq->leaf_cfs_rq_list"
+
+... which orders children and parents, to ensure that it's done in one pass.
+
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Morten.Rasmussen@arm.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bsegall@google.com
+Cc: kernellwp@gmail.com
+Cc: pjt@google.com
+Cc: yuyang.du@intel.com
+Link: http://lkml.kernel.org/r/1478598827-32372-6-git-send-email-vincent.guittot@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit 4e5160766fcc9f41bbd38bac11f92dce993644aa)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 8cf26fd..090a9bb 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3219,6 +3219,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+ sub_positive(&sa->load_avg, r);
+ sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+ removed_load = 1;
++ set_tg_cfs_propagate(cfs_rq);
+ }
+
+ if (atomic_long_read(&cfs_rq->removed_util_avg)) {
+@@ -3226,6 +3227,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+ sub_positive(&sa->util_avg, r);
+ sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+ removed_util = 1;
++ set_tg_cfs_propagate(cfs_rq);
+ }
+
+ decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+@@ -6872,6 +6874,10 @@ static void update_blocked_averages(int cpu)
+
+ if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
+ update_tg_load_avg(cfs_rq, 0);
++
++ /* Propagate pending load changes to the parent */
++ if (cfs_rq->tg->se[cpu])
++ update_load_avg(cfs_rq->tg->se[cpu], 0);
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+--
+1.9.1
+
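The mechanism described in the commit message above can be summarised with a small, self-contained userspace C model. This is an illustrative sketch only, not the kernel implementation: the type toy_cfs_rq and the functions remove_load_async() and update_load() are hypothetical stand-ins for cfs_rq, the removed_load_avg accounting, and the set_tg_cfs_propagate()/update_load_avg() pair touched by the hunks above.

/*
 * Toy model of the scheme in this patch, for illustration only: a
 * migrating task's load is detached asynchronously (the remote CPU
 * merely accumulates the removed amount), and the next blocked-load
 * update applies it and raises a propagate flag so the parent picks
 * up the change in the same child-before-parent pass.
 */
#include <stdio.h>

struct toy_cfs_rq {
	const char *name;
	long load_avg;       /* aggregated load of this group */
	long removed_load;   /* written asynchronously on migration */
	long prop_delta;     /* pending change for the parent */
	int propagate;       /* analogue of the patch's propagation flag */
	struct toy_cfs_rq *parent;
};

/* Remote CPU detaching a migrated task: only records the amount. */
static void remove_load_async(struct toy_cfs_rq *cfs_rq, long load)
{
	cfs_rq->removed_load += load;
}

/* One blocked-load update, as the load balance pass would run it. */
static void update_load(struct toy_cfs_rq *cfs_rq)
{
	if (cfs_rq->removed_load) {
		/* analogue of sub_positive() + set_tg_cfs_propagate() */
		cfs_rq->load_avg -= cfs_rq->removed_load;
		cfs_rq->prop_delta -= cfs_rq->removed_load;
		cfs_rq->removed_load = 0;
		cfs_rq->propagate = 1;
	}
	if (cfs_rq->propagate) {
		/* analogue of update_load_avg() on the group entity */
		if (cfs_rq->parent) {
			cfs_rq->parent->load_avg += cfs_rq->prop_delta;
			cfs_rq->parent->prop_delta += cfs_rq->prop_delta;
			cfs_rq->parent->propagate = 1;
		}
		cfs_rq->prop_delta = 0;
		cfs_rq->propagate = 0;
	}
}

int main(void)
{
	struct toy_cfs_rq root  = { "root",  2048, 0, 0, 0, NULL  };
	struct toy_cfs_rq child = { "child", 1024, 0, 0, 0, &root };

	remove_load_async(&child, 512);  /* task migrated to another CPU */

	/*
	 * Child before parent: this ordering is what the prerequisite
	 * patch "sched: Fix hierarchical order in rq->leaf_cfs_rq_list"
	 * guarantees, and it is why a single pass suffices.
	 */
	update_load(&child);
	update_load(&root);

	printf("%s: load_avg=%ld\n", child.name, child.load_avg); /* 512 */
	printf("%s: load_avg=%ld\n", root.name, root.load_avg);   /* 1536 */
	return 0;
}

Compiled and run, the sketch prints load_avg=512 for the child and load_avg=1536 for the root: the asynchronously removed load reaches the parent within a single update pass, which is exactly what the child-before-parent ordering of rq->leaf_cfs_rq_list makes possible.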