author | Frode Isaksen <fisaksen@baylibre.com> | 2017-12-19 11:15:35 +0000 |
---|---|---|
committer | Jan-Simon Moeller <jsmoeller@linuxfoundation.org> | 2018-02-07 11:47:29 +0000 |
commit | c4a6287185179732dfc1e903c195ff90c19f1065 (patch) | |
tree | d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch | |
parent | 109dea1d5c5a38807b098b588584636ae636a302 (diff) |
This layer provides Energy Aware Scheduling (EAS) patches,
for the moment only for the Renesas R-Car Gen3 SoCs.
It can be expanded to other SoCs by setting the machine
feature biglittle and providing the relevant EAS patches.
Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch')
-rw-r--r-- | meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch | 242 |
1 file changed, 242 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch
new file mode 100644
index 0000000..b5c051c
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0009-sched-fair-Factorize-PELT-update.patch
@@ -0,0 +1,242 @@
+From 5fce03ae44059401dcf219ca80a53850fed51932 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Tue, 8 Nov 2016 10:53:44 +0100
+Subject: [PATCH 09/92] sched/fair: Factorize PELT update
+
+Every time we modify load/utilization of sched_entity, we start to
+sync it with its cfs_rq. This update is done in different ways:
+
+ - when attaching/detaching a sched_entity, we update cfs_rq and then
+   we sync the entity with the cfs_rq.
+
+ - when enqueueing/dequeuing the sched_entity, we update both
+   sched_entity and cfs_rq metrics to now.
+
+Use update_load_avg() everytime we have to update and sync cfs_rq and
+sched_entity before changing the state of a sched_enity.
+
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Morten.Rasmussen@arm.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bsegall@google.com
+Cc: kernellwp@gmail.com
+Cc: pjt@google.com
+Cc: yuyang.du@intel.com
+Link: http://lkml.kernel.org/r/1478598827-32372-4-git-send-email-vincent.guittot@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit d31b1a66cbe0931733583ad9d9e8c6cfd710907d)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 76 ++++++++++++++++++-----------------------------------
+ 1 file changed, 25 insertions(+), 51 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 4a67026..d707ad0 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3092,8 +3092,14 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
+ return decayed || removed_load;
+ }
+
++/*
++ * Optional action to be done while updating the load average
++ */
++#define UPDATE_TG 0x1
++#define SKIP_AGE_LOAD 0x2
++
+ /* Update task and its cfs_rq load average */
+-static inline void update_load_avg(struct sched_entity *se, int update_tg)
++static inline void update_load_avg(struct sched_entity *se, int flags)
+ {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 now = cfs_rq_clock_task(cfs_rq);
+@@ -3104,11 +3110,13 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
+ * Track task load average for carrying it to new CPU after migrated, and
+ * track group sched_entity load average for task_h_load calc in migration
+ */
+- __update_load_avg(now, cpu, &se->avg,
++ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
++ __update_load_avg(now, cpu, &se->avg,
+ se->on_rq * scale_load_down(se->load.weight),
+ cfs_rq->curr == se, NULL);
++ }
+
+- if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
++ if (update_cfs_rq_load_avg(now, cfs_rq, true) && (flags & UPDATE_TG))
+ update_tg_load_avg(cfs_rq, 0);
+ }
+
+@@ -3122,26 +3130,6 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
+ */
+ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- if (!sched_feat(ATTACH_AGE_LOAD))
+- goto skip_aging;
+-
+- /*
+- * If we got migrated (either between CPUs or between cgroups) we'll
+- * have aged the average right before clearing @last_update_time.
+- *
+- * Or we're fresh through post_init_entity_util_avg().
+- */
+- if (se->avg.last_update_time) {
+- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+- &se->avg, 0, 0, NULL);
+-
+- /*
+- * XXX: we could have just aged the entire load away if we've been
+- * absent from the fair class for too long.
+- */
+- }
+-
+-skip_aging:
+ se->avg.last_update_time = cfs_rq->avg.last_update_time;
+ cfs_rq->avg.load_avg += se->avg.load_avg;
+ cfs_rq->avg.load_sum += se->avg.load_sum;
+@@ -3161,9 +3149,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ */
+ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
+- &se->avg, se->on_rq * scale_load_down(se->load.weight),
+- cfs_rq->curr == se, NULL);
+
+ sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+ sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+@@ -3178,34 +3163,20 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+ struct sched_avg *sa = &se->avg;
+- u64 now = cfs_rq_clock_task(cfs_rq);
+- int migrated, decayed;
+-
+- migrated = !sa->last_update_time;
+- if (!migrated) {
+- __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+- se->on_rq * scale_load_down(se->load.weight),
+- cfs_rq->curr == se, NULL);
+- }
+-
+- decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
+
+ cfs_rq->runnable_load_avg += sa->load_avg;
+ cfs_rq->runnable_load_sum += sa->load_sum;
+
+- if (migrated)
++ if (!sa->last_update_time) {
+ attach_entity_load_avg(cfs_rq, se);
+-
+- if (decayed || migrated)
+ update_tg_load_avg(cfs_rq, 0);
++ }
+ }
+
+ /* Remove the runnable load generated by se from cfs_rq's runnable load average */
+ static inline void
+ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ {
+- update_load_avg(se, 1);
+-
+ cfs_rq->runnable_load_avg =
+ max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
+ cfs_rq->runnable_load_sum =
+@@ -3289,7 +3260,10 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
+ return 0;
+ }
+
+-static inline void update_load_avg(struct sched_entity *se, int not_used)
++#define UPDATE_TG 0x0
++#define SKIP_AGE_LOAD 0x0
++
++static inline void update_load_avg(struct sched_entity *se, int not_used1)
+ {
+ cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
+ }
+@@ -3434,6 +3408,7 @@ static inline void check_schedstat_required(void)
+ if (renorm && !curr)
+ se->vruntime += cfs_rq->min_vruntime;
+
++ update_load_avg(se, UPDATE_TG);
+ enqueue_entity_load_avg(cfs_rq, se);
+ account_entity_enqueue(cfs_rq, se);
+ update_cfs_shares(cfs_rq);
+@@ -3508,6 +3483,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
++ update_load_avg(se, UPDATE_TG);
+ dequeue_entity_load_avg(cfs_rq, se);
+
+ update_stats_dequeue(cfs_rq, se, flags);
+@@ -3595,7 +3571,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ */
+ update_stats_wait_end(cfs_rq, se);
+ __dequeue_entity(cfs_rq, se);
+- update_load_avg(se, 1);
++ update_load_avg(se, UPDATE_TG);
+ }
+
+ update_stats_curr_start(cfs_rq, se);
+@@ -3713,7 +3689,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+ /*
+ * Ensure that runnable average is periodically updated.
+ */
+- update_load_avg(curr, 1);
++ update_load_avg(curr, UPDATE_TG);
+ update_cfs_shares(cfs_rq);
+
+ #ifdef CONFIG_SCHED_HRTICK
+@@ -4610,7 +4586,7 @@ static inline void hrtick_update(struct rq *rq)
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+
+- update_load_avg(se, 1);
++ update_load_avg(se, UPDATE_TG);
+ update_cfs_shares(cfs_rq);
+ }
+
+@@ -4669,7 +4645,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+
+- update_load_avg(se, 1);
++ update_load_avg(se, UPDATE_TG);
+ update_cfs_shares(cfs_rq);
+ }
+
+@@ -8821,10 +8797,9 @@ static inline bool vruntime_normalized(struct task_struct *p)
+ static void detach_entity_cfs_rq(struct sched_entity *se)
+ {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+- u64 now = cfs_rq_clock_task(cfs_rq);
+
+ /* Catch up with the cfs_rq and remove our load when we leave */
+- update_cfs_rq_load_avg(now, cfs_rq, false);
++ update_load_avg(se, 0);
+ detach_entity_load_avg(cfs_rq, se);
+ update_tg_load_avg(cfs_rq, false);
+ }
+@@ -8832,7 +8807,6 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
+ static void attach_entity_cfs_rq(struct sched_entity *se)
+ {
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+- u64 now = cfs_rq_clock_task(cfs_rq);
+
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ /*
+@@ -8843,7 +8817,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+ #endif
+
+ /* Synchronize entity with its cfs_rq */
+- update_cfs_rq_load_avg(now, cfs_rq, false);
++ update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+ attach_entity_load_avg(cfs_rq, se);
+ update_tg_load_avg(cfs_rq, false);
+ }
+--
+1.9.1
+
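A note on the change itself, for readers skimming the patch above: it replaces the open-coded __update_load_avg()/update_cfs_rq_load_avg() calls at each call site with a single update_load_avg() entry point whose second argument is now a bitmask of optional actions (UPDATE_TG, SKIP_AGE_LOAD) rather than a bare update_tg integer. The standalone C sketch below only illustrates that flag pattern; it is not kernel code, and the type and helper names (sched_entity_sketch, age_entity_load, propagate_to_task_group) are simplified stand-ins invented for this example.

#include <stdio.h>

/* Optional actions selectable by the caller, mirroring the patch's flags. */
#define UPDATE_TG     0x1  /* also propagate the update to the task group */
#define SKIP_AGE_LOAD 0x2  /* skip aging the entity's own running average */

/* Simplified stand-in for struct sched_entity / struct sched_avg. */
struct sched_entity_sketch {
    unsigned long long last_update_time;
    long load_avg;
};

/* Stand-in for __update_load_avg(): age the entity's own average. */
static void age_entity_load(struct sched_entity_sketch *se)
{
    printf("aging entity load (load_avg=%ld)\n", se->load_avg);
}

/* Stand-in for update_tg_load_avg(): propagate to the task group. */
static void propagate_to_task_group(void)
{
    printf("propagating update to the task group\n");
}

/* One entry point; the flags select the optional actions, as in the patch. */
static void update_load_avg_sketch(struct sched_entity_sketch *se, int flags)
{
    if (se->last_update_time && !(flags & SKIP_AGE_LOAD))
        age_entity_load(se);

    if (flags & UPDATE_TG)
        propagate_to_task_group();
}

int main(void)
{
    struct sched_entity_sketch se = { .last_update_time = 1, .load_avg = 42 };

    update_load_avg_sketch(&se, UPDATE_TG);     /* e.g. before enqueue/dequeue */
    update_load_avg_sketch(&se, SKIP_AGE_LOAD); /* e.g. attach without aging */
    return 0;
}

A flag bitmask, unlike the old bare integer argument, leaves room for further optional actions without touching every caller, which is presumably why the patch defines UPDATE_TG and SKIP_AGE_LOAD as distinct bits.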