| author | Frode Isaksen <fisaksen@baylibre.com> | 2017-12-19 11:15:35 +0000 |
|---|---|---|
| committer | Jan-Simon Moeller <jsmoeller@linuxfoundation.org> | 2018-02-07 11:47:29 +0000 |
| commit | c4a6287185179732dfc1e903c195ff90c19f1065 (patch) | |
| tree | d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch | |
| parent | 109dea1d5c5a38807b098b588584636ae636a302 (diff) | |
This layer provides Energy Aware Scheduling (EAS) patches
For the moment only for Renesas R-Car Gen3 SoCs.
Can be expanded to other SoCs by setting the machine
feature biglittle and providing the relevant EAS patches.
Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch')
-rw-r--r-- | meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch | 109 |
1 file changed, 109 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch
new file mode 100644
index 0000000..dab8b97
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0051-sched-Relocated-cpu_util-and-change-return-type.patch
@@ -0,0 +1,109 @@
+From 88ce1ba602ccc9479feb7837f2c7bc074379561c Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Thu, 11 Dec 2014 15:25:29 +0000
+Subject: [PATCH 51/92] sched: Relocated cpu_util() and change return type
+
+Move cpu_util() to an earlier position in fair.c and change return
+type to unsigned long as negative usage doesn't make much sense. All
+other load and capacity related functions use unsigned long including
+the caller of cpu_util().
+
+cc: Ingo Molnar <mingo@redhat.com>
+cc: Peter Zijlstra <peterz@infradead.org>
+
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+(cherry picked from commit c5aa28c6e0792ee8a994d3e401fc619694d01f5f)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 68 ++++++++++++++++++++++++++---------------------------
+ 1 file changed, 34 insertions(+), 34 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c7d9bbf..0085d4f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5274,6 +5274,40 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
+ 
+ #endif
+ 
++/*
++ * cpu_util returns the amount of capacity of a CPU that is used by CFS
++ * tasks. The unit of the return value must be the one of capacity so we can
++ * compare the utilization with the capacity of the CPU that is available for
++ * CFS task (ie cpu_capacity).
++ *
++ * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
++ * recent utilization of currently non-runnable tasks on a CPU. It represents
++ * the amount of utilization of a CPU in the range [0..capacity_orig] where
++ * capacity_orig is the cpu_capacity available at the highest frequency
++ * (arch_scale_freq_capacity()).
++ * The utilization of a CPU converges towards a sum equal to or less than the
++ * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
++ * the running time on this CPU scaled by capacity_curr.
++ *
++ * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
++ * higher than capacity_orig because of unfortunate rounding in
++ * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
++ * the average stabilizes with the new running time. We need to check that the
++ * utilization stays within the range of [0..capacity_orig] and cap it if
++ * necessary. Without utilization capping, a group could be seen as overloaded
++ * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
++ * available capacity. We allow utilization to overshoot capacity_curr (but not
++ * capacity_orig) as it useful for predicting the capacity required after task
++ * migrations (scheduler-driven DVFS).
++ */
++static unsigned long cpu_util(int cpu)
++{
++	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
++	unsigned long capacity = capacity_orig_of(cpu);
++
++	return (util >= capacity) ? capacity : util;
++}
++
+ static void record_wakee(struct task_struct *p)
+ {
+ 	/*
+@@ -5783,40 +5817,6 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
+ 	return target;
+ }
+ 
+-/*
+- * cpu_util returns the amount of capacity of a CPU that is used by CFS
+- * tasks. The unit of the return value must be the one of capacity so we can
+- * compare the utilization with the capacity of the CPU that is available for
+- * CFS task (ie cpu_capacity).
+- *
+- * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
+- * recent utilization of currently non-runnable tasks on a CPU. It represents
+- * the amount of utilization of a CPU in the range [0..capacity_orig] where
+- * capacity_orig is the cpu_capacity available at the highest frequency
+- * (arch_scale_freq_capacity()).
+- * The utilization of a CPU converges towards a sum equal to or less than the
+- * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
+- * the running time on this CPU scaled by capacity_curr.
+- *
+- * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
+- * higher than capacity_orig because of unfortunate rounding in
+- * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
+- * the average stabilizes with the new running time. We need to check that the
+- * utilization stays within the range of [0..capacity_orig] and cap it if
+- * necessary. Without utilization capping, a group could be seen as overloaded
+- * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
+- * available capacity. We allow utilization to overshoot capacity_curr (but not
+- * capacity_orig) as it useful for predicting the capacity required after task
+- * migrations (scheduler-driven DVFS).
+- */
+-static int cpu_util(int cpu)
+-{
+-	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+-	unsigned long capacity = capacity_orig_of(cpu);
+-
+-	return (util >= capacity) ? capacity : util;
+-}
+-
+ static inline int task_util(struct task_struct *p)
+ {
+ 	return p->se.avg.util_avg;
+-- 
+1.9.1
+
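The capping behaviour described in the patch comment above can be seen in isolation with a small userspace sketch. This is not kernel code: capacity_orig_of() is replaced by a stand-in that returns a fixed capacity of 1024 (the kernel's SCHED_CAPACITY_SCALE), and the util_avg samples are made-up values chosen only to show the clamp that cpu_util() performs.

```c
#include <stdio.h>

/* Stand-in for the kernel's capacity_orig_of(cpu): assume every CPU has a
 * capacity of 1024 at its highest frequency (hypothetical fixed value). */
static unsigned long capacity_orig_of(int cpu)
{
	(void)cpu;
	return 1024;
}

/* Simplified version of the relocated cpu_util(): report the raw util_avg
 * as-is, but cap it at capacity_orig so callers never see a utilization
 * above the CPU's maximum capacity. */
static unsigned long cpu_util_demo(int cpu, unsigned long util_avg)
{
	unsigned long capacity = capacity_orig_of(cpu);

	return (util_avg >= capacity) ? capacity : util_avg;
}

int main(void)
{
	/* Hypothetical util_avg samples: the last two overshoot capacity
	 * (as can happen right after task migrations) and get capped. */
	unsigned long samples[] = { 0, 512, 1024, 1240, 2048 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("util_avg=%lu -> cpu_util=%lu\n",
		       samples[i], cpu_util_demo(0, samples[i]));

	return 0;
}
```

The clamp itself is unchanged by the patch; what changes is the return type (int to unsigned long), so the capped value stays in the same unsigned domain as the other load and capacity helpers that consume it.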