Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0083-sched-Consider-misfit-tasks-when-load-balancing.patch')
-rw-r--r--  meta-eas/recipes-kernel/linux/linux-renesas/0083-sched-Consider-misfit-tasks-when-load-balancing.patch  180
1 file changed, 180 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0083-sched-Consider-misfit-tasks-when-load-balancing.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0083-sched-Consider-misfit-tasks-when-load-balancing.patch
new file mode 100644
index 0000000..db57a58
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0083-sched-Consider-misfit-tasks-when-load-balancing.patch
@@ -0,0 +1,180 @@
+From 488802cf1168582adc4fdd039ecd5b3dfe652085 Mon Sep 17 00:00:00 2001
+From: Morten Rasmussen <morten.rasmussen@arm.com>
+Date: Tue, 28 Jul 2015 15:42:47 +0100
+Subject: [PATCH 83/92] sched: Consider misfit tasks when load-balancing
+
+With the new group_misfit_task load-balancing scenario, additional policy
+conditions are needed when load-balancing. Misfit task balancing only
+makes sense between a source group with lower capacity than the target
+group. If capacities are the same, fall back to normal group_other
+balancing. The aim is to balance tasks such that no task has its
+throughput hindered by lack of compute capacity while a cpu with more
+capacity is available. Load-balancing is generally based on the average
+load in the sched_groups, but for misfitting tasks it is necessary to
+introduce exceptions that migrate tasks against the usual metrics to
+optimize throughput.
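+
+For a concrete sense of what "misfit" means here (illustration only;
+the fit test and the default capacity_margin of 1280, i.e. roughly 20%
+headroom, are assumed from earlier patches in this series): a task fits
+a cpu when util * capacity_margin < capacity * 1024. On a little cpu
+with capacity 430 this means util must stay below 430 * 1024 / 1280 =
+344; an always-running task saturates well above that, is flagged as a
+misfit, and becomes a candidate for migration to a higher-capacity
+group.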
+
+This patch ensures the following load-balance behaviour for
+always-running tasks on mixed-capacity systems (e.g. ARM big.LITTLE),
+illustrated by the worked example after the two steps:
+
+1. Place a task on each cpu starting in order from cpus with highest
+capacity to lowest until all cpus are in use (i.e. one task on each
+cpu).
+
+2. Once all cpus are in use, balance according to compute capacity such
+that load per capacity is approximately the same regardless of the
+compute capacity (i.e. big cpus get more tasks than little cpus).
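+
+As a worked example (hypothetical topology, for illustration only):
+with 4 big cpus of capacity 1024, 4 little cpus of capacity 512 and 12
+always-running tasks, step 1 places the first 8 tasks one per cpu, big
+cpus first. Step 2 then spreads the remaining 4 tasks so that each big
+cpu ends up with 2 tasks and each little cpu with 1, giving an equal
+load per capacity of 2/1024 = 1/512 on every cpu.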
+
+Necessary changes are introduced in find_busiest_group(),
+calculate_imbalance(), and find_busiest_queue(). This includes passing
+the group_type on to find_busiest_queue() through struct lb_env, which
+currently only considers the amount of imbalance and not the imbalance
+situation (group_type).
+
+To avoid taking remote rq locks to examine source sched_groups for
+misfit tasks, each cpu is responsible for tracking its own misfit tasks
+and updating the rq->misfit_task flag. This means checking task
+utilization when tasks are scheduled and on each sched_tick.
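+
+A minimal sketch of that per-cpu update (illustration only; the
+rq->misfit_task flag and task_fits_max() come from this series, but the
+helper name and exact hook points below are assumptions, not part of
+this patch):
+
+	static inline void update_misfit_task(struct rq *rq,
+					      struct task_struct *p)
+	{
+		/* Flag the rq when its current task no longer fits this cpu. */
+		rq->misfit_task = !task_fits_max(p, cpu_of(rq));
+	}
+
+called from the fair-class task_tick and enqueue paths.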
+
+Change-Id: I458461cebf269d6d4eeac6f83e4c84f4e4d7a9dd
+Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
+(cherry picked from commit bcd0dfada3e8abb6f1d5cff73134fc54ab76f409)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 62 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e7fd15e..966af0b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6169,6 +6169,27 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
+ return min_cap * 1024 < task_util(p) * capacity_margin;
+ }
+
++static inline bool __task_fits(struct task_struct *p, int cpu, int util)
++{
++ unsigned long capacity = capacity_of(cpu);
++
++ return (capacity * 1024) > (util * capacity_margin);
++}
++
++static inline bool task_fits_max(struct task_struct *p, int cpu)
++{
++ unsigned long capacity = capacity_of(cpu);
++ unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity;
++
++ if (capacity == max_capacity)
++ return true;
++
++ if (capacity * capacity_margin > max_capacity * 1024)
++ return true;
++
++ return __task_fits(p, cpu, 0);
++}
++
+ static bool cpu_overutilized(int cpu)
+ {
+ return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+@@ -6904,6 +6925,7 @@ struct lb_env {
+ unsigned int loop_max;
+
+ enum fbq_type fbq_type;
++ enum group_type busiest_group_type;
+ struct list_head tasks;
+ };
+
+@@ -7902,6 +7924,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+ sgs->group_type = group_classify(sg, sgs);
+ }
+
++ /*
++ * Ignore task groups with misfit tasks if local group has no
++ * capacity or if per-cpu capacity isn't higher.
++ */
++ if (sgs->group_type == group_misfit_task &&
++ (!group_has_capacity(env, &sds->local_stat) ||
++ !group_smaller_cpu_capacity(sg, sds->local)))
++ sgs->group_type = group_other;
++
+ if (update_sd_pick_busiest(env, sds, sg, sgs)) {
+ sds->busiest = sg;
+ sds->busiest_stat = *sgs;
+@@ -8082,6 +8113,22 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ */
+ if (busiest->avg_load <= sds->avg_load ||
+ local->avg_load >= sds->avg_load) {
++ /* Misfitting tasks should be migrated in any case */
++ if (busiest->group_type == group_misfit_task) {
++ env->imbalance = busiest->group_misfit_task;
++ return;
++ }
++
++ /*
++ * Busiest group is overloaded, local is not, use the spare
++ * cycles to maximize throughput
++ */
++ if (busiest->group_type == group_overloaded &&
++ local->group_type <= group_misfit_task) {
++ env->imbalance = busiest->load_per_task;
++ return;
++ }
++
+ env->imbalance = 0;
+ return fix_small_imbalance(env, sds);
+ }
+@@ -8115,6 +8162,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ (sds->avg_load - local->avg_load) * local->group_capacity
+ ) / SCHED_CAPACITY_SCALE;
+
++ /* Boost imbalance to allow misfit task to be balanced. */
++ if (busiest->group_type == group_misfit_task)
++ env->imbalance = max_t(long, env->imbalance,
++ busiest->group_misfit_task);
++
+ /*
+ * if *imbalance is less than the average load per runnable task
+ * there is no guarantee that any tasks will be moved so we'll have
+@@ -8181,6 +8233,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ busiest->group_no_capacity)
+ goto force_balance;
+
++ /* Misfitting tasks should be dealt with regardless of the avg load */
++ if (busiest->group_type == group_misfit_task) {
++ goto force_balance;
++ }
++
+ /*
+ * If the local group is busier than the selected busiest group
+ * don't try and pull any tasks.
+@@ -8204,7 +8261,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ * might end up to just move the imbalance on another group
+ */
+ if ((busiest->group_type != group_overloaded) &&
+- (local->idle_cpus <= (busiest->idle_cpus + 1)))
++ (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
++ !group_smaller_cpu_capacity(sds.busiest, sds.local))
+ goto out_balanced;
+ } else {
+ /*
+@@ -8217,6 +8275,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ }
+
+ force_balance:
++ env->busiest_group_type = busiest->group_type;
+ /* Looks like there is an imbalance. Compute it */
+ calculate_imbalance(env, &sds);
+ return sds.busiest;
+@@ -8275,7 +8334,8 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+ */
+
+ if (rq->nr_running == 1 && wl > env->imbalance &&
+- !check_cpu_capacity(rq, env->sd))
++ !check_cpu_capacity(rq, env->sd) &&
++ env->busiest_group_type != group_misfit_task)
+ continue;
+
+ /*
+--
+1.9.1
+