Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0080-sched-core-Fix-find_idlest_group-for-fork.patch')
-rw-r--r-- meta-eas/recipes-kernel/linux/linux-renesas/0080-sched-core-Fix-find_idlest_group-for-fork.patch | 86
1 file changed, 86 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0080-sched-core-Fix-find_idlest_group-for-fork.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0080-sched-core-Fix-find_idlest_group-for-fork.patch
new file mode 100644
index 0000000..ccf260c
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0080-sched-core-Fix-find_idlest_group-for-fork.patch
@@ -0,0 +1,86 @@
+From 4e4846aafb09473f1ca69fa061e72437b55f78c7 Mon Sep 17 00:00:00 2001
+From: Vincent Guittot <vincent.guittot@linaro.org>
+Date: Thu, 8 Dec 2016 17:56:53 +0100
+Subject: [PATCH 80/92] sched/core: Fix find_idlest_group() for fork
+
+During fork, the utilization of a task is initialized only once the rq
+has been selected, because the current utilization level of that rq is
+used to set the utilization of the forked task. As the task's utilization
+is still 0 at this step of the fork sequence, it doesn't make sense to
+look for spare capacity that can fit the task's utilization.
+Furthermore, I can see performance regressions for the test:
+
+ hackbench -P -g 1
+
+because the least-loaded policy is always bypassed and tasks are not
+spread out during fork.
+
+With this patch and the fix below, we are back to the same performance
+as with v4.8. The fix below is only a temporary one, used for this test
+until a smarter solution is found, because we can't simply remove a
+check that is useful for other benchmarks:
+
+| @@ -5708,13 +5708,6 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
+|
+| avg_cost = this_sd->avg_scan_cost;
+|
+| - /*
+| - * Due to large variance we need a large fuzz factor; hackbench in
+| - * particularly is sensitive here.
+| - */
+| - if ((avg_idle / 512) < avg_cost)
+| - return -1;
+| -
+| time = local_clock();
+|
+| for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+
+Tested-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Acked-by: Morten Rasmussen <morten.rasmussen@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: dietmar.eggemann@arm.com
+Cc: kernellwp@gmail.com
+Cc: umgwanakikbuti@gmail.com
+Cc: yuyang.du@intel.com
+Link: http://lkml.kernel.org/r/1481216215-24651-2-git-send-email-vincent.guittot@linaro.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit f2e4ac502860c2adcc5ce56e0064fc4bcab90af4)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 489f6d3..fea3ca8 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5809,13 +5809,21 @@ static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+ 	 * utilized systems if we require spare_capacity > task_util(p),
+ 	 * so we allow for some task stuffing by using
+ 	 * spare_capacity > task_util(p)/2.
++	 *
++	 * Spare capacity can't be used for fork because the utilization has
++	 * not been set yet, we must first select a rq to compute the initial
++	 * utilization.
+ 	 */
++	if (sd_flag & SD_BALANCE_FORK)
++		goto skip_spare;
++
+ 	if (this_spare > task_util(p) / 2 &&
+ 	    imbalance*this_spare > 100*most_spare)
+ 		return NULL;
+ 	else if (most_spare > task_util(p) / 2)
+ 		return most_spare_sg;
+ 
++skip_spare:
+ 	if (!idlest || 100*this_load < imbalance*min_load)
+ 		return NULL;
+ 	return idlest;
+--
+1.9.1
+
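As a quick, self-contained illustration of the behaviour described in the commit message above, here is a minimal sketch of the group-selection decision. It is not part of the patch or of kernel/sched/fair.c; struct group, pick_group(), the flag value and the example numbers are made-up stand-ins, and only the control flow (bypass the spare-capacity test for SD_BALANCE_FORK, then fall back to the least-loaded comparison) mirrors the change above.

/*
 * Illustrative sketch only -- not kernel code. Types, pick_group() and
 * the flag value are simplified stand-ins for the real scheduler code.
 */
#include <stdio.h>

#define SD_BALANCE_FORK 0x08	/* placeholder flag value for this sketch */

struct group {
	const char *name;
	unsigned long spare;	/* spare capacity of the candidate group */
};

/*
 * Choose between the least-loaded group and the group with the most
 * spare capacity. On fork the task's utilization is still 0, so the
 * spare-capacity comparison is skipped and only least-load decides.
 */
static const struct group *pick_group(const struct group *idlest,
				      const struct group *most_spare_sg,
				      unsigned long this_load,
				      unsigned long min_load,
				      unsigned long this_spare,
				      unsigned long task_util,
				      unsigned long imbalance,
				      int sd_flag)
{
	if (sd_flag & SD_BALANCE_FORK)
		goto skip_spare;

	if (this_spare > task_util / 2 &&
	    imbalance * this_spare > 100 * most_spare_sg->spare)
		return NULL;			/* stay on the local group */
	else if (most_spare_sg->spare > task_util / 2)
		return most_spare_sg;

skip_spare:
	if (!idlest || 100 * this_load < imbalance * min_load)
		return NULL;			/* stay on the local group */
	return idlest;
}

int main(void)
{
	struct group idlest = { "idlest", 300 };
	struct group spare  = { "most_spare", 800 };
	const struct group *g;

	/*
	 * Fork: task_util is 0, the spare-capacity test is bypassed and
	 * the task spreads to the idlest group instead of staying local.
	 */
	g = pick_group(&idlest, &spare, 500, 100, 900, 0, 125, SD_BALANCE_FORK);
	printf("fork   -> %s\n", g ? g->name : "local group");

	/* Wakeup: plenty of local spare capacity, the task stays local. */
	g = pick_group(&idlest, &spare, 500, 100, 900, 200, 125, 0);
	printf("wakeup -> %s\n", g ? g->name : "local group");
	return 0;
}

Built with any C compiler (for example, cc sketch.c), the sketch prints that the forked task is placed on the idlest group while the waking task stays on its local group, which is the task spreading during fork that the commit message says was lost before this fix.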