path: root/meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch
author    Frode Isaksen <fisaksen@baylibre.com>    2017-12-19 11:15:35 +0000
committer Jan-Simon Moeller <jsmoeller@linuxfoundation.org>    2018-02-07 11:47:29 +0000
commit    c4a6287185179732dfc1e903c195ff90c19f1065 (patch)
tree      d35f5010dbd952e40f5c178322026445b55757c1 /meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch
parent    109dea1d5c5a38807b098b588584636ae636a302 (diff)
This layer provides Energy Aware Scheduling (EAS) patches
For the moment only for Renesas R-Car Gen3 SoCs. Can be expanded to other SoCs by setting the machine feature biglittle and providing the relevant EAS patches.
Bug-AGL: SPEC-813
Change-Id: I2b5e69c515c33e57be19b30466fe208d7b8ac1a5
Signed-off-by: Frode Isaksen <fisaksen@baylibre.com>
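The commit message points at MACHINE_FEATURES as the switch for enabling this layer's patches on a given board. As a rough sketch only (the exact file and override syntax depend on the BSP; the assignment below is illustrative and not taken from this layer):

    # In the machine's .conf or local.conf -- illustrative sketch
    MACHINE_FEATURES += "biglittle"

With the feature set, the layer's linux-renesas recipe append is presumably what applies patch series such as the one added here.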
Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch')
-rw-r--r--    meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch    141
1 file changed, 141 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch
new file mode 100644
index 0000000..df0388a
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0014-sched-fair-Clean-up-the-tunable-parameter-definition.patch
@@ -0,0 +1,141 @@
+From 05871df5280460c20d17434132329bebb27a2cfa Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@kernel.org>
+Date: Wed, 23 Nov 2016 07:37:00 +0100
+Subject: [PATCH 14/92] sched/fair: Clean up the tunable parameter definitions
+
+No change in functionality:
+
+ - align the default values vertically to make them easier to scan
+ - standardize the 'default:' lines
+ - fix minor whitespace typos
+
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit 2b4d5b2582deffb77b3b4b48a59cd36e9e1e14d9)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ kernel/sched/fair.c | 50 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 28 insertions(+), 22 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 02605f2..aa47589 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -37,7 +37,6 @@
+
+ /*
+ * Targeted preemption latency for CPU-bound tasks:
+- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * NOTE: this latency value is not the same as the concept of
+ * 'timeslice length' - timeslices in CFS are of variable length
+@@ -46,31 +45,35 @@
+ *
+ * (to see the precise effective timeslice length of your workload,
+ * run vmstat and monitor the context-switches (cs) field)
++ *
++ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
+ */
+-unsigned int sysctl_sched_latency = 6000000ULL;
+-unsigned int normalized_sysctl_sched_latency = 6000000ULL;
++unsigned int sysctl_sched_latency = 6000000ULL;
++unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+
+ /*
+ * The initial- and re-scaling of tunables is configurable
+- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ *
+ * Options are:
+- * SCHED_TUNABLESCALING_NONE - unscaled, always *1
+- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
+- * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
++ *
++ * SCHED_TUNABLESCALING_NONE - unscaled, always *1
++ * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
++ * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
++ *
++ * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ */
+-enum sched_tunable_scaling sysctl_sched_tunable_scaling
+- = SCHED_TUNABLESCALING_LOG;
++enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+
+ /*
+ * Minimal preemption granularity for CPU-bound tasks:
++ *
+ * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
+-unsigned int sysctl_sched_min_granularity = 750000ULL;
+-unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
++unsigned int sysctl_sched_min_granularity = 750000ULL;
++unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+
+ /*
+- * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
++ * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
+ */
+ static unsigned int sched_nr_latency = 8;
+
+@@ -82,16 +85,17 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
+
+ /*
+ * SCHED_OTHER wake-up granularity.
+- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
++ *
++ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ */
+-unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+-unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
++unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
++unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+
+-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
++const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+
+ #ifdef CONFIG_CFS_BANDWIDTH
+ /*
+@@ -102,16 +106,18 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
+ * to consumption or the quota being specified to be smaller than the slice)
+ * we will always only issue the remaining available time.
+ *
+- * default: 5 msec, units: microseconds
+- */
+-unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
++ * (default: 5 msec, units: microseconds)
++ */
++unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+ #endif
+
+ /*
+ * The margin used when comparing utilization with CPU capacity:
+ * util * margin < capacity * 1024
++ *
++ * (default: ~20%)
+ */
+-unsigned int capacity_margin = 1280; /* ~20% */
++unsigned int capacity_margin = 1280;
+
+ static inline void update_load_add(struct load_weight *lw, unsigned long inc)
+ {
+@@ -7174,8 +7180,8 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
+ * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
+ * Something like:
+ *
+- * { 0 1 2 3 } { 4 5 6 7 }
+- * * * * *
++ * { 0 1 2 3 } { 4 5 6 7 }
++ * * * * *
+ *
+ * If we were to balance group-wise we'd place two tasks in the first group and
+ * two tasks in the second group. Clearly this is undesired as it will overload
+--
+1.9.1
+
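The comment blocks touched by this patch encode two small relationships: under the default SCHED_TUNABLESCALING_LOG, the base tunables are scaled by (1 + ilog(ncpus)), and capacity_margin = 1280 means a CPU is treated as having spare capacity only while util * margin < capacity * 1024, i.e. roughly while util stays below 80% of capacity. The stand-alone C sketch below illustrates both; it is user-space code for illustration only (ilog2() is reimplemented here and the names merely mirror the patch), not kernel code from this series.

/* Illustrative user-space sketch of the relationships described in the
 * tunable comments above; the constants mirror the patch defaults. */
#include <stdio.h>

static unsigned int ilog2(unsigned int n)       /* floor(log2(n)), n >= 1 */
{
        unsigned int r = 0;

        while (n >>= 1)
                r++;
        return r;
}

int main(void)
{
        const unsigned int base_latency_ns = 6000000;   /* 6 ms default   */
        const unsigned int capacity_margin = 1280;      /* ~20% headroom  */
        const unsigned int capacity        = 1024;      /* one full CPU   */
        unsigned int ncpus, util;

        /* SCHED_TUNABLESCALING_LOG: latency = 6 ms * (1 + ilog(ncpus)) */
        for (ncpus = 1; ncpus <= 8; ncpus *= 2)
                printf("ncpus=%u -> sched_latency=%u ns\n",
                       ncpus, base_latency_ns * (1 + ilog2(ncpus)));

        /* Spare-capacity check: util * margin < capacity * 1024, so with
         * margin 1280 the cut-off sits near util = 819 (~80% of 1024). */
        for (util = 700; util <= 900; util += 100)
                printf("util=%u fits=%d\n",
                       util, util * capacity_margin < capacity * 1024);

        return 0;
}

On 4 CPUs, for example, the scaled latency comes out to 18 ms, and a utilization of 900 no longer passes the margin check.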