Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0001-sched-fair-Kill-the-unused-sched_shares_window_ns-tu.patch')
-rw-r--r-- | meta-eas/recipes-kernel/linux/linux-renesas/0001-sched-fair-Kill-the-unused-sched_shares_window_ns-tu.patch | 80
1 file changed, 80 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0001-sched-fair-Kill-the-unused-sched_shares_window_ns-tu.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0001-sched-fair-Kill-the-unused-sched_shares_window_ns-tu.patch
new file mode 100644
index 0000000..5477ade
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0001-sched-fair-Kill-the-unused-sched_shares_window_ns-tu.patch
@@ -0,0 +1,80 @@
+From d1f3667faa908a76aeb6e54f38b1e1d4b2a3ce2b Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Wed, 19 Oct 2016 15:10:59 +0100
+Subject: [PATCH 01/92] sched/fair: Kill the unused 'sched_shares_window_ns'
+ tunable
+
+The last user of this tunable was removed in 2012 in commit:
+
+  82958366cfea ("sched: Replace update_shares weight distribution with per-entity computation")
+
+Delete it since its very existence confuses people.
+
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
+Cc: Paul Turner <pjt@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20161019141059.26408-1-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+(cherry picked from commit 3c3fcb45d524feb5d14a14f332e3eec7f2aff8f3)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ include/linux/sched/sysctl.h | 1 -
+ kernel/sched/fair.c          | 7 -------
+ kernel/sysctl.c              | 7 -------
+ 3 files changed, 15 deletions(-)
+
+diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
+index 22db1e6..4411453 100644
+--- a/include/linux/sched/sysctl.h
++++ b/include/linux/sched/sysctl.h
+@@ -36,7 +36,6 @@ enum sched_tunable_scaling {
+ extern unsigned int sysctl_sched_migration_cost;
+ extern unsigned int sysctl_sched_nr_migrate;
+ extern unsigned int sysctl_sched_time_avg;
+-extern unsigned int sysctl_sched_shares_window;
+ 
+ int sched_proc_update_handler(struct ctl_table *table, int write,
+ 		void __user *buffer, size_t *length,
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c242944..3cf446c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -93,13 +93,6 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
+ 
+ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+ 
+-/*
+- * The exponential sliding window over which load is averaged for shares
+- * distribution.
+- * (default: 10msec)
+- */
+-unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+-
+ #ifdef CONFIG_CFS_BANDWIDTH
+ /*
+  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 706309f..739fb17 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -347,13 +347,6 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec,
+ 	},
+-	{
+-		.procname	= "sched_shares_window_ns",
+-		.data		= &sysctl_sched_shares_window,
+-		.maxlen		= sizeof(unsigned int),
+-		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
+-	},
+ #ifdef CONFIG_SCHEDSTATS
+ 	{
+ 		.procname	= "sched_schedstats",
+-- 
+1.9.1
+
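For readers unfamiliar with how such a tunable is wired up, the sketch below shows the general ctl_table pattern that the hunk in kernel/sysctl.c deletes. It is illustrative only: every "example_*" identifier is made up, and the real sched_shares_window_ns entry lived in the static kern_table registered by the kernel itself, not in a module.

/*
 * Minimal out-of-tree sketch of the sysctl pattern removed above.
 * All "example_*" identifiers are hypothetical; the in-tree
 * sched_shares_window_ns entry sat in kern_table in kernel/sysctl.c.
 */
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

/* Same default the deleted tunable carried: 10 ms expressed in ns. */
static unsigned int example_window_ns = 10000000UL;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_window_ns",
		.data		= &example_window_ns,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* empty entry terminates the table */
};

static struct ctl_table_header *example_header;

static int __init example_sysctl_init(void)
{
	/* Exposes /proc/sys/example_window_ns for this sketch. */
	example_header = register_sysctl_table(example_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_sysctl_exit(void)
{
	unregister_sysctl_table(example_header);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");

A tunable like this is fully torn down by removing its ctl_table entry, its extern declaration, and the variable definition, which is exactly what the three hunks in the patch above do.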