From 82ae3c283c44fb74536b33dfeb3f23e228af869f Mon Sep 17 00:00:00 2001
From: Morten Rasmussen <morten.rasmussen@arm.com>
Date: Wed, 22 Jun 2016 10:58:17 +0100
Subject: [PATCH 43/92] arm: Update SD_ASYM_CPUCAPACITY topology flag when cpu
 capacity is changed

When cpu capacity information is provided through DT or sysfs, we might
not know whether the system has asymmetric cpu capacities at the time
the system topology information is initially passed to the scheduler.
We therefore have to force a rebuild of the sched_domain hierarchy if we
later discover that the flag was initially set wrongly.

It is mainly an issue for systems with the same type of cores (dmips/mhz)
but different max frequencies, and for asymmetric cpu capacity systems
where cpu capacity is not provided by DT but is set through sysfs later.

cc: Russell King <linux@armlinux.org.uk>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
(cherry picked from commit d02bb26e525bcbeb3c91345c4ee2a1fe1ca11e48)
Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
---
 arch/arm/kernel/topology.c | 39 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 37 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 69fb4b6..4d94639 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -22,6 +22,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/cpufreq.h>
+#include <linux/cpuset.h>
 
 #include <asm/cputype.h>
 #include <asm/topology.h>
@@ -43,6 +44,8 @@
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 static DEFINE_MUTEX(cpu_scale_mutex);
+static bool asym_cpucap;
+static bool update_flags;
 
 unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -54,6 +57,14 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
 	per_cpu(cpu_scale, cpu) = capacity;
 }
 
+static void update_sched_flags(void)
+{
+	update_flags = true;
+	rebuild_sched_domains();
+	update_flags = false;
+	pr_debug("cpu_capacity: Rebuilt sched_domain hierarchy.\n");
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 #include <asm/cpu.h>
 #include <linux/string.h>
@@ -83,6 +94,7 @@ static ssize_t store_cpu_capacity(struct device *dev,
 
 	if (count) {
 		char *p = (char *) buf;
+		bool asym = false;
 
 		ret = kstrtoul(p, 0, &new_capacity);
 		if (ret)
@@ -93,6 +105,17 @@ static ssize_t store_cpu_capacity(struct device *dev,
 		mutex_lock(&cpu_scale_mutex);
 		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
 			set_capacity_scale(i, new_capacity);
+
+		for_each_possible_cpu(i) {
+			if (per_cpu(cpu_scale, i) != new_capacity)
+				asym = true;
+		}
+
+		if (asym != asym_cpucap) {
+			asym_cpucap = asym;
+			update_sched_flags();
+		}
+
 		mutex_unlock(&cpu_scale_mutex);
 	}
 
@@ -154,7 +177,6 @@ struct cpu_efficiency {
 static u32 *raw_capacity;
 static bool cap_parsing_failed;
 static u32 capacity_scale;
-static bool asym_cpucap;
 
 static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 {
@@ -199,6 +221,7 @@ static void normalize_cpu_capacity(void)
 {
 	u64 capacity;
 	int cpu;
+	bool asym = false;
 
 	if (!raw_capacity || cap_parsing_failed)
 		return;
@@ -212,9 +235,12 @@ static void normalize_cpu_capacity(void)
 		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
 			cpu, arch_scale_cpu_capacity(NULL, cpu));
 		if (capacity < capacity_scale)
-			asym_cpucap = true;
+			asym = true;
 	}
 	mutex_unlock(&cpu_scale_mutex);
+
+	if (asym != asym_cpucap)
+		asym_cpucap = asym;
 }
 
 #ifdef CONFIG_CPU_FREQ
@@ -232,6 +258,7 @@ static void normalize_cpu_capacity(void)
 {
 	struct cpufreq_policy *policy = data;
 	int cpu;
+	bool asym;
 
 	if (cap_parsing_done)
 		return 0;
@@ -254,7 +281,10 @@ static void normalize_cpu_capacity(void)
 		}
 		if (cpumask_empty(cpus_to_visit)) {
 			if (!cap_parsing_failed) {
+				asym = asym_cpucap;
 				normalize_cpu_capacity();
+				if (asym != asym_cpucap)
+					update_sched_flags();
 				kfree(raw_capacity);
 				pr_debug("cpu_capacity: parsing done");
 			} else {
@@ -502,6 +532,11 @@ static void update_siblings_masks(unsigned int cpuid)
 	smp_wmb();
 }
 
+int arch_update_cpu_topology(void)
+{
+	return update_flags ? 1 : 0;
+}
+
 /*
  * store_cpu_topology is called at boot when only one cpu is running
  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
-- 
1.9.1
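
Note: the core decision added above (compare every CPU's capacity against the
newly written value and rebuild the sched_domain hierarchy only when the
asymmetry state actually flips) can be modelled outside the kernel. The sketch
below is a minimal, illustrative user-space model of that logic and is not
part of the patch; the cpu_scale[] array, the NR_CPUS value and the
set_capacity()/rebuild() helpers are assumptions made purely for illustration.

/*
 * Minimal user-space model of the asymmetry check introduced by this
 * patch.  cpu_scale[] stands in for the per-cpu cpu_scale variable and
 * rebuild() stands in for update_sched_flags()/rebuild_sched_domains().
 * Illustration only; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static unsigned long cpu_scale[NR_CPUS] = { 1024, 1024, 1024, 1024 };
static bool asym_cpucap;	/* mirrors the flag added by the patch */

static void rebuild(void)
{
	printf("rebuilding sched_domain hierarchy\n");
}

/*
 * Model of store_cpu_capacity(): apply a new capacity to one CPU (in the
 * kernel it is applied to all core siblings), then rebuild only if the
 * asymmetry state changed.
 */
static void set_capacity(int cpu, unsigned long new_capacity)
{
	bool asym = false;
	int i;

	cpu_scale[cpu] = new_capacity;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_scale[i] != new_capacity)
			asym = true;

	if (asym != asym_cpucap) {
		asym_cpucap = asym;
		rebuild();		/* state flipped: rebuild needed */
	}
}

int main(void)
{
	set_capacity(0, 430);	/* symmetric -> asymmetric: rebuild */
	set_capacity(0, 430);	/* asymmetry unchanged: no rebuild  */
	set_capacity(0, 1024);	/* asymmetric -> symmetric: rebuild */
	return 0;
}

In the kernel itself the rebuild is driven through rebuild_sched_domains():
while update_flags is set, arch_update_cpu_topology() returns 1, so the
scheduler rebuilds the sched_domain hierarchy and re-reads the topology
flags, including SD_ASYM_CPUCAPACITY.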