path: root/meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch
From f815820c55e552529885b5c541be9e2e3000c198 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
Date: Mon, 25 Apr 2016 00:48:09 +0100
Subject: [PATCH 77/92] sched/events: Introduce sched entity PELT trace event
 sched_pelt_se()

The trace event is only defined if symmetric multi-processing
(CONFIG_SMP) is enabled.
To let this trace event work for configurations with and without group
scheduling support for SCHED_OTHER (CONFIG_FAIR_GROUP_SCHED), the
following special handling is necessary for key=value pairs that
otherwise would not exist:

id = -1      : In case the sched entity represents a task
             : In case CONFIG_FAIR_GROUP_SCHED is not set

pid = -1     : In case the sched entity represents a task group

comm = "n/a" : In case the sched entity represents a task group

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
(cherry picked from commit 31f5aeb31289ec28c5831b62a2176da50b3c7fa4)
Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
---
 include/trace/events/sched.h | 54 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c          |  9 ++++++--
 2 files changed, 61 insertions(+), 2 deletions(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c9c3348..58ba3d5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -565,6 +565,60 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 
 #ifdef CONFIG_SMP
 /*
+ * Tracepoint for sched entity Per Entity Load Tracking (PELT).
+ */
+TRACE_EVENT(sched_pelt_se,
+
+	TP_PROTO(struct sched_entity *se),
+
+	TP_ARGS(se),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__field( int,		id			)
+		__array( char,		comm,	TASK_COMM_LEN	)
+		__field( pid_t,		pid			)
+		__field( unsigned long,	load_avg		)
+		__field( unsigned long,	util_avg		)
+		__field( u64,		load_sum		)
+		__field( u32,		util_sum		)
+		__field( u32,		period_contrib		)
+		__field( u64,		last_update_time	)
+	),
+
+	TP_fast_assign(
+		struct task_struct *p = container_of(se, struct task_struct, se);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		__entry->cpu = se->my_q ? cpu_of(se->cfs_rq->rq) : task_cpu(p);
+		__entry->id  = se->my_q ? se->my_q->tg->css.id : -1;
+		memcpy(__entry->comm, se->my_q ? "n/a" : p->comm,
+		       TASK_COMM_LEN);
+		__entry->pid = se->my_q ? -1 : p->pid;
+#else
+		__entry->cpu = task_cpu(p);
+		__entry->id  = -1;
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid = p->pid;
+#endif
+		__entry->load_avg		= se->avg.load_avg;
+		__entry->util_avg		= se->avg.util_avg;
+		__entry->load_sum		= se->avg.load_sum;
+		__entry->util_sum		= se->avg.util_sum;
+		__entry->period_contrib		= se->avg.period_contrib;
+		__entry->last_update_time	= se->avg.last_update_time;
+	),
+
+	TP_printk("cpu=%d tg_css_id=%d comm=%s pid=%d load_avg=%lu util_avg=%lu"
+		  " load_sum=%llu util_sum=%u period_contrib=%u"
+		  " last_update_time=%llu",
+		  __entry->cpu, __entry->id, __entry->comm, __entry->pid,
+		  __entry->load_avg, __entry->util_avg, __entry->load_sum,
+		  __entry->util_sum, __entry->period_contrib,
+		  __entry->last_update_time)
+);
+
+/*
  * Tracepoint for cfs_rq Per Entity Load Tracking (PELT).
  */
 TRACE_EVENT(sched_pelt_cfs_rq,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 03b4666..fffe7cb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2845,7 +2845,7 @@ static u32 __compute_runnable_contrib(u64 n)
 	 */
 	if ((s64)delta < 0) {
 		sa->last_update_time = now;
-		return 0;
+		goto trace;
 	}
 
 	/*
@@ -2854,7 +2854,7 @@ static u32 __compute_runnable_contrib(u64 n)
 	 */
 	delta >>= 10;
 	if (!delta)
-		return 0;
+		goto trace;
 	sa->last_update_time = now;
 
 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
@@ -2931,8 +2931,11 @@ static u32 __compute_runnable_contrib(u64 n)
 		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
 	}
 
+trace:
 	if (cfs_rq)
 		trace_sched_pelt_cfs_rq(cfs_rq);
+	else
+		trace_sched_pelt_se(container_of(sa, struct sched_entity, avg));
 
 	return decayed;
 }
@@ -3315,6 +3318,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	cfs_rq_util_change(cfs_rq);
 
 	trace_sched_pelt_cfs_rq(cfs_rq);
+	trace_sched_pelt_se(se);
 }
 
 /**
@@ -3337,6 +3341,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	cfs_rq_util_change(cfs_rq);
 
 	trace_sched_pelt_cfs_rq(cfs_rq);
+	trace_sched_pelt_se(se);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
-- 
1.9.1
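
Editor's note, not part of the patch above: the following is a minimal sketch
that re-expresses the TP_fast_assign() handling of the id, pid and comm fields
as an ordinary C helper, to make the task vs. task-group fallbacks from the
commit message easier to follow. The helper name fill_pelt_se_ids() is
hypothetical; struct sched_entity, struct task_struct and TASK_COMM_LEN are
the kernel definitions this patch builds against.

static void fill_pelt_se_ids(struct sched_entity *se,
			     int *id, pid_t *pid, char *comm)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (se->my_q) {
		/* Entity represents a task group: report its css id,
		 * there is no task pid or comm to show. */
		*id = se->my_q->tg->css.id;
		*pid = -1;
		strncpy(comm, "n/a", TASK_COMM_LEN);
	} else {
		/* Entity is embedded in a task_struct. */
		struct task_struct *p = container_of(se, struct task_struct, se);

		*id = -1;
		*pid = p->pid;
		strncpy(comm, p->comm, TASK_COMM_LEN);
	}
#else
	/* Without CONFIG_FAIR_GROUP_SCHED every sched entity belongs
	 * to a task, so there is never a group css id to report. */
	struct task_struct *p = container_of(se, struct task_struct, se);

	*id = -1;
	*pid = p->pid;
	strncpy(comm, p->comm, TASK_COMM_LEN);
#endif
}

Once the patch is applied to a CONFIG_SMP kernel, the event is part of the
sched trace system (events/sched/sched_pelt_se/ in tracefs) and each record
is emitted in the key=value format defined by the TP_printk() string above.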