From 1f93576e19b38a0df4203faaf07b656e5f24b6f8 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
Date: Mon, 9 Nov 2015 12:07:27 +0000
Subject: [PATCH 76/92] sched/events: Introduce cfs_rq PELT trace event
 sched_pelt_cfs_rq()

The trace event is only defined if symmetric multi-processing
(CONFIG_SMP) is enabled.
To let this trace event work for configurations with and without group
scheduling support for SCHED_OTHER (CONFIG_FAIR_GROUP_SCHED), the
following special handling is necessary for key=value pairs that have
no backing field in a given configuration:

id = -1 : In case of !CONFIG_FAIR_GROUP_SCHED, the task group css id
          is set to -1.
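
As a usage sketch (assuming a kernel built with CONFIG_SMP and this
patch applied, with tracefs mounted at /sys/kernel/debug/tracing), the
event can be enabled and consumed like any other sched trace event:

  # echo 1 > /sys/kernel/debug/tracing/events/sched/sched_pelt_cfs_rq/enable
  # cat /sys/kernel/debug/tracing/trace_pipe

Each record then carries the key=value pairs from the TP_printk()
format string below, with tg_css_id=-1 on !CONFIG_FAIR_GROUP_SCHED
kernels.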

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
(cherry picked from commit 1c61904cbc5998b19dc8c04bb0cfc99f9a8ec9db)
Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
---
 include/trace/events/sched.h | 51 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c          |  7 ++++++
 2 files changed, 58 insertions(+)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9b90c57..c9c3348 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -562,6 +562,57 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+#ifdef CONFIG_SMP
+/*
+ * Tracepoint for cfs_rq Per Entity Load Tracking (PELT).
+ */
+TRACE_EVENT(sched_pelt_cfs_rq,
+
+	TP_PROTO(struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__field( int,		id			)
+		__field( unsigned long,	load_avg		)
+		__field( unsigned long,	util_avg		)
+		__field( u64,		load_sum		)
+		__field( u32,		util_sum		)
+		__field( u32,		period_contrib		)
+		__field( u64,		last_update_time	)
+		__field( unsigned long,	runnable_load_avg	)
+		__field( u64,		runnable_load_sum	)
+	),
+
+	TP_fast_assign(
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		__entry->cpu			= cfs_rq->rq->cpu;
+		__entry->id			= cfs_rq->tg->css.id;
+#else
+		__entry->cpu			= (container_of(cfs_rq, struct rq, cfs))->cpu;
+		__entry->id			= -1;
+#endif
+		__entry->load_avg		= cfs_rq->avg.load_avg;
+		__entry->util_avg		= cfs_rq->avg.util_avg;
+		__entry->load_sum		= cfs_rq->avg.load_sum;
+		__entry->util_sum		= cfs_rq->avg.util_sum;
+		__entry->period_contrib		= cfs_rq->avg.period_contrib;
+		__entry->last_update_time	= cfs_rq->avg.last_update_time;
+		__entry->runnable_load_avg	= cfs_rq->runnable_load_avg;
+		__entry->runnable_load_sum	= cfs_rq->runnable_load_sum;
+	),
+
+	TP_printk("cpu=%d tg_css_id=%d load_avg=%lu util_avg=%lu"
+		  " load_sum=%llu util_sum=%u period_contrib=%u last_update_time=%llu"
+		  " runnable_load_avg=%lu runnable_load_sum=%llu",
+		  __entry->cpu, __entry->id, __entry->load_avg,
+		  __entry->util_avg, __entry->load_sum, __entry->util_sum,
+		  __entry->period_contrib, __entry->last_update_time,
+		  __entry->runnable_load_avg, __entry->runnable_load_sum)
+);
+#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc347af..03b4666 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2931,6 +2931,9 @@ static u32 __compute_runnable_contrib(u64 n)
 		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
 	}
 
+	if (cfs_rq)
+		trace_sched_pelt_cfs_rq(cfs_rq);
+
 	return decayed;
 }
 
@@ -3310,6 +3313,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	set_tg_cfs_propagate(cfs_rq);
 
 	cfs_rq_util_change(cfs_rq);
+
+	trace_sched_pelt_cfs_rq(cfs_rq);
 }
 
 /**
@@ -3330,6 +3335,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	set_tg_cfs_propagate(cfs_rq);
 
 	cfs_rq_util_change(cfs_rq);
+
+	trace_sched_pelt_cfs_rq(cfs_rq);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
-- 
1.9.1