Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0076-sched-events-Introduce-cfs_rq-PELT-trace-event-sched.patch')
-rw-r--r--  meta-eas/recipes-kernel/linux/linux-renesas/0076-sched-events-Introduce-cfs_rq-PELT-trace-event-sched.patch  121
1 file changed, 121 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0076-sched-events-Introduce-cfs_rq-PELT-trace-event-sched.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0076-sched-events-Introduce-cfs_rq-PELT-trace-event-sched.patch
new file mode 100644
index 0000000..950e7a9
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0076-sched-events-Introduce-cfs_rq-PELT-trace-event-sched.patch
@@ -0,0 +1,121 @@
+From 1f93576e19b38a0df4203faaf07b656e5f24b6f8 Mon Sep 17 00:00:00 2001
+From: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Date: Mon, 9 Nov 2015 12:07:27 +0000
+Subject: [PATCH 76/92] sched/events: Introduce cfs_rq PELT trace event
+ sched_pelt_cfs_rq()
+
+The trace event is only defined if symmetric multi-processing
+(CONFIG_SMP) is enabled.
+To let this trace event work for configurations with and without group
+scheduling support for SCHED_OTHER (CONFIG_FAIR_GROUP_SCHED), the
+following special handling is necessary for key=value pairs that do
+not exist in a given configuration:
+
+id = -1 : In case of !CONFIG_FAIR_GROUP_SCHED, the task group css id
+ is set to -1.
+
+Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+(cherry picked from commit 1c61904cbc5998b19dc8c04bb0cfc99f9a8ec9db)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ include/trace/events/sched.h | 51 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/sched/fair.c | 7 ++++++
+ 2 files changed, 58 insertions(+)
+
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index 9b90c57..c9c3348 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -562,6 +562,57 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
+
+ TP_printk("cpu=%d", __entry->cpu)
+ );
++
++#ifdef CONFIG_SMP
++/*
++ * Tracepoint for cfs_rq Per Entity Load Tracking (PELT).
++ */
++TRACE_EVENT(sched_pelt_cfs_rq,
++
++ TP_PROTO(struct cfs_rq *cfs_rq),
++
++ TP_ARGS(cfs_rq),
++
++ TP_STRUCT__entry(
++ __field( int, cpu )
++ __field( int, id )
++ __field( unsigned long, load_avg )
++ __field( unsigned long, util_avg )
++ __field( u64, load_sum )
++ __field( u32, util_sum )
++ __field( u32, period_contrib )
++ __field( u64, last_update_time )
++ __field( unsigned long, runnable_load_avg )
++ __field( u64, runnable_load_sum )
++ ),
++
++ TP_fast_assign(
++#ifdef CONFIG_FAIR_GROUP_SCHED
++ __entry->cpu = cfs_rq->rq->cpu;
++ __entry->id = cfs_rq->tg->css.id;
++#else
++ __entry->cpu = (container_of(cfs_rq, struct rq, cfs))->cpu;
++ __entry->id = -1;
++#endif
++ __entry->load_avg = cfs_rq->avg.load_avg;
++ __entry->util_avg = cfs_rq->avg.util_avg;
++ __entry->load_sum = cfs_rq->avg.load_sum;
++ __entry->util_sum = cfs_rq->avg.util_sum;
++ __entry->period_contrib = cfs_rq->avg.period_contrib;
++ __entry->last_update_time = cfs_rq->avg.last_update_time;
++ __entry->runnable_load_avg = cfs_rq->runnable_load_avg;
++ __entry->runnable_load_sum = cfs_rq->runnable_load_sum;
++ ),
++
++ TP_printk("cpu=%d tg_css_id=%d load_avg=%lu util_avg=%lu"
++ " load_sum=%llu util_sum=%u period_contrib=%u last_update_time=%llu"
++ " runnable_load_avg=%lu runnable_load_sum=%llu",
++ __entry->cpu, __entry->id, __entry->load_avg,
++ __entry->util_avg, __entry->load_sum, __entry->util_sum,
++ __entry->period_contrib, __entry->last_update_time,
++ __entry->runnable_load_avg, __entry->runnable_load_sum)
++);
++#endif /* CONFIG_SMP */
+ #endif /* _TRACE_SCHED_H */
+
+ /* This part must be outside protection */
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index bc347af..03b4666 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2931,6 +2931,9 @@ static u32 __compute_runnable_contrib(u64 n)
+ sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
+ }
+
++ if (cfs_rq)
++ trace_sched_pelt_cfs_rq(cfs_rq);
++
+ return decayed;
+ }
+
+@@ -3310,6 +3313,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ set_tg_cfs_propagate(cfs_rq);
+
+ cfs_rq_util_change(cfs_rq);
++
++ trace_sched_pelt_cfs_rq(cfs_rq);
+ }
+
+ /**
+@@ -3330,6 +3335,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ set_tg_cfs_propagate(cfs_rq);
+
+ cfs_rq_util_change(cfs_rq);
++
++ trace_sched_pelt_cfs_rq(cfs_rq);
+ }
+
+ /* Add the load generated by se into cfs_rq's load average */
+--
+1.9.1
+
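Once applied, the event registers in the standard sched trace group and can
be driven entirely through tracefs. Below is a minimal userspace sketch (not
part of the patch) that enables sched_pelt_cfs_rq and streams the formatted
output; it assumes tracefs is mounted at the usual /sys/kernel/debug/tracing
location and a kernel built with CONFIG_SMP=y carrying this patch.

/*
 * Minimal sketch: enable the sched_pelt_cfs_rq trace event and dump
 * its output. Paths assume tracefs under /sys/kernel/debug/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Write "1" to the per-event enable file to turn the event on. */
	int fd = open("/sys/kernel/debug/tracing/events/sched/"
		      "sched_pelt_cfs_rq/enable", O_WRONLY);
	if (fd < 0) {
		perror("open enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write enable");
	close(fd);

	/* Stream formatted events; each line follows the TP_printk()
	 * format above. */
	FILE *fp = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!fp) {
		perror("fopen trace_pipe");
		return 1;
	}
	char buf[512];
	while (fgets(buf, sizeof(buf), fp))
		fputs(buf, stdout);
	fclose(fp);
	return 0;
}

Each emitted line carries the key=value pairs from TP_printk(); on a
!CONFIG_FAIR_GROUP_SCHED kernel the tg_css_id field reads -1, matching the
fallback assigned in TP_fast_assign().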