Diffstat (limited to 'meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch')
-rw-r--r-- | meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch | 146
1 files changed, 146 insertions, 0 deletions
diff --git a/meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch b/meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch
new file mode 100644
index 0000000..5e7d128
--- /dev/null
+++ b/meta-eas/recipes-kernel/linux/linux-renesas/0077-sched-events-Introduce-sched-entity-PELT-trace-event.patch
@@ -0,0 +1,146 @@
+From f815820c55e552529885b5c541be9e2e3000c198 Mon Sep 17 00:00:00 2001
+From: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Date: Mon, 25 Apr 2016 00:48:09 +0100
+Subject: [PATCH 77/92] sched/events: Introduce sched entity PELT trace event
+ sched_pelt_se()
+
+The trace event is only defined if symmetric multi-processing
+(CONFIG_SMP) is enabled.
+To let this trace event work for configurations with and without group
+scheduling support for SCHED_OTHER (CONFIG_FAIR_GROUP_SCHED) the
+following special handling is necessary for non-existent key=value
+pairs:
+
+id = -1      : In case sched entity is representing a task
+             : In case CONFIG_FAIR_GROUP_SCHED is not set
+
+pid = -1     : In case sched entity is representing a task group
+
+comm = "n/a" : In case sched entity is representing a task group
+
+Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+(cherry picked from commit 31f5aeb31289ec28c5831b62a2176da50b3c7fa4)
+Signed-off-by: Gaku Inami <gaku.inami.xw@bp.renesas.com>
+---
+ include/trace/events/sched.h | 54 ++++++++++++++++++++++++++++++++++++++++++++
+ kernel/sched/fair.c          |  9 ++++++--
+ 2 files changed, 61 insertions(+), 2 deletions(-)
+
+diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
+index c9c3348..58ba3d5 100644
+--- a/include/trace/events/sched.h
++++ b/include/trace/events/sched.h
+@@ -565,6 +565,60 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
+ 
+ #ifdef CONFIG_SMP
+ /*
++ * Tracepoint for sched entity Per Entity Load Tracking (PELT).
++ */
++TRACE_EVENT(sched_pelt_se,
++
++	TP_PROTO(struct sched_entity *se),
++
++	TP_ARGS(se),
++
++	TP_STRUCT__entry(
++		__field( int,		cpu			)
++		__field( int,		id			)
++		__array( char,		comm,	TASK_COMM_LEN	)
++		__field( pid_t,		pid			)
++		__field( unsigned long,	load_avg		)
++		__field( unsigned long,	util_avg		)
++		__field( u64,		load_sum		)
++		__field( u32,		util_sum		)
++		__field( u32,		period_contrib		)
++		__field( u64,		last_update_time	)
++	),
++
++	TP_fast_assign(
++		struct task_struct *p = container_of(se, struct task_struct, se);
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++		__entry->cpu = se->my_q ? cpu_of(se->cfs_rq->rq) : task_cpu(p);
++		__entry->id = se->my_q ? se->my_q->tg->css.id : -1;
++		memcpy(__entry->comm, se->my_q ? "n/a" : p->comm,
++		       TASK_COMM_LEN);
++		__entry->pid = se->my_q ? -1 : p->pid;
++#else
++		__entry->cpu = task_cpu(p);
++		__entry->id = -1;
++		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
++		__entry->pid = p->pid;
++#endif
++		__entry->load_avg = se->avg.load_avg;
++		__entry->util_avg = se->avg.util_avg;
++		__entry->load_sum = se->avg.load_sum;
++		__entry->util_sum = se->avg.util_sum;
++		__entry->period_contrib = se->avg.period_contrib;
++		__entry->last_update_time = se->avg.last_update_time;
++	),
++
++	TP_printk("cpu=%d tg_css_id=%d comm=%s pid=%d load_avg=%lu util_avg=%lu"
++		  " load_sum=%llu util_sum=%u period_contrib=%u"
++		  " last_update_time=%llu",
++		  __entry->cpu, __entry->id, __entry->comm, __entry->pid,
++		  __entry->load_avg, __entry->util_avg, __entry->load_sum,
++		  __entry->util_sum, __entry->period_contrib,
++		  __entry->last_update_time)
++);
++
++/*
+  * Tracepoint for cfs_rq Per Entity Load Tracking (PELT).
+  */
+ TRACE_EVENT(sched_pelt_cfs_rq,
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 03b4666..fffe7cb 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2845,7 +2845,7 @@ static u32 __compute_runnable_contrib(u64 n)
+ 	 */
+ 	if ((s64)delta < 0) {
+ 		sa->last_update_time = now;
+-		return 0;
++		goto trace;
+ 	}
+ 
+ 	/*
+@@ -2854,7 +2854,7 @@ static u32 __compute_runnable_contrib(u64 n)
+ 	 */
+ 	delta >>= 10;
+ 	if (!delta)
+-		return 0;
++		goto trace;
+ 	sa->last_update_time = now;
+ 
+ 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
+@@ -2931,8 +2931,11 @@ static u32 __compute_runnable_contrib(u64 n)
+ 		sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
+ 	}
+ 
++trace:
+ 	if (cfs_rq)
+ 		trace_sched_pelt_cfs_rq(cfs_rq);
++	else
++		trace_sched_pelt_se(container_of(sa, struct sched_entity, avg));
+ 
+ 	return decayed;
+ }
+@@ -3315,6 +3318,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ 	cfs_rq_util_change(cfs_rq);
+ 
+ 	trace_sched_pelt_cfs_rq(cfs_rq);
++	trace_sched_pelt_se(se);
+ }
+ 
+ /**
+@@ -3337,6 +3341,7 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
+ 	cfs_rq_util_change(cfs_rq);
+ 
+ 	trace_sched_pelt_cfs_rq(cfs_rq);
++	trace_sched_pelt_se(se);
+ }
+ 
+ /* Add the load generated by se into cfs_rq's load average */
+-- 
+1.9.1
+