From b43253276571111c9504d63feec14c9aed0a49ea Mon Sep 17 00:00:00 2001
From: Stephen Dickey
Date: Tue, 2 Nov 2021 19:19:58 +0530
Subject: [PATCH] sched/walt: Improve the scheduler

Improve core_ctl tracing such that the flags needed to debug
eval_need are present in the trace, and so that all return
paths go through the same tracepoint, unlock, and return
logic.

Change-Id: I61d5ab86ba7650bea77e4416b0ffa9a07869bbf3
Signed-off-by: Stephen Dickey
Signed-off-by: Tengfei Fan
Signed-off-by: UtsavBalar1231
---
 include/trace/events/sched.h | 34 +++++++++++++++++++++++-----------
 kernel/sched/core_ctl.c      |  9 ++++++---
 2 files changed, 29 insertions(+), 14 deletions(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c8a3662da638..b0bc47e9454d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1425,23 +1425,35 @@ TRACE_EVENT(sched_boost_cpu,
 
 TRACE_EVENT(core_ctl_eval_need,
 
-	TP_PROTO(unsigned int cpu, unsigned int old_need,
-		 unsigned int new_need, unsigned int updated),
-	TP_ARGS(cpu, old_need, new_need, updated),
+	TP_PROTO(unsigned int cpu, unsigned int last_need,
+		 unsigned int new_need, unsigned int active_cpus,
+		 unsigned int ret, unsigned int need_flag,
+		 unsigned int updated, s64 need_ts),
+	TP_ARGS(cpu, last_need, new_need, active_cpus, ret, need_flag, updated, need_ts),
 	TP_STRUCT__entry(
 		__field(u32, cpu)
-		__field(u32, old_need)
+		__field(u32, last_need)
 		__field(u32, new_need)
+		__field(u32, active_cpus)
+		__field(u32, ret)
+		__field(u32, need_flag)
 		__field(u32, updated)
+		__field(s64, need_ts)
 	),
 	TP_fast_assign(
-		__entry->cpu = cpu;
-		__entry->old_need = old_need;
-		__entry->new_need = new_need;
-		__entry->updated = updated;
-	),
-	TP_printk("cpu=%u, old_need=%u, new_need=%u, updated=%u", __entry->cpu,
-		  __entry->old_need, __entry->new_need, __entry->updated)
+		__entry->cpu = cpu;
+		__entry->last_need = last_need;
+		__entry->new_need = new_need;
+		__entry->active_cpus = active_cpus;
+		__entry->ret = ret;
+		__entry->need_flag = need_flag;
+		__entry->updated = updated;
+		__entry->need_ts = need_ts;
+	),
+	TP_printk("cpu=%u last_need=%u new_need=%u active_cpus=%u ret=%u need_flag=%u updated=%u need_ts=%llu",
+		  __entry->cpu, __entry->last_need, __entry->new_need,
+		  __entry->active_cpus, __entry->ret, __entry->need_flag,
+		  __entry->updated, __entry->need_ts)
 );
 
 TRACE_EVENT(core_ctl_set_busy,
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index dc65337d2dbf..602ec6edbc4e 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -823,8 +823,8 @@ static bool eval_need(struct cluster_data *cluster)
 	 */
 	if (new_need == last_need && new_need == cluster->active_cpus) {
 		cluster->need_ts = now;
-		spin_unlock_irqrestore(&state_lock, flags);
-		return 0;
+		ret = 0;
+		goto unlock;
 	}
 
 	elapsed = now - cluster->need_ts;
@@ -835,8 +835,11 @@ static bool eval_need(struct cluster_data *cluster)
 		cluster->need_ts = now;
 		cluster->need_cpus = new_need;
 	}
+
+unlock:
 	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
-				 ret && need_flag);
+				 cluster->active_cpus, ret, need_flag,
+				 ret && need_flag, cluster->need_ts);
 	spin_unlock_irqrestore(&state_lock, flags);
 	return ret && need_flag;
 }
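
For readers skimming the diff, below is a minimal, self-contained userspace
sketch of the single-exit idiom this patch introduces in eval_need(): the
early return is replaced by a goto to one label, so the tracepoint fires and
the lock is released on every path, in one place. All names here
(demo_eval_need, demo_trace, demo_lock) are illustrative stand-ins, not the
kernel's API; the real code uses trace_core_ctl_eval_need() and
spin_unlock_irqrestore() as shown above.

/*
 * Sketch of the consolidated-exit pattern. Compile with -lpthread.
 * Names are hypothetical; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the tracepoint: report the values the function returns. */
static void demo_trace(unsigned int last_need, unsigned int new_need,
		       unsigned int ret, unsigned int need_flag)
{
	printf("last_need=%u new_need=%u ret=%u need_flag=%u\n",
	       last_need, new_need, ret, need_flag);
}

static bool demo_eval_need(unsigned int last_need, unsigned int new_need,
			   unsigned int active_cpus)
{
	unsigned int ret = 1, need_flag = 1;	/* simplified */

	pthread_mutex_lock(&demo_lock);

	if (new_need == last_need && new_need == active_cpus) {
		/* Nothing changed: record the result and fall through to
		 * the common exit instead of unlocking and returning here. */
		ret = 0;
		goto unlock;
	}

	/* ... evaluate hysteresis, update need_cpus, etc. ... */

unlock:
	/* Every path traces, then unlocks, then returns: exactly once. */
	demo_trace(last_need, new_need, ret, need_flag);
	pthread_mutex_unlock(&demo_lock);
	return ret && need_flag;
}

int main(void)
{
	printf("%d\n", demo_eval_need(4, 4, 4));	/* early-exit path */
	printf("%d\n", demo_eval_need(4, 6, 4));	/* normal path */
	return 0;
}

Funneling every exit through one label also guarantees the trace reports the
final ret and need_flag values the function actually returns, which is what
makes the extended tracepoint useful for debugging eval_need.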