From 779ac30678caa159a9ee3d1c4007e20002ee887f Mon Sep 17 00:00:00 2001
From: Stephen Dickey
Date: Tue, 2 Nov 2021 19:21:02 +0530
Subject: [PATCH] sched/walt: eval_need code and trace cleanup

Improve the naming and types of the variables in eval_need, reduce
computation, and adjust the tracepoint to reflect the new variable
names.

Change-Id: Ifc19bc802bbc8382577b0e4ed4b4c9be77ffe99e
Signed-off-by: Stephen Dickey
Signed-off-by: Tengfei Fan
Signed-off-by: UtsavBalar1231
---
 include/trace/events/sched.h | 16 ++++++++--------
 kernel/sched/core_ctl.c      | 22 +++++++++++-----------
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b0bc47e9454d..50922405ef81 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1427,16 +1427,16 @@ TRACE_EVENT(core_ctl_eval_need,
 	TP_PROTO(unsigned int cpu, unsigned int last_need, unsigned int new_need, unsigned int active_cpus,
-		 unsigned int ret, unsigned int need_flag,
+		 unsigned int adj_now, unsigned int adj_possible,
 		 unsigned int updated, s64 need_ts),
 
-	TP_ARGS(cpu, last_need, new_need, active_cpus, ret, need_flag, updated, need_ts),
+	TP_ARGS(cpu, last_need, new_need, active_cpus, adj_now, adj_possible, updated, need_ts),
 
 	TP_STRUCT__entry(
 		__field(u32, cpu)
 		__field(u32, last_need)
 		__field(u32, new_need)
 		__field(u32, active_cpus)
-		__field(u32, ret)
-		__field(u32, need_flag)
+		__field(u32, adj_now)
+		__field(u32, adj_possible)
 		__field(u32, updated)
 		__field(s64, need_ts)
 	),
@@ -1445,14 +1445,14 @@ TRACE_EVENT(core_ctl_eval_need,
 		__entry->last_need = last_need;
 		__entry->new_need = new_need;
 		__entry->active_cpus = active_cpus;
-		__entry->ret = ret;
-		__entry->need_flag = need_flag;
+		__entry->adj_now = adj_now;
+		__entry->adj_possible = adj_possible;
 		__entry->updated = updated;
 		__entry->need_ts = need_ts;
 	),
 
-	TP_printk("cpu=%u last_need=%u new_need=%u active_cpus=%u ret=%u need_flag=%u updated=%u need_ts=%llu",
+	TP_printk("cpu=%u last_need=%u new_need=%u active_cpus=%u adj_now=%u adj_possible=%u updated=%u need_ts=%llu",
 		  __entry->cpu, __entry->last_need, __entry->new_need,
-		  __entry->active_cpus, __entry->ret, __entry->need_flag,
+		  __entry->active_cpus, __entry->adj_now, __entry->adj_possible,
 		  __entry->updated, __entry->need_ts)
 );

diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 602ec6edbc4e..409ca972bc9b 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -777,8 +777,8 @@ static bool eval_need(struct cluster_data *cluster)
 	unsigned long flags;
 	struct cpu_data *c;
 	unsigned int need_cpus = 0, last_need, thres_idx;
-	int ret = 0;
-	bool need_flag = false;
+	bool adj_now = false;
+	bool adj_possible = false;
 	unsigned int new_need;
 	s64 now, elapsed;
 
@@ -808,13 +808,12 @@ static bool eval_need(struct cluster_data *cluster)
 		need_cpus = apply_task_need(cluster, need_cpus);
 	}
 	new_need = apply_limits(cluster, need_cpus);
-	need_flag = adjustment_possible(cluster, new_need);
 	last_need = cluster->need_cpus;
 	now = ktime_to_ms(ktime_get());
 
 	if (new_need > cluster->active_cpus) {
-		ret = 1;
+		adj_now = true;
 	} else {
 		/*
 		 * When there is no change in need and there are no more
 		 * active CPUs than currently needed, just update the
 		 * need time stamp and return.
 		 */
 		if (new_need == last_need && new_need == cluster->active_cpus) {
 			cluster->need_ts = now;
-			ret = 0;
+			adj_now = false;
 			goto unlock;
 		}
 
-		elapsed = now - cluster->need_ts;
-		ret = elapsed >= cluster->offline_delay_ms;
+		elapsed = now - cluster->need_ts;
+		adj_now = elapsed >= cluster->offline_delay_ms;
 	}
 
-	if (ret) {
+	if (adj_now) {
+		adj_possible = adjustment_possible(cluster, new_need);
 		cluster->need_ts = now;
 		cluster->need_cpus = new_need;
 	}
 unlock:
 	trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
-				 cluster->active_cpus, ret, need_flag,
-				 ret && need_flag, cluster->need_ts);
+				 cluster->active_cpus, adj_now, adj_possible,
+				 adj_now && adj_possible, cluster->need_ts);
 	spin_unlock_irqrestore(&state_lock, flags);
 
-	return ret && need_flag;
+	return adj_now && adj_possible;
 }
 
 static void apply_need(struct cluster_data *cluster)
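Note for reviewers: the standalone C sketch below is not part of the patch. It is only an
illustration of the reworked decision flow in eval_need() after this change, using hypothetical
stand-ins (struct cluster_state, eval_need_sketch(), adjustment_possible_stub(), and the sample
numbers in main() are all invented for the example). The point it demonstrates is the ordering
change: adjustment_possible() is consulted only once adj_now has been established, instead of
unconditionally before the need evaluation, and the final verdict is adj_now && adj_possible.

/*
 * Userspace sketch (not kernel code) of the post-patch eval_need() flow.
 * The struct and the stub are hypothetical stand-ins; only the control
 * flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct cluster_state {
	unsigned int need_cpus;		/* last committed need */
	unsigned int active_cpus;
	long long need_ts;		/* ms timestamp of last need update */
	long long offline_delay_ms;
};

/* Stand-in for the kernel's adjustment_possible(); always true here. */
static bool adjustment_possible_stub(const struct cluster_state *c,
				     unsigned int new_need)
{
	(void)c;
	(void)new_need;
	return true;
}

static bool eval_need_sketch(struct cluster_state *c, unsigned int new_need,
			     long long now)
{
	bool adj_now = false;
	bool adj_possible = false;
	unsigned int last_need = c->need_cpus;

	if (new_need > c->active_cpus) {
		/* Ramping up: act immediately. */
		adj_now = true;
	} else if (new_need == last_need && new_need == c->active_cpus) {
		/* Nothing to do: just refresh the timestamp. */
		c->need_ts = now;
		return false;
	} else {
		/* Ramping down: honour the offline delay. */
		adj_now = (now - c->need_ts) >= c->offline_delay_ms;
	}

	if (adj_now) {
		/* The possibility check is only performed once needed. */
		adj_possible = adjustment_possible_stub(c, new_need);
		c->need_ts = now;
		c->need_cpus = new_need;
	}

	return adj_now && adj_possible;
}

int main(void)
{
	struct cluster_state c = { 2, 4, 1000, 100 };

	/* 0: ramp-down held off, offline_delay_ms not yet elapsed */
	printf("apply change? %d\n", eval_need_sketch(&c, 2, 1050));
	/* 1: ramp-up, acted on immediately */
	printf("apply change? %d\n", eval_need_sketch(&c, 6, 1200));
	return 0;
}

Compiling this with, e.g., gcc -Wall sketch.c and running it prints 0 for the hold-off case and 1
for the ramp-up case, the same boolean verdict eval_need() now derives from adj_now && adj_possible.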