
Searched refs:cfs_rq (Results 1 – 8 of 8) sorted by relevance

/linux/kernel/sched/
fair.c
4163 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in clear_tg_offline_cfs_rqs() local
4483 struct cfs_rq *cfs_rq; in migrate_se_pelt_lag() local
5718 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
5747 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
6490 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
6515 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
6750 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
6843 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
6951 struct cfs_rq *cfs_rq; in cpu_load_without() local
6974 struct cfs_rq *cfs_rq; in cpu_runnable_without() local
[all …]
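
Every fair.c hit above is the same idiom: a task_group keeps one cfs_rq per CPU, and tree walkers such as tg_unthrottle_up() index that array with the CPU of the runqueue they were handed. A minimal userspace model of the lookup, with simplified stand-in types rather than the kernel definitions:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct cfs_rq { int throttled; };              /* stand-in, not the kernel struct */
struct rq { int cpu; };                        /* per-CPU runqueue, cpu id only   */
struct task_group { struct cfs_rq **cfs_rq; }; /* one cfs_rq pointer per CPU      */

static int cpu_of(struct rq *rq) { return rq->cpu; }

/* Mirrors the lookup in tg_unthrottle_up()/tg_throttle_down() above. */
static struct cfs_rq *tg_cfs_rq_of(struct task_group *tg, struct rq *rq)
{
	return tg->cfs_rq[cpu_of(rq)];
}

int main(void)
{
	struct task_group tg;
	struct rq rqs[NR_CPUS];

	tg.cfs_rq = calloc(NR_CPUS, sizeof(*tg.cfs_rq));
	for (int i = 0; i < NR_CPUS; i++) {
		tg.cfs_rq[i] = calloc(1, sizeof(struct cfs_rq));
		rqs[i].cpu = i;
	}
	tg.cfs_rq[2]->throttled = 1;

	printf("cpu2 throttled=%d\n", tg_cfs_rq_of(&tg, &rqs[2])->throttled);
	return 0;
}
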
pelt.h
5 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
6 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
156 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in update_idle_cfs_rq_clock_pelt() argument
169 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
172 return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
174 return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time; in cfs_rq_clock_pelt()
177 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { } in update_idle_cfs_rq_clock_pelt() argument
178 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) in cfs_rq_clock_pelt() argument
180 return rq_clock_pelt(rq_of(cfs_rq)); in cfs_rq_clock_pelt()
187 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
[all …]
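
The two cfs_rq_clock_pelt() variants above implement throttle compensation: time spent throttled accumulates in throttled_clock_pelt_time and is subtracted from the PELT clock, so a group's load signal does not decay while it cannot run. A standalone sketch of that arithmetic, with the fields and the clock source as simplified stand-ins:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct cfs_rq {
	int throttled;
	u64 throttled_clock_pelt;       /* PELT clock when throttling began  */
	u64 throttled_clock_pelt_time;  /* total time spent throttled so far */
};

/* Stand-in for rq_clock_pelt(rq_of(cfs_rq)): a monotonic clock. */
static u64 rq_clock_pelt_now;

static u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->throttled)  /* clock frozen at the throttle point */
		return cfs_rq->throttled_clock_pelt -
		       cfs_rq->throttled_clock_pelt_time;
	return rq_clock_pelt_now - cfs_rq->throttled_clock_pelt_time;
}

int main(void)
{
	struct cfs_rq rq = { 0 };

	rq_clock_pelt_now = 1000;
	printf("running: %llu\n", (unsigned long long)cfs_rq_clock_pelt(&rq));

	/* throttle at t=1000 for 300 units, then resume */
	rq.throttled_clock_pelt_time += 300;
	rq_clock_pelt_now = 1300;
	printf("after throttle: %llu\n",
	       (unsigned long long)cfs_rq_clock_pelt(&rq));
	return 0;
}

Both prints show 1000: across a 300-unit throttle, the PELT clock stands still.
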
pelt.c
306 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se) in __update_load_avg_se() argument
309 cfs_rq->curr == se)) { in __update_load_avg_se()
320 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) in __update_load_avg_cfs_rq() argument
322 if (___update_load_sum(now, &cfs_rq->avg, in __update_load_avg_cfs_rq()
323 scale_load_down(cfs_rq->load.weight), in __update_load_avg_cfs_rq()
324 cfs_rq->h_nr_running, in __update_load_avg_cfs_rq()
325 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
327 ___update_load_avg(&cfs_rq->avg, 1); in __update_load_avg_cfs_rq()
328 trace_pelt_cfs_tp(cfs_rq); in __update_load_avg_cfs_rq()
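
___update_load_sum() and ___update_load_avg(), called above, implement the PELT geometric series: each roughly 1 ms period decays prior history by y, with y^32 = 1/2, and the average is the running sum scaled against the series maximum. A numeric model of that convergence using the documented constants, not the kernel's fixed-point code:

#include <stdio.h>
#include <math.h>

#define LOAD_AVG_PERIOD 32           /* periods for the signal to halve */

int main(void)
{
	double y = pow(0.5, 1.0 / LOAD_AVG_PERIOD);  /* per-period decay  */
	double sum_max = 1.0 / (1.0 - y);            /* geometric maximum */
	double sum = 0.0;

	/* a task runnable 100% of the time converges toward avg == 1.0 */
	for (int period = 1; period <= 345; period++) {
		sum = sum * y + 1.0;                 /* decay old, add new */
		if (period % 64 == 0 || period == 345)
			printf("period %3d: avg = %.3f\n", period, sum / sum_max);
	}
	return 0;
}
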
debug.c
629 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) in print_cfs_rq() argument
672 cfs_rq->nr_spread_over); in print_cfs_rq()
676 cfs_rq->idle_nr_running); in print_cfs_rq()
682 cfs_rq->avg.load_avg); in print_cfs_rq()
684 cfs_rq->avg.runnable_avg); in print_cfs_rq()
686 cfs_rq->avg.util_avg); in print_cfs_rq()
688 cfs_rq->avg.util_est); in print_cfs_rq()
690 cfs_rq->removed.load_avg); in print_cfs_rq()
692 cfs_rq->removed.util_avg); in print_cfs_rq()
704 cfs_rq->throttled); in print_cfs_rq()
[all …]
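
print_cfs_rq() above emits one label/value line per tracked average; this is what populates the per-cfs_rq sections of the scheduler debug file (/sys/kernel/debug/sched/debug on recent kernels, /proc/sched_debug before that). A plain-printf model of the dump shape, with made-up values and the SEQ_printf/seq_file plumbing dropped:

#include <stdio.h>

struct sched_avg { unsigned long load_avg, runnable_avg, util_avg; };
struct cfs_rq { struct sched_avg avg; int throttled; };

static void print_cfs_rq_stats(struct cfs_rq *cfs_rq)
{
	printf("  .%-22s: %lu\n", "load_avg", cfs_rq->avg.load_avg);
	printf("  .%-22s: %lu\n", "runnable_avg", cfs_rq->avg.runnable_avg);
	printf("  .%-22s: %lu\n", "util_avg", cfs_rq->avg.util_avg);
	printf("  .%-22s: %d\n",  "throttled", cfs_rq->throttled);
}

int main(void)
{
	struct cfs_rq rq = { .avg = { 512, 480, 300 }, .throttled = 0 };

	print_cfs_rq_stats(&rq);
	return 0;
}
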
sched.h
361 struct cfs_rq;
400 struct cfs_rq **cfs_rq; member
502 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
529 struct cfs_rq *prev, struct cfs_rq *next);
532 struct cfs_rq *prev, struct cfs_rq *next) { } in set_task_rq_fair()
1208 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1215 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
2037 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]); in set_task_rq()
2038 p->se.cfs_rq = tg->cfs_rq[cpu]; in set_task_rq()
2895 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
[all …]
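
The two rq_of() definitions at lines 1208 and 1215 above exist because the owning runqueue is found differently with and without group scheduling: with CONFIG_FAIR_GROUP_SCHED every cfs_rq carries an explicit ->rq back-pointer, while without it the lone cfs_rq is embedded in struct rq and container_of() recovers the owner. A standalone model of both variants, with simplified types and the macro below standing in for the config option:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rq;
struct cfs_rq {
#ifdef FAIR_GROUP_SCHED
	struct rq *rq;          /* back-pointer, set at init time */
#endif
	int nr_running;
};
struct rq { int cpu; struct cfs_rq cfs; };

#ifdef FAIR_GROUP_SCHED
static struct rq *rq_of(struct cfs_rq *cfs_rq) { return cfs_rq->rq; }
#else
static struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}
#endif

int main(void)
{
	struct rq rq = { .cpu = 3 };

#ifdef FAIR_GROUP_SCHED
	rq.cfs.rq = &rq;
#endif
	printf("cpu = %d\n", rq_of(&rq.cfs)->cpu);
	return 0;
}
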
core.c
4540 p->se.cfs_rq = NULL; in __sched_fork()
5560 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
9949 root_task_group.cfs_rq = (struct cfs_rq **)ptr; in sched_init()
10880 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth() local
10881 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth()
10884 cfs_rq->runtime_enabled = runtime_enabled; in tg_set_cfs_bandwidth()
10885 cfs_rq->runtime_remaining = 0; in tg_set_cfs_bandwidth()
10887 if (cfs_rq->throttled) in tg_set_cfs_bandwidth()
10888 unthrottle_cfs_rq(cfs_rq); in tg_set_cfs_bandwidth()
11128 total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); in throttled_time_self()
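
The tg_set_cfs_bandwidth() hits at lines 10880-10888 above form one per-CPU loop: when a group's quota changes, each CPU's cfs_rq is re-armed with the new runtime state, and any queue throttled under the old limit is released. A skeleton of that walk, with the rq locking and the period timer of the real code omitted and types as stand-ins:

#include <stdio.h>

#define NR_CPUS 4

struct cfs_rq {
	int runtime_enabled;
	long long runtime_remaining;
	int throttled;
};

struct task_group { struct cfs_rq *cfs_rq[NR_CPUS]; };

static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->throttled = 0;   /* the real code also re-enqueues entities */
}

static void tg_apply_bandwidth(struct task_group *tg, int runtime_enabled)
{
	for (int i = 0; i < NR_CPUS; i++) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];

		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;   /* force a refresh next tick */
		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
	}
}

int main(void)
{
	static struct cfs_rq per_cpu[NR_CPUS] = { [1] = { .throttled = 1 } };
	struct task_group tg;

	for (int i = 0; i < NR_CPUS; i++)
		tg.cfs_rq[i] = &per_cpu[i];

	tg_apply_bandwidth(&tg, 1);
	printf("cpu1 throttled=%d enabled=%d\n",
	       tg.cfs_rq[1]->throttled, tg.cfs_rq[1]->runtime_enabled);
	return 0;
}
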
/linux/include/trace/events/
sched.h
779 TP_PROTO(struct cfs_rq *cfs_rq),
780 TP_ARGS(cfs_rq));
811 TP_PROTO(struct cfs_rq *cfs_rq),
812 TP_ARGS(cfs_rq));
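
These TP_PROTO/TP_ARGS pairs pass a raw cfs_rq pointer, the shape of the bare DECLARE_TRACE() tracepoints (such as pelt_cfs_tp, fired from pelt.c above) that export no ring-buffer format and are meant for in-kernel probes. A sketch of a module-side consumer, assuming the register_trace_pelt_cfs_tp() helper that DECLARE_TRACE() generates for that name:

#include <linux/module.h>
#include <trace/events/sched.h>

static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	/* runs on every trace_pelt_cfs_tp(cfs_rq) call in pelt.c */
	pr_debug("pelt update on a cfs_rq\n");
}

static int __init pelt_probe_init(void)
{
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit pelt_probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
	tracepoint_synchronize_unregister();
}

module_init(pelt_probe_init);
module_exit(pelt_probe_exit);
MODULE_LICENSE("GPL");
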
/linux/include/linux/
sched.h
57 struct cfs_rq;
559 struct cfs_rq *cfs_rq; member
561 struct cfs_rq *my_q;
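
The last two members sit in struct sched_entity and are easy to confuse: cfs_rq is the queue this entity is enqueued on, while my_q is non-NULL only for group entities and points at the queue the group itself owns, where its children run. As a commented fragment (a hypothetical illustration; both members are guarded by CONFIG_FAIR_GROUP_SCHED in the real header):

struct cfs_rq;                  /* opaque here, defined in kernel/sched/sched.h */

/* hypothetical fragment of struct sched_entity, for illustration only */
struct sched_entity_fragment {
	struct cfs_rq *cfs_rq;  /* queue this entity is enqueued on            */
	struct cfs_rq *my_q;    /* queue this entity owns (group entities);
	                         * NULL for plain task entities                */
};
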