Home
last modified time | relevance | path

Searched refs: this_rq (Results 1 – 25 of 27) sorted by relevance

12

/dports/multimedia/v4l_compat/linux-5.13-rc2/kernel/sched/
H A Dloadavg.c83 nr_active = this_rq->nr_running - adjust; in calc_load_fold_active()
84 nr_active += (long)this_rq->nr_uninterruptible; in calc_load_fold_active()
86 if (nr_active != this_rq->calc_load_active) { in calc_load_fold_active()
87 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
88 this_rq->calc_load_active = nr_active; in calc_load_fold_active()
252 calc_load_nohz_fold(this_rq()); in calc_load_nohz_start()
266 struct rq *this_rq = this_rq(); in calc_load_nohz_stop() local
281 this_rq->calc_load_update += LOAD_FREQ; in calc_load_nohz_stop()
386 void calc_global_load_tick(struct rq *this_rq) in calc_global_load_tick() argument
393 delta = calc_load_fold_active(this_rq, 0); in calc_global_load_tick()
[all …]
H A Dsched.h1377 rq = this_rq(); in this_rq_lock_irq()
2198 __releases(this_rq->lock) in _double_lock_balance()
2200 __acquires(this_rq->lock) in _double_lock_balance()
2202 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2203 double_rq_lock(this_rq, busiest); in _double_lock_balance()
2217 __releases(this_rq->lock) in _double_lock_balance()
2219 __acquires(this_rq->lock) in _double_lock_balance()
2224 if (busiest < this_rq) { in _double_lock_balance()
2225 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2227 raw_spin_lock_nested(&this_rq->lock, in _double_lock_balance()
[all …]
H A Drt.c585 return this_rq()->rd->span; in sched_rt_period_mask()
2118 rq = this_rq(); in rto_push_irq_work_func()
2150 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2167 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2172 tell_cpu_to_push(this_rq); in pull_rt_task()
2191 this_rq->rt.highest_prio.curr) in pull_rt_task()
2200 double_lock_balance(this_rq, src_rq); in pull_rt_task()
2232 activate_task(this_rq, p, 0); in pull_rt_task()
2246 raw_spin_unlock(&this_rq->lock); in pull_rt_task()
2249 raw_spin_lock(&this_rq->lock); in pull_rt_task()
[all …]
H A Dfair.c9621 .dst_rq = this_rq, in load_balance()
10418 int this_cpu = this_rq->cpu; in _nohz_idle_balance()
10541 int this_cpu = this_rq->cpu; in nohz_newidle_balance()
10589 int this_cpu = this_rq->cpu; in newidle_balance()
10599 this_rq->idle_stamp = rq_clock(this_rq); in newidle_balance()
10613 rq_unpin_lock(this_rq, rf); in newidle_balance()
10679 if (this_rq->nr_running != this_rq->cfs.h_nr_running) in newidle_balance()
10688 this_rq->idle_stamp = 0; in newidle_balance()
10692 rq_repin_lock(this_rq, rf); in newidle_balance()
10703 struct rq *this_rq = this_rq(); in run_rebalance_domains() local
[all …]
H A Ddeadline.c2215 int this_cpu = this_rq->cpu, cpu; in pull_dl_task()
2221 if (likely(!dl_overloaded(this_rq))) in pull_dl_task()
2240 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2247 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2264 (!this_rq->dl.dl_nr_running || in pull_dl_task()
2266 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
2283 activate_task(this_rq, p, 0); in pull_dl_task()
2291 double_unlock_balance(this_rq, src_rq); in pull_dl_task()
2294 raw_spin_unlock(&this_rq->lock); in pull_dl_task()
2297 raw_spin_lock(&this_rq->lock); in pull_dl_task()
[all …]
H A Dcputime.c222 struct rq *rq = this_rq(); in account_idle_time()
242 steal -= this_rq()->prev_steal_time; in steal_account_process_time()
245 this_rq()->prev_steal_time += steal; in steal_account_process_time()
385 } else if (p == this_rq()->idle) { in irqtime_account_process_tick()
491 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
H A Dmembarrier.c234 struct rq *rq = this_rq(); in membarrier_update_current_mm()
H A Didle.c22 idle_set_state(this_rq(), idle_state); in sched_idle_set_state()
H A Dcore.c403 if (rq == this_rq()) in hrtick_start()
1762 this_rq()->nr_pinned++; in migrate_disable()
1791 this_rq()->nr_pinned--; in migrate_enable()
1920 struct rq *rq = this_rq(); in migration_cpu_stop()
2921 rq = this_rq(); in ttwu_stat()
3059 struct rq *rq = this_rq(); in sched_ttwu_pending()
4173 struct rq *rq = this_rq(); in finish_task_switch()
7127 rq = this_rq(); in yield_to()
7602 BUG_ON(current != this_rq()->idle); in idle_task_exit()
7615 struct rq *rq = this_rq(); in __balance_push_cpu_stop()
[all …]
/dports/multimedia/libv4l/linux-5.13-rc2/kernel/sched/
H A Dloadavg.c83 nr_active = this_rq->nr_running - adjust; in calc_load_fold_active()
84 nr_active += (long)this_rq->nr_uninterruptible; in calc_load_fold_active()
86 if (nr_active != this_rq->calc_load_active) { in calc_load_fold_active()
87 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
88 this_rq->calc_load_active = nr_active; in calc_load_fold_active()
252 calc_load_nohz_fold(this_rq()); in calc_load_nohz_start()
266 struct rq *this_rq = this_rq(); in calc_load_nohz_stop() local
281 this_rq->calc_load_update += LOAD_FREQ; in calc_load_nohz_stop()
386 void calc_global_load_tick(struct rq *this_rq) in calc_global_load_tick() argument
393 delta = calc_load_fold_active(this_rq, 0); in calc_global_load_tick()
[all …]
H A Dsched.h1377 rq = this_rq(); in this_rq_lock_irq()
2198 __releases(this_rq->lock) in _double_lock_balance()
2200 __acquires(this_rq->lock) in _double_lock_balance()
2202 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2203 double_rq_lock(this_rq, busiest); in _double_lock_balance()
2217 __releases(this_rq->lock) in _double_lock_balance()
2219 __acquires(this_rq->lock) in _double_lock_balance()
2224 if (busiest < this_rq) { in _double_lock_balance()
2225 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2227 raw_spin_lock_nested(&this_rq->lock, in _double_lock_balance()
[all …]
H A Drt.c585 return this_rq()->rd->span; in sched_rt_period_mask()
2118 rq = this_rq(); in rto_push_irq_work_func()
2150 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2167 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2172 tell_cpu_to_push(this_rq); in pull_rt_task()
2191 this_rq->rt.highest_prio.curr) in pull_rt_task()
2200 double_lock_balance(this_rq, src_rq); in pull_rt_task()
2232 activate_task(this_rq, p, 0); in pull_rt_task()
2246 raw_spin_unlock(&this_rq->lock); in pull_rt_task()
2249 raw_spin_lock(&this_rq->lock); in pull_rt_task()
[all …]
H A Dfair.c9621 .dst_rq = this_rq, in load_balance()
10418 int this_cpu = this_rq->cpu; in _nohz_idle_balance()
10541 int this_cpu = this_rq->cpu; in nohz_newidle_balance()
10589 int this_cpu = this_rq->cpu; in newidle_balance()
10599 this_rq->idle_stamp = rq_clock(this_rq); in newidle_balance()
10613 rq_unpin_lock(this_rq, rf); in newidle_balance()
10679 if (this_rq->nr_running != this_rq->cfs.h_nr_running) in newidle_balance()
10688 this_rq->idle_stamp = 0; in newidle_balance()
10692 rq_repin_lock(this_rq, rf); in newidle_balance()
10703 struct rq *this_rq = this_rq(); in run_rebalance_domains() local
[all …]
H A Ddeadline.c2215 int this_cpu = this_rq->cpu, cpu; in pull_dl_task()
2221 if (likely(!dl_overloaded(this_rq))) in pull_dl_task()
2240 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2247 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2264 (!this_rq->dl.dl_nr_running || in pull_dl_task()
2266 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
2283 activate_task(this_rq, p, 0); in pull_dl_task()
2291 double_unlock_balance(this_rq, src_rq); in pull_dl_task()
2294 raw_spin_unlock(&this_rq->lock); in pull_dl_task()
2297 raw_spin_lock(&this_rq->lock); in pull_dl_task()
[all …]
H A Dcputime.c222 struct rq *rq = this_rq(); in account_idle_time()
242 steal -= this_rq()->prev_steal_time; in steal_account_process_time()
245 this_rq()->prev_steal_time += steal; in steal_account_process_time()
385 } else if (p == this_rq()->idle) { in irqtime_account_process_tick()
491 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
H A Dmembarrier.c234 struct rq *rq = this_rq(); in membarrier_update_current_mm()
H A Didle.c22 idle_set_state(this_rq(), idle_state); in sched_idle_set_state()
/dports/multimedia/v4l-utils/linux-5.13-rc2/kernel/sched/
H A Dloadavg.c83 nr_active = this_rq->nr_running - adjust; in calc_load_fold_active()
84 nr_active += (long)this_rq->nr_uninterruptible; in calc_load_fold_active()
86 if (nr_active != this_rq->calc_load_active) { in calc_load_fold_active()
87 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
88 this_rq->calc_load_active = nr_active; in calc_load_fold_active()
252 calc_load_nohz_fold(this_rq()); in calc_load_nohz_start()
266 struct rq *this_rq = this_rq(); in calc_load_nohz_stop() local
281 this_rq->calc_load_update += LOAD_FREQ; in calc_load_nohz_stop()
386 void calc_global_load_tick(struct rq *this_rq) in calc_global_load_tick() argument
393 delta = calc_load_fold_active(this_rq, 0); in calc_global_load_tick()
[all …]
H A Dsched.h1377 rq = this_rq(); in this_rq_lock_irq()
2198 __releases(this_rq->lock) in _double_lock_balance()
2200 __acquires(this_rq->lock) in _double_lock_balance()
2202 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2203 double_rq_lock(this_rq, busiest); in _double_lock_balance()
2217 __releases(this_rq->lock) in _double_lock_balance()
2219 __acquires(this_rq->lock) in _double_lock_balance()
2224 if (busiest < this_rq) { in _double_lock_balance()
2225 raw_spin_unlock(&this_rq->lock); in _double_lock_balance()
2227 raw_spin_lock_nested(&this_rq->lock, in _double_lock_balance()
[all …]
H A Drt.c585 return this_rq()->rd->span; in sched_rt_period_mask()
2118 rq = this_rq(); in rto_push_irq_work_func()
2150 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2167 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2172 tell_cpu_to_push(this_rq); in pull_rt_task()
2191 this_rq->rt.highest_prio.curr) in pull_rt_task()
2200 double_lock_balance(this_rq, src_rq); in pull_rt_task()
2232 activate_task(this_rq, p, 0); in pull_rt_task()
2246 raw_spin_unlock(&this_rq->lock); in pull_rt_task()
2249 raw_spin_lock(&this_rq->lock); in pull_rt_task()
[all …]
H A Dfair.c9621 .dst_rq = this_rq, in load_balance()
10418 int this_cpu = this_rq->cpu; in _nohz_idle_balance()
10541 int this_cpu = this_rq->cpu; in nohz_newidle_balance()
10589 int this_cpu = this_rq->cpu; in newidle_balance()
10599 this_rq->idle_stamp = rq_clock(this_rq); in newidle_balance()
10613 rq_unpin_lock(this_rq, rf); in newidle_balance()
10679 if (this_rq->nr_running != this_rq->cfs.h_nr_running) in newidle_balance()
10688 this_rq->idle_stamp = 0; in newidle_balance()
10692 rq_repin_lock(this_rq, rf); in newidle_balance()
10703 struct rq *this_rq = this_rq(); in run_rebalance_domains() local
[all …]
H A Ddeadline.c2215 int this_cpu = this_rq->cpu, cpu; in pull_dl_task()
2221 if (likely(!dl_overloaded(this_rq))) in pull_dl_task()
2240 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2247 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2264 (!this_rq->dl.dl_nr_running || in pull_dl_task()
2266 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
2283 activate_task(this_rq, p, 0); in pull_dl_task()
2291 double_unlock_balance(this_rq, src_rq); in pull_dl_task()
2294 raw_spin_unlock(&this_rq->lock); in pull_dl_task()
2297 raw_spin_lock(&this_rq->lock); in pull_dl_task()
[all …]
H A Dcputime.c222 struct rq *rq = this_rq(); in account_idle_time()
242 steal -= this_rq()->prev_steal_time; in steal_account_process_time()
245 this_rq()->prev_steal_time += steal; in steal_account_process_time()
385 } else if (p == this_rq()->idle) { in irqtime_account_process_tick()
491 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
H A Didle.c22 idle_set_state(this_rq(), idle_state); in sched_idle_set_state()
H A Dmembarrier.c234 struct rq *rq = this_rq(); in membarrier_update_current_mm()

12