
Searched refs:rq (Results 1 – 25 of 650) sorted by relevance


/linux/drivers/gpu/drm/i915/
i915_request.c
383 rq->ring->head = rq->postfix; in i915_request_retire()
404 rq->engine->remove_active_request(rq); in i915_request_retire()
556 if (rq->infix == rq->postfix) in __i915_request_skip()
559 RQ_TRACE(rq, "error: %d\n", rq->fence.error); in __i915_request_skip()
567 rq->infix = rq->postfix; in __i915_request_skip()
596 rq = i915_request_get(rq); in i915_request_mark_eio()
1000 rq->head = rq->ring->emit; in __i915_request_create()
1002 ret = rq->engine->request_alloc(rq); in __i915_request_create()
1811 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
2226 rq->fence.context, rq->fence.seqno, in i915_request_show()
[all …]
i915_request.h
64 #define RQ_TRACE(rq, fmt, ...) do { \ argument
414 dma_fence_put(&rq->fence); in i915_request_put()
509 seqno = __hwsp_seqno(rq); in hwsp_seqno()
517 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1); in __i915_request_has_started()
550 if (i915_request_signaled(rq)) in i915_request_started()
575 if (!i915_request_is_active(rq)) in i915_request_is_running()
579 result = __i915_request_has_started(rq) && i915_request_is_active(rq); in i915_request_is_running()
603 return !list_empty(&rq->sched.link); in i915_request_is_ready()
608 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); in __i915_request_is_complete()
615 if (i915_request_signaled(rq)) in i915_request_completed()
[all …]
/linux/kernel/sched/
pelt.h
13 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
24 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
64 static inline u64 rq_clock_pelt(struct rq *rq) in rq_clock_pelt() argument
69 return rq->clock_pelt - rq->lost_idle_time; in rq_clock_pelt()
75 rq->clock_pelt = rq_clock_task(rq); in _update_idle_rq_clock_pelt()
77 u64_u32_store(rq->clock_idle, rq_clock(rq)); in _update_idle_rq_clock_pelt()
150 rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt; in update_idle_rq_clock_pelt()
210 static inline u64 hw_load_avg(struct rq *rq) in hw_load_avg() argument
[all …]
sched.h
733 struct rq *rq; member
994 void (*func)(struct rq *rq);
1221 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1489 struct rq *rq = task_rq(p); in cfs_rq_of() local
1749 struct rq *rq; in this_rq_lock_irq() local
1805 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1807 void (*func)(struct rq *rq)) in queue_balance_callback() argument
2292 void (*rq_online)(struct rq *rq);
2293 void (*rq_offline)(struct rq *rq);
2295 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
[all …]
deadline.c
74 struct rq *rq = dl_se->rq; in rq_of_dl_se() local
325 struct rq *rq; in dl_change_utilization() local
769 struct rq *rq) in replenish_dl_new_period() argument
1129 struct rq *rq; in dl_task_timer() local
1433 dl_se->rq = rq; in dl_server_init()
1470 struct rq *rq; in inactive_task_timer() local
1476 rq = dl_se->rq; in inactive_task_timer()
1903 struct rq *rq; in select_task_rq_dl() local
1950 struct rq *rq; in migrate_task_rq_dl() local
2579 struct rq *rq; in set_cpus_allowed_dl() local
[all …]
stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
26 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags) in wakeup_preempt_stop() argument
36 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
41 return rq->stop; in pick_task_stop()
44 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
55 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
61 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_stop() argument
66 static void yield_task_stop(struct rq *rq) in yield_task_stop() argument
88 static void switched_to_stop(struct rq *rq, struct task_struct *p) in switched_to_stop() argument
94 prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_stop() argument
[all …]
stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
34 if (rq) in rq_sched_info_dequeue()
47 void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
50 void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
52 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
170 struct rq *rq; in psi_ttwu_dequeue() local
[all …]
rt.c
220 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
224 rt_rq->rq = rq; in init_tg_rt_entry()
306 struct rq *rq = rq_of_rt_se(rt_se); in rt_rq_of_se() local
533 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
634 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
1036 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq() local
1053 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq() local
1536 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1546 struct rq *rq; in select_task_rq_rt() local
2266 struct rq *rq; in rto_push_irq_work_func() local
[all …]
core.c
631 struct rq *rq; in __task_rq_lock() local
656 struct rq *rq; in task_rq_lock() local
1754 struct rq *rq; in uclamp_update_active() local
2296 struct rq *rq; in wait_task_inactive() local
3206 struct rq *rq; in __set_cpus_allowed_ptr() local
3250 struct rq *rq; in restrict_cpus_allowed_ptr() local
3727 struct rq *rq; in ttwu_stat() local
3853 struct rq *rq; in ttwu_runnable() local
4879 struct rq *rq; in wake_up_new_task() local
9119 struct rq *rq = scope.rq; in sched_rr_get_interval() local
[all …]
/linux/drivers/scsi/fnic/
vnic_rq.c
23 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
30 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
48 rq->buf_index = 0; in vnic_rq_alloc_bufs()
58 vdev = rq->vdev; in vnic_rq_free()
67 rq->ctrl = NULL; in vnic_rq_free()
76 rq->vdev = vdev; in vnic_rq_alloc()
79 if (!rq->ctrl) { in vnic_rq_alloc()
108 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); in vnic_rq_init()
117 rq->to_use = rq->to_clean = in vnic_rq_init()
[all …]
vnic_rq.h
96 return rq->ring.desc_avail; in vnic_rq_desc_avail()
102 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
107 return rq->to_use->desc; in vnic_rq_next_desc()
112 return rq->to_use->index; in vnic_rq_next_index()
117 return rq->buf_index++; in vnic_rq_next_buf_index()
132 rq->to_use = buf; in vnic_rq_post()
133 rq->ring.desc_avail--; in vnic_rq_post()
177 buf = rq->to_clean; in vnic_rq_service()
185 rq->ring.desc_avail++; in vnic_rq_service()
192 buf = rq->to_clean; in vnic_rq_service()
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_rq.c
26 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
31 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
48 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
58 vdev = rq->vdev; in vnic_rq_free()
69 rq->ctrl = NULL; in vnic_rq_free()
77 rq->index = index; in vnic_rq_alloc()
78 rq->vdev = vdev; in vnic_rq_alloc()
81 if (!rq->ctrl) { in vnic_rq_alloc()
120 rq->to_use = rq->to_clean = in vnic_rq_init_start()
185 rq->ring.desc_avail = rq->ring.desc_count - 1; in vnic_rq_clean()
[all …]
vnic_rq.h
87 return rq->ring.desc_avail; in vnic_rq_desc_avail()
93 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
98 return rq->to_use->desc; in vnic_rq_next_desc()
103 return rq->to_use->index; in vnic_rq_next_index()
120 rq->to_use = buf; in vnic_rq_post()
121 rq->ring.desc_avail--; in vnic_rq_post()
160 buf = rq->to_clean; in vnic_rq_service()
168 rq->ring.desc_avail++; in vnic_rq_service()
170 rq->to_clean = buf->next; in vnic_rq_service()
175 buf = rq->to_clean; in vnic_rq_service()
[all …]
/linux/drivers/gpu/drm/i915/gt/
gen8_engine_cs.c
46 if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0)) in gen8_emit_flush_rcs()
227 IS_DG2(rq->i915)) { in mtl_dummy_pipe_control()
474 rq->infix = intel_ring_offset(rq, cs); in gen8_emit_init_breadcrumb()
605 GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); in assert_request_valid()
618 rq->wa_tail = intel_ring_offset(rq, cs); in gen8_emit_wa_tail()
651 rq->tail = intel_ring_offset(rq, cs); in gen8_emit_fini_breadcrumb_tail()
652 assert_ring_tail_valid(rq->ring, rq->tail); in gen8_emit_fini_breadcrumb_tail()
659 return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); in emit_xcs_breadcrumb()
664 return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); in gen8_emit_fini_breadcrumb_xcs()
799 rq->tail = intel_ring_offset(rq, cs); in gen12_emit_fini_breadcrumb_tail()
[all …]
gen6_engine_cs.c
167 *cs++ = rq->fence.seqno; in gen6_emit_breadcrumb_rcs()
172 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_rcs()
173 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_rcs()
337 gen7_stall_cs(rq); in gen7_emit_flush_rcs()
364 *cs++ = rq->fence.seqno; in gen7_emit_breadcrumb_rcs()
369 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_rcs()
370 assert_ring_tail_valid(rq->ring, rq->tail); in gen7_emit_breadcrumb_rcs()
386 rq->tail = intel_ring_offset(rq, cs); in gen6_emit_breadcrumb_xcs()
387 assert_ring_tail_valid(rq->ring, rq->tail); in gen6_emit_breadcrumb_xcs()
418 rq->tail = intel_ring_offset(rq, cs); in gen7_emit_breadcrumb_xcs()
[all …]
intel_breadcrumbs.c
113 if (rq->context != ce) in check_signal_order()
221 &rq->fence.flags)) in signal_irq_work()
255 rq->engine->sched_engine->retire_inflight_request_prio(rq); in signal_irq_work()
257 spin_lock(&rq->lock); in signal_irq_work()
263 i915_request_put(rq); in signal_irq_work()
343 i915_request_get(rq); in irq_signal_request()
395 i915_request_get(rq); in insert_breadcrumb()
456 i915_request_put(rq); in i915_request_cancel_breadcrumb()
474 &rq->fence.flags)) in intel_context_remove_breadcrumbs()
479 i915_request_put(rq); in intel_context_remove_breadcrumbs()
[all …]
selftest_execlists.c
266 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
289 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
873 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
2082 intel_context_ban(rq->context, rq); in __cancel_active0()
2141 intel_context_ban(rq[1]->context, rq[1]); in __cancel_active1()
2224 intel_context_ban(rq[2]->context, rq[2]); in __cancel_queued()
2293 intel_context_ban(rq->context, rq); in __cancel_hostile()
2591 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2772 err = rq->engine->emit_bb_start(rq, in create_gang()
3188 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
[all …]
/linux/fs/erofs/
decompressor.c
67 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_prepare_dstpages() local
128 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_handle_overlap() local
210 struct z_erofs_decompress_req *rq = ctx->rq; in z_erofs_lz4_decompress_mem() local
243 rq->inputsize, rq->outputsize, rq->outputsize); in z_erofs_lz4_decompress_mem()
246 rq->inputsize, rq->outputsize); in z_erofs_lz4_decompress_mem()
279 ctx.rq = rq; in z_erofs_lz4_decompress()
280 ctx.oend = rq->pageofs_out + rq->outputsize; in z_erofs_lz4_decompress()
326 if (rq->outputsize > rq->inputsize) in z_erofs_transform_plain()
334 if (rq->out[0] == rq->in[nrpages_in - 1]) { in z_erofs_transform_plain()
338 memcpy_to_page(rq->out[0], rq->pageofs_out, in z_erofs_transform_plain()
[all …]
/linux/include/linux/
blk-mq.h
208 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) argument
249 #define rq_list_next(rq) (rq)->rq_next argument
265 *src = rq->rq_next; in rq_list_move()
524 struct request *rq; member
820 complete(rq); in blk_mq_complete_request_direct()
944 return rq + 1; in blk_mq_rq_to_pdu()
957 rq->q->mq_ops->cleanup_rq(rq); in blk_mq_cleanup_rq()
965 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
1013 if ((rq->bio)) \
1048 if (!rq->bio) in blk_rq_cur_bytes()
[all …]
/linux/block/
blk-flush.c
107 if (blk_rq_sectors(rq)) in blk_flush_policy()
132 rq->bio = rq->biotail; in blk_flush_restore_request()
133 if (rq->bio) in blk_flush_restore_request()
134 rq->__sector = rq->bio->bi_iter.bi_sector; in blk_flush_restore_request()
138 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
174 rq->flush.seq |= seq; in blk_flush_complete_seq()
223 struct request *rq, *n; in flush_end_io() local
363 WARN_ON(rq->tag < 0); in mq_flush_data_end_io()
387 rq->flush.seq = 0; in blk_rq_init_flush()
389 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ in blk_rq_init_flush()
[all …]
blk-mq.c
316 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
564 if (!rq || rq->q != q) in blk_mq_alloc_cached_request()
606 rq->bio = rq->biotail = NULL; in blk_mq_alloc_request()
763 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
1046 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
1096 rq_qos_done(rq->q, rq); in blk_mq_end_request_batch()
1102 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE) in blk_mq_end_request_batch()
1265 rq->mq_hctx->tags->rqs[rq->tag] = rq; in blk_mq_start_request()
2028 bd.rq = rq; in blk_mq_dispatch_rq_list()
2567 .rq = rq, in __blk_mq_issue_directly()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
30 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe))) in mlx5e_xsk_alloc_rx_mpwqe()
62 mxbuf->rq = rq; in mlx5e_xsk_alloc_rx_mpwqe()
70 .key = rq->mkey_be, in mlx5e_xsk_alloc_rx_mpwqe()
73 mxbuf->rq = rq; in mlx5e_xsk_alloc_rx_mpwqe()
83 .key = rq->mkey_be, in mlx5e_xsk_alloc_rx_mpwqe()
87 .key = rq->mkey_be, in mlx5e_xsk_alloc_rx_mpwqe()
91 .key = rq->mkey_be, in mlx5e_xsk_alloc_rx_mpwqe()
95 .key = rq->mkey_be, in mlx5e_xsk_alloc_rx_mpwqe()
98 mxbuf->rq = rq; in mlx5e_xsk_alloc_rx_mpwqe()
119 mxbuf->rq = rq; in mlx5e_xsk_alloc_rx_mpwqe()
[all …]
/linux/drivers/scsi/esas2r/
esas2r_disc.c
359 rq->interrupt_cx = dc; in esas2r_disc_start_port()
529 rq, in esas2r_disc_block_dev_scan()
538 rq->timeout = 30000; in esas2r_disc_block_dev_scan()
539 rq->interrupt_cx = dc; in esas2r_disc_block_dev_scan()
604 rq, in esas2r_disc_raid_grp_info()
615 rq->interrupt_cx = dc; in esas2r_disc_raid_grp_info()
666 rq->req_stat); in esas2r_disc_raid_grp_info_cb()
715 rq, in esas2r_disc_part_info()
730 rq->interrupt_cx = dc; in esas2r_disc_part_info()
807 rq, in esas2r_disc_passthru_dev_info()
[all …]
esas2r_vda.c
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
200 rq->vrq->mgt.dev_index = in esas2r_process_vda_ioctl()
355 clear_vda_request(rq); in esas2r_build_flash_req()
382 clear_vda_request(rq); in esas2r_build_mgt_req()
424 clear_vda_request(rq); in esas2r_build_ae_req()
435 rq->vrq_md->phys_addr + in esas2r_build_ae_req()
455 clear_vda_request(rq); in esas2r_build_cli_req()
472 clear_vda_request(rq); in esas2r_build_ioctl_req()
490 clear_vda_request(rq); in esas2r_build_cfg_req()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_rx.c
340 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; in get_frag()
819 .umr.rq = rq, in mlx5e_alloc_rx_mpwqe()
980 struct mlx5e_rq *rq = &c->rq; in mlx5e_handle_shampo_hd_umr() local
1131 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; in mlx5e_post_rx_mpwqes()
1668 mxbuf->rq = rq; in mlx5e_fill_mxbuf()
1743 rq->buff.frame0_sz, rq->buff.map_dir); in mlx5e_skb_from_cqe_nonlinear()
1747 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz, in mlx5e_skb_from_cqe_nonlinear()
1811 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); in trigger_report()
2476 rq, cqe); in mlx5e_rx_cq_process_enhanced_cqe_comp()
2511 rq, cqe); in mlx5e_rx_cq_process_basic_cqe_comp()
[all …]
