
Searched refs:mq_hctx (Results 1 – 25 of 33) sorted by relevance

/dports/multimedia/v4l-utils/linux-5.13-rc2/block/
blk-mq-sched.c
84 return rqa->mq_hctx > rqb->mq_hctx; in sched_rq_cmp()
90 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
96 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
171 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
278 } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1)); in blk_mq_do_dispatch_ctx()
425 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request()
blk-mq.c
294 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
493 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
498 rq->mq_hctx = NULL; in __blk_mq_free_request()
512 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
1085 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_get_driver_tag()
1091 if (!hctx_may_queue(rq->mq_hctx, bt)) in __blk_mq_get_driver_tag()
1353 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1917 if (rqa->mq_hctx != rqb->mq_hctx) in plug_rq_cmp()
1918 return rqa->mq_hctx > rqb->mq_hctx; in plug_rq_cmp()
2281 data.hctx = same_queue_rq->mq_hctx; in blk_mq_submit_bio()
[all …]
blk-flush.c
314 flush_rq->mq_hctx = first_rq->mq_hctx; in blk_kick_flush()
340 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
blk-mq-tag.c
218 if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx) in bt_iter()
613 return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) | in blk_mq_unique_tag()
blk-mq.h
259 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
mq-deadline.c
571 blk_mq_sched_mark_restart_hctx(rq->mq_hctx); in dd_finish_request()
blk-mq-debugfs.c
406 if (rq->mq_hctx == params->hctx) in hctx_show_busy_rq()
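
Taken together, the block-layer hits above trace the lifetime of the rq->mq_hctx back-pointer: blk_mq_rq_ctx_init() assigns it from the allocation data (blk-mq.c:294), the dispatch and tag paths read it to find the owning hardware queue, and __blk_mq_free_request() clears it again (blk-mq.c:498). The sketch below illustrates that assign/use/clear pattern in plain user-space C; the struct definitions are simplified stand-ins, not the real linux-5.13-rc2 declarations.

/* Hedged sketch of the rq->mq_hctx lifecycle visible in the hits above.
 * The types mirror the kernel's names but are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

struct blk_mq_hw_ctx {
        unsigned int queue_num;          /* index of the hardware queue */
};

struct request {
        struct blk_mq_hw_ctx *mq_hctx;   /* back-pointer to the hw queue */
};

/* Mirrors blk_mq_rq_ctx_init(): rq->mq_hctx = data->hctx; */
static void rq_ctx_init(struct request *rq, struct blk_mq_hw_ctx *hctx)
{
        rq->mq_hctx = hctx;
}

/* Mirrors the dispatch-side check rq->mq_hctx != hctx seen in
 * blk_mq_dispatch_hctx_list() and __blk_mq_do_dispatch_sched(). */
static int rq_belongs_to(const struct request *rq, const struct blk_mq_hw_ctx *hctx)
{
        return rq->mq_hctx == hctx;
}

/* Mirrors __blk_mq_free_request(): rq->mq_hctx = NULL; */
static void rq_free(struct request *rq)
{
        rq->mq_hctx = NULL;
}

int main(void)
{
        struct blk_mq_hw_ctx hctx = { .queue_num = 2 };
        struct request rq;

        rq_ctx_init(&rq, &hctx);
        printf("on hw queue %u, belongs: %d\n",
               rq.mq_hctx->queue_num, rq_belongs_to(&rq, &hctx));
        rq_free(&rq);
        return 0;
}
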
/dports/multimedia/v4l_compat/linux-5.13-rc2/block/
blk-mq-sched.c
84 return rqa->mq_hctx > rqb->mq_hctx; in sched_rq_cmp()
90 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
96 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
171 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
278 } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1)); in blk_mq_do_dispatch_ctx()
425 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request()
blk-mq.c
294 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
493 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
498 rq->mq_hctx = NULL; in __blk_mq_free_request()
512 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
1085 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_get_driver_tag()
1091 if (!hctx_may_queue(rq->mq_hctx, bt)) in __blk_mq_get_driver_tag()
1353 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1917 if (rqa->mq_hctx != rqb->mq_hctx) in plug_rq_cmp()
1918 return rqa->mq_hctx > rqb->mq_hctx; in plug_rq_cmp()
2281 data.hctx = same_queue_rq->mq_hctx; in blk_mq_submit_bio()
[all …]
blk-flush.c
314 flush_rq->mq_hctx = first_rq->mq_hctx; in blk_kick_flush()
340 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
blk-mq-tag.c
218 if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx) in bt_iter()
613 return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) | in blk_mq_unique_tag()
blk-mq.h
259 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
mq-deadline.c
571 blk_mq_sched_mark_restart_hctx(rq->mq_hctx); in dd_finish_request()
blk-mq-debugfs.c
406 if (rq->mq_hctx == params->hctx) in hctx_show_busy_rq()
/dports/multimedia/libv4l/linux-5.13-rc2/block/
blk-mq-sched.c
84 return rqa->mq_hctx > rqb->mq_hctx; in sched_rq_cmp()
90 list_first_entry(rq_list, struct request, queuelist)->mq_hctx; in blk_mq_dispatch_hctx_list()
96 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
171 if (rq->mq_hctx != hctx) in __blk_mq_do_dispatch_sched()
278 } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1)); in blk_mq_do_dispatch_ctx()
425 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_sched_insert_request()
blk-mq.c
294 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
493 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
498 rq->mq_hctx = NULL; in __blk_mq_free_request()
512 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
1085 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_get_driver_tag()
1091 if (!hctx_may_queue(rq->mq_hctx, bt)) in __blk_mq_get_driver_tag()
1353 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1917 if (rqa->mq_hctx != rqb->mq_hctx) in plug_rq_cmp()
1918 return rqa->mq_hctx > rqb->mq_hctx; in plug_rq_cmp()
2281 data.hctx = same_queue_rq->mq_hctx; in blk_mq_submit_bio()
[all …]
blk-flush.c
314 flush_rq->mq_hctx = first_rq->mq_hctx; in blk_kick_flush()
340 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in mq_flush_data_end_io()
blk-mq-tag.c
218 if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx) in bt_iter()
613 return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) | in blk_mq_unique_tag()
blk-mq.h
259 __blk_mq_put_driver_tag(rq->mq_hctx, rq); in blk_mq_put_driver_tag()
mq-deadline.c
571 blk_mq_sched_mark_restart_hctx(rq->mq_hctx); in dd_finish_request()
blk-mq-debugfs.c
406 if (rq->mq_hctx == params->hctx) in hctx_show_busy_rq()
/dports/multimedia/v4l_compat/linux-5.13-rc2/drivers/nvme/host/
nvme.h
187 return req->mq_hctx->queue_num + 1; in nvme_req_qid()
/dports/multimedia/libv4l/linux-5.13-rc2/drivers/nvme/host/
nvme.h
187 return req->mq_hctx->queue_num + 1; in nvme_req_qid()
/dports/multimedia/v4l-utils/linux-5.13-rc2/drivers/nvme/host/
nvme.h
187 return req->mq_hctx->queue_num + 1; in nvme_req_qid()
/dports/multimedia/v4l_compat/linux-5.13-rc2/include/linux/
blkdev.h
125 struct blk_mq_hw_ctx *mq_hctx; member
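
The blkdev.h hit shows where the pointer is declared: struct request carries a struct blk_mq_hw_ctx *mq_hctx member. The NVMe hits (nvme.h:187) and blk_mq_unique_tag() (blk-mq-tag.c:613) show the most common consumer pattern, deriving a queue identifier from mq_hctx->queue_num. The sketch below reproduces that derivation with simplified stand-in types; the real kernel helpers also handle cases the listing does not show (for example, nvme_req_qid() returns 0 for admin-queue requests), so treat this as an illustration of the hits, not the full definitions.

/* Hedged sketch: BLK_MQ_UNIQUE_TAG_BITS and the struct layouts are
 * simplified stand-ins for the real linux-5.13-rc2 headers. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16
#define BLK_MQ_UNIQUE_TAG_MASK ((1U << BLK_MQ_UNIQUE_TAG_BITS) - 1)

struct blk_mq_hw_ctx {
        unsigned int queue_num;          /* hardware queue index */
};

struct request {
        struct blk_mq_hw_ctx *mq_hctx;   /* the blkdev.h member above */
        unsigned int tag;                /* driver tag within the queue */
};

/* Mirrors the nvme.h:187 hit: I/O queue ids start at 1, so the
 * hardware queue index is offset by one. */
static unsigned int req_qid(const struct request *req)
{
        return req->mq_hctx->queue_num + 1;
}

/* Mirrors blk_mq_unique_tag(): fold the hw queue number into the high
 * bits and keep the per-queue tag in the low bits. */
static uint32_t unique_tag(const struct request *rq)
{
        return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
               (rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}

int main(void)
{
        struct blk_mq_hw_ctx hctx = { .queue_num = 3 };
        struct request rq = { .mq_hctx = &hctx, .tag = 42 };

        printf("qid=%u unique_tag=0x%08" PRIx32 "\n",
               req_qid(&rq), unique_tag(&rq));
        return 0;
}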
