Searched refs:queue_max_segments (Results 1 – 11 of 11) sorted by relevance
137 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); in nvme_zns_alloc_report_buffer()
112 return queue_var_show(queue_max_segments(q), page); in queue_max_segments_show()
481 QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
181 return queue_max_segments(rq->q); in blk_rq_get_max_segments()
575 if (nr_segs > queue_max_segments(q)) in blk_rq_map_user_bvec()
1002 min(bio->bi_max_vecs, queue_max_segments(q))) in bio_add_hw_page()
188 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); in sd_zbc_alloc_report_buffer()
350 sdp->sg_tablesize = queue_max_segments(q); in sg_open()
1480 sdp->sg_tablesize = queue_max_segments(q); in sg_alloc()
4264 i = queue_max_segments(SDp->request_queue); in st_probe()
1154 static inline unsigned short queue_max_segments(const struct request_queue *q) in queue_max_segments() function
1202 return queue_max_segments(bdev_get_queue(bdev)); in bdev_max_segments()
952 if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
960 if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) { in pkt_set_segment_merging()
548 bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); in virtblk_alloc_report_buffer()