/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
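
/*
 * blk_try_enter_queue() is the lockless fast path: it only takes a percpu
 * reference on q->q_usage_counter.  If the queue is frozen, draining or
 * gated by pm_only, bio_queue_enter() falls back to __bio_queue_enter(),
 * which may sleep until submission is allowed again, or ends the bio for
 * REQ_NOWAIT submitters and dead disks.  Rough usage sketch, illustrative
 * only, with error handling elided:
 *
 *	if (bio_queue_enter(bio))
 *		return;		(the bio was already ended on failure)
 *	... issue or queue the bio ...
 *	blk_queue_exit(q);	(drop the usage reference again)
 */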

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
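
/*
 * Worked example for the boundary check above, with illustrative numbers:
 * with a 64K segment boundary the mask is 0xffff.  Two bvecs at physical
 * addresses 0x1f000 (len 0x1000) and 0x20000 are contiguous, but
 * (0x1f000 | 0xffff) == 0x1ffff while ((0x20000 + len2 - 1) | 0xffff) ==
 * 0x2ffff, so the merge is rejected: the combined segment would straddle a
 * 64K boundary the hardware cannot cross.
 */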

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
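
/*
 * Background for the virt_boundary checks above: a non-zero
 * virt_boundary_mask describes hardware that cannot handle gaps inside a
 * request.  Every element except the first must start on the boundary and
 * every element except the last must end on it (NVMe is the classic
 * example: PRP list entries other than the first must be page aligned and
 * page sized).  When bvec_gap_to_prev() returns true the bio is split, or
 * the merge is rejected, rather than producing such a gap.
 */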

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the request will be a normal read/write request.  The
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
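
/*
 * Example of case 1) above: NVMe advertises max_discard_segments up to the
 * number of ranges one Dataset Management command can carry (256 in the
 * spec), so several discontiguous discard bios can be merged into a single
 * request and issued as one command.  Illustrative note only; other drivers
 * pick their own limits.
 */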

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
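
/*
 * The UINT_MAX >> SECTOR_SHIFT clamp on the discard path above keeps the
 * request size, once converted to bytes, representable in the 32-bit byte
 * counters used for requests (e.g. blk_rq_bytes()), even when the device
 * advertises a larger max_discard_sectors.
 */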

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
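
/*
 * Rough behaviour implied by the two limits above (see the plug handling
 * in blk-mq.c, e.g. blk_add_rq_to_plug()): a task's plug is flushed once
 * it holds BLK_MAX_REQUEST_COUNT requests, or earlier when the last queued
 * request has grown to BLK_PLUG_FLUSH_SIZE bytes, so plugging cannot defer
 * an unbounded amount of I/O.
 */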

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
					 const struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but
	 * compared to the performance impact of cloned bios themselves the
	 * extra split pass doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs);
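
/*
 * Minimal sketch of how the two helpers above cooperate on the submission
 * path (this mirrors bio_split_to_limits() in blk-merge.c; illustrative
 * only, error handling elided):
 *
 *	if (bio_may_exceed_limits(bio, lim))
 *		bio = __bio_split_to_limits(bio, lim, &nr_segs);
 *
 * The cheap check filters out the common small-bio case before the real
 * splitting and segment counting is done.
 */
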
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
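
/*
 * The unsigned wrap above flags a count that is either already zero or
 * within 127 of wrapping: adding 127u maps 0 to 127 and anything >=
 * UINT_MAX - 126 into the range 0..126, so the <= 127u test catches both
 * an underflow (a put after the last reference) and an imminent overflow.
 */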

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				       sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
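
/*
 * Worked example of the packing above, with illustrative values: for a bio
 * of 8 sectors, bio_issue_init() stores the low 51 bits of
 * blk_time_get_ns() in bits 0..50 and (8 & 0xfff) in bits 51..62, leaving
 * bit 63 alone for BIO_ISSUE_THROTL_SKIP_LATENCY.  bio_issue_time() and
 * bio_issue_size() mask/shift the fields back out, so timestamps wrap
 * roughly every 2^51 ns (about 26 days) and sizes above 4095 sectors are
 * truncated.
 */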

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

#endif /* BLK_INTERNAL_H */