/* xref: /linux/block/blk-lib.c (revision 34ffec60) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Submit the previous bio in the chain (if any) and allocate a fresh bio
 * chained to it.  The final bio of the chain does not complete until all
 * of the earlier bios have.
 */
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
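
/*
 * A minimal usage sketch for blk_next_bio() (illustration only, not part
 * of the original file): each call submits the previous bio and returns a
 * fresh one chained to it, so the caller only waits on the tail of the
 * chain.  The helper name example_issue_chain() and the fixed two-chunk
 * split are hypothetical.
 */
static int __maybe_unused example_issue_chain(struct block_device *bdev,
					      sector_t sector)
{
	struct bio *bio = NULL;
	int i;

	for (i = 0; i < 2; i++) {
		/* Submits the previous bio (if any), returns a chained one. */
		bio = blk_next_bio(bio, 0, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector + i * 8;
		bio->bi_iter.bi_size = 8 << 9;	/* one 4 KiB chunk */
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
	}

	/* Waiting on the tail waits for the whole chain. */
	return submit_bio_wait(bio);
}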

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* The range must be aligned to the logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		/*
		 * Use a sector_t min so a huge nr_sects is not truncated
		 * to 32 bits before the comparison.
		 */
		sector_t req_sects = min_t(sector_t, nr_sects,
				bio_allowed_max_sectors(q));

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
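
/*
 * Sketch of why the __ variant takes a *biop anchor (illustration only,
 * not part of the original file): a caller can batch several
 * discontiguous ranges into one bio chain under a single plug and wait
 * once.  struct example_range and example_discard_ranges() are
 * hypothetical names.
 */
struct example_range {
	sector_t	sector;
	sector_t	nr_sects;
};

static int __maybe_unused example_discard_ranges(struct block_device *bdev,
		const struct example_range *r, int count)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int i, ret = 0;

	blk_start_plug(&plug);
	for (i = 0; i < count && !ret; i++)
		ret = __blkdev_issue_discard(bdev, r[i].sector, r[i].nr_sects,
					     GFP_KERNEL, 0, &bio);
	/* Only the tail of the chain is left unsubmitted; wait on it. */
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}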

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
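
/*
 * Simple synchronous usage (sketch, not part of the original file):
 * discard a whole device, as BLKDISCARD-based tools such as mkfs do.
 * The helper name example_discard_all() is hypothetical.
 */
static int __maybe_unused example_discard_all(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, get_capacity(bdev->bd_disk),
				    GFP_KERNEL, 0);
}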

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
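
/*
 * Usage sketch (not part of the original file): fill a page with a byte
 * pattern and let the device replicate its first logical block across a
 * range.  example_write_pattern() is a hypothetical name.
 */
static int __maybe_unused example_write_pattern(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, u8 byte)
{
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	memset(page_address(page), byte, PAGE_SIZE);

	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				      page);
	__free_page(page);
	return ret;
}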

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
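
/*
 * The NOUNMAP flag reaches the helper above through blkdev_issue_zeroout().
 * A sketch of the end-to-end call (not part of the original file;
 * example_zero_keep_provisioned() is a hypothetical name): zero a range
 * while keeping the blocks allocated on a thin-provisioned device.
 */
static int __maybe_unused example_zero_keep_provisioned(
		struct block_device *bdev, sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}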

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
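
/*
 * Worked example for __blkdev_sectors_to_bio_pages(), assuming 4 KiB
 * pages (PAGE_SIZE / 512 == 8): nr_sects 1..8 -> 1 page, nr_sects 9 ->
 * 2 pages, and anything from 8 * BIO_MAX_PAGES sectors upward is capped
 * at BIO_MAX_PAGES.
 */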

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/*
			 * A short bio_add_page() means the bio is full;
			 * fall out and chain a new bio for the rest.
			 */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the writes to
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
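
/*
 * Sketch (not part of the original file): because the batched helpers all
 * thread their bios onto the same *biop anchor, a caller can mix a discard
 * and a zeroout in one chain and wait once.  example_discard_then_zero()
 * is a hypothetical name.
 */
static int __maybe_unused example_discard_then_zero(struct block_device *bdev,
		sector_t discard_sector, sector_t zero_sector,
		sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, discard_sector, nr_sects,
				     GFP_KERNEL, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_zeroout(bdev, zero_sector, nr_sects,
					     GFP_KERNEL, &bio, 0);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}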

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
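
/*
 * Probe sketch (not part of the original file): with
 * BLKDEV_ZERO_NOFALLBACK the call fails with -EOPNOTSUPP instead of
 * writing zero pages, letting the caller pick its own fallback.
 * example_zero_offload_only() is a hypothetical name.
 */
static int __maybe_unused example_zero_offload_only(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	int ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				       BLKDEV_ZERO_NOFALLBACK);

	/* -EOPNOTSUPP here means no usable offload; fall back as needed. */
	return ret;
}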
406