xref: /linux/block/blk-map.c (revision 13f3956e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to mapping data to requests
4  */
5 #include <linux/kernel.h>
6 #include <linux/sched/task_stack.h>
7 #include <linux/module.h>
8 #include <linux/bio.h>
9 #include <linux/blkdev.h>
10 #include <linux/uio.h>
11 
12 #include "blk.h"
13 
14 struct bio_map_data {
15 	bool is_our_pages : 1;
16 	bool is_null_mapped : 1;
17 	struct iov_iter iter;
18 	struct iovec iov[];
19 };
20 
21 static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
22 					       gfp_t gfp_mask)
23 {
24 	struct bio_map_data *bmd;
25 
26 	if (data->nr_segs > UIO_MAXIOV)
27 		return NULL;
28 
29 	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
30 	if (!bmd)
31 		return NULL;
32 	bmd->iter = *data;
33 	if (iter_is_iovec(data)) {
34 		memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
35 		bmd->iter.__iov = bmd->iov;
36 	}
37 	return bmd;
38 }
39 
40 /**
41  * bio_copy_from_iter - copy all pages from iov_iter to bio
42  * @bio: The &struct bio which describes the I/O as destination
43  * @iter: iov_iter as source
44  *
45  * Copy all pages from iov_iter to bio.
46  * Returns 0 on success, or error on failure.
47  */
48 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
49 {
50 	struct bio_vec *bvec;
51 	struct bvec_iter_all iter_all;
52 
53 	bio_for_each_segment_all(bvec, bio, iter_all) {
54 		ssize_t ret;
55 
56 		ret = copy_page_from_iter(bvec->bv_page,
57 					  bvec->bv_offset,
58 					  bvec->bv_len,
59 					  iter);
60 
61 		if (!iov_iter_count(iter))
62 			break;
63 
64 		if (ret < bvec->bv_len)
65 			return -EFAULT;
66 	}
67 
68 	return 0;
69 }
70 
71 /**
72  * bio_copy_to_iter - copy all pages from bio to iov_iter
73  * @bio: The &struct bio which describes the I/O as source
74  * @iter: iov_iter as destination
75  *
76  * Copy all pages from bio to iov_iter.
77  * Returns 0 on success, or error on failure.
78  */
79 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
80 {
81 	struct bio_vec *bvec;
82 	struct bvec_iter_all iter_all;
83 
84 	bio_for_each_segment_all(bvec, bio, iter_all) {
85 		ssize_t ret;
86 
87 		ret = copy_page_to_iter(bvec->bv_page,
88 					bvec->bv_offset,
89 					bvec->bv_len,
90 					&iter);
91 
92 		if (!iov_iter_count(&iter))
93 			break;
94 
95 		if (ret < bvec->bv_len)
96 			return -EFAULT;
97 	}
98 
99 	return 0;
100 }
101 
102 /**
103  *	bio_uncopy_user	-	finish previously mapped bio
104  *	@bio: bio being terminated
105  *
106  *	Free pages allocated from bio_copy_user_iov() and write back data
107  *	to user space in case of a read.
108  */
109 static int bio_uncopy_user(struct bio *bio)
110 {
111 	struct bio_map_data *bmd = bio->bi_private;
112 	int ret = 0;
113 
114 	if (!bmd->is_null_mapped) {
115 		/*
116 		 * if we're in a workqueue, the request is orphaned, so
117 		 * don't copy into a random user address space, just free
118 		 * and return -EINTR so user space doesn't expect any data.
119 		 */
120 		if (!current->mm)
121 			ret = -EINTR;
122 		else if (bio_data_dir(bio) == READ)
123 			ret = bio_copy_to_iter(bio, bmd->iter);
124 		if (bmd->is_our_pages)
125 			bio_free_pages(bio);
126 	}
127 	kfree(bmd);
128 	return ret;
129 }
130 
131 static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
132 		struct iov_iter *iter, gfp_t gfp_mask)
133 {
134 	struct bio_map_data *bmd;
135 	struct page *page;
136 	struct bio *bio;
137 	int i = 0, ret;
138 	int nr_pages;
139 	unsigned int len = iter->count;
140 	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
141 
142 	bmd = bio_alloc_map_data(iter, gfp_mask);
143 	if (!bmd)
144 		return -ENOMEM;
145 
146 	/*
147 	 * We need to do a deep copy of the iov_iter including the iovecs.
148 	 * The caller-provided iov might point to an on-stack or otherwise
149 	 * short-lived one.
150 	 */
151 	bmd->is_our_pages = !map_data;
152 	bmd->is_null_mapped = (map_data && map_data->null_mapped);
153 
154 	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
155 
156 	ret = -ENOMEM;
157 	bio = bio_kmalloc(nr_pages, gfp_mask);
158 	if (!bio)
159 		goto out_bmd;
160 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
161 
162 	if (map_data) {
163 		nr_pages = 1U << map_data->page_order;
164 		i = map_data->offset / PAGE_SIZE;
165 	}
166 	while (len) {
167 		unsigned int bytes = PAGE_SIZE;
168 
169 		bytes -= offset;
170 
171 		if (bytes > len)
172 			bytes = len;
173 
174 		if (map_data) {
175 			if (i == map_data->nr_entries * nr_pages) {
176 				ret = -ENOMEM;
177 				goto cleanup;
178 			}
179 
180 			page = map_data->pages[i / nr_pages];
181 			page += (i % nr_pages);
182 
183 			i++;
184 		} else {
185 			page = alloc_page(GFP_NOIO | gfp_mask);
186 			if (!page) {
187 				ret = -ENOMEM;
188 				goto cleanup;
189 			}
190 		}
191 
192 		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
193 			if (!map_data)
194 				__free_page(page);
195 			break;
196 		}
197 
198 		len -= bytes;
199 		offset = 0;
200 	}
201 
202 	if (map_data)
203 		map_data->offset += bio->bi_iter.bi_size;
204 
205 	/*
206 	 * success
207 	 */
208 	if (iov_iter_rw(iter) == WRITE &&
209 	     (!map_data || !map_data->null_mapped)) {
210 		ret = bio_copy_from_iter(bio, iter);
211 		if (ret)
212 			goto cleanup;
213 	} else if (map_data && map_data->from_user) {
214 		struct iov_iter iter2 = *iter;
215 
216 		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
217 		iter2.data_source = ITER_SOURCE;
218 		ret = bio_copy_from_iter(bio, &iter2);
219 		if (ret)
220 			goto cleanup;
221 	} else {
222 		if (bmd->is_our_pages)
223 			zero_fill_bio(bio);
224 		iov_iter_advance(iter, bio->bi_iter.bi_size);
225 	}
226 
227 	bio->bi_private = bmd;
228 
229 	ret = blk_rq_append_bio(rq, bio);
230 	if (ret)
231 		goto cleanup;
232 	return 0;
233 cleanup:
234 	if (!map_data)
235 		bio_free_pages(bio);
236 	bio_uninit(bio);
237 	kfree(bio);
238 out_bmd:
239 	kfree(bmd);
240 	return ret;
241 }
242 
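/*
 * Free a bio obtained from blk_rq_map_bio_alloc() below: bios that came
 * out of the REQ_ALLOC_CACHE bio_set are dropped with bio_put(), while
 * bio_kmalloc()ed ones are torn down by hand.
 */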
243 static void blk_mq_map_bio_put(struct bio *bio)
244 {
245 	if (bio->bi_opf & REQ_ALLOC_CACHE) {
246 		bio_put(bio);
247 	} else {
248 		bio_uninit(bio);
249 		kfree(bio);
250 	}
251 }
252 
253 static struct bio *blk_rq_map_bio_alloc(struct request *rq,
254 		unsigned int nr_vecs, gfp_t gfp_mask)
255 {
256 	struct bio *bio;
257 
258 	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
259 		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
260 					&fs_bio_set);
261 		if (!bio)
262 			return NULL;
263 	} else {
264 		bio = bio_kmalloc(nr_vecs, gfp_mask);
265 		if (!bio)
266 			return NULL;
267 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
268 	}
269 	return bio;
270 }
271 
272 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
273 		gfp_t gfp_mask)
274 {
275 	iov_iter_extraction_t extraction_flags = 0;
276 	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
277 	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
278 	struct bio *bio;
279 	int ret;
280 	int j;
281 
282 	if (!iov_iter_count(iter))
283 		return -EINVAL;
284 
285 	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
286 	if (bio == NULL)
287 		return -ENOMEM;
288 
289 	if (blk_queue_pci_p2pdma(rq->q))
290 		extraction_flags |= ITER_ALLOW_P2PDMA;
291 	if (iov_iter_extract_will_pin(iter))
292 		bio_set_flag(bio, BIO_PAGE_PINNED);
293 
294 	while (iov_iter_count(iter)) {
295 		struct page *stack_pages[UIO_FASTIOV];
296 		struct page **pages = stack_pages;
297 		ssize_t bytes;
298 		size_t offs;
299 		int npages;
300 
301 		if (nr_vecs > ARRAY_SIZE(stack_pages))
302 			pages = NULL;
303 
304 		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
305 					       nr_vecs, extraction_flags, &offs);
306 		if (unlikely(bytes <= 0)) {
307 			ret = bytes ? bytes : -EFAULT;
308 			goto out_unmap;
309 		}
310 
311 		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
312 
313 		if (unlikely(offs & queue_dma_alignment(rq->q)))
314 			j = 0;
315 		else {
316 			for (j = 0; j < npages; j++) {
317 				struct page *page = pages[j];
318 				unsigned int n = PAGE_SIZE - offs;
319 				bool same_page = false;
320 
321 				if (n > bytes)
322 					n = bytes;
323 
324 				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
325 						     max_sectors, &same_page))
326 					break;
327 
328 				if (same_page)
329 					bio_release_page(bio, page);
330 				bytes -= n;
331 				offs = 0;
332 			}
333 		}
334 		/*
335 		 * release the pages we didn't map into the bio, if any
336 		 */
337 		while (j < npages)
338 			bio_release_page(bio, pages[j++]);
339 		if (pages != stack_pages)
340 			kvfree(pages);
341 		/* couldn't stuff something into bio? */
342 		if (bytes) {
343 			iov_iter_revert(iter, bytes);
344 			break;
345 		}
346 	}
347 
348 	ret = blk_rq_append_bio(rq, bio);
349 	if (ret)
350 		goto out_unmap;
351 	return 0;
352 
353  out_unmap:
354 	bio_release_pages(bio, false);
355 	blk_mq_map_bio_put(bio);
356 	return ret;
357 }
358 
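/*
 * Counterpart of the flush_kernel_vmap_range() call in bio_map_kern():
 * once a READ into a vmalloc buffer completes, invalidate the kernel
 * vmap alias so CPUs with aliasing caches see the freshly DMAed data.
 * bio->bi_private carries the original vmalloc address.
 */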
359 static void bio_invalidate_vmalloc_pages(struct bio *bio)
360 {
361 #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
362 	if (bio->bi_private && !op_is_write(bio_op(bio))) {
363 		unsigned long i, len = 0;
364 
365 		for (i = 0; i < bio->bi_vcnt; i++)
366 			len += bio->bi_io_vec[i].bv_len;
367 		invalidate_kernel_vmap_range(bio->bi_private, len);
368 	}
369 #endif
370 }
371 
372 static void bio_map_kern_endio(struct bio *bio)
373 {
374 	bio_invalidate_vmalloc_pages(bio);
375 	bio_uninit(bio);
376 	kfree(bio);
377 }
378 
379 /**
380  *	bio_map_kern	-	map kernel address into bio
381  *	@q: the struct request_queue for the bio
382  *	@data: pointer to buffer to map
383  *	@len: length in bytes
384  *	@gfp_mask: allocation flags for bio allocation
385  *
386  *	Map the kernel address into a bio suitable for io to a block
387  *	device. Returns an error pointer in case of error.
388  */
389 static struct bio *bio_map_kern(struct request_queue *q, void *data,
390 		unsigned int len, gfp_t gfp_mask)
391 {
392 	unsigned long kaddr = (unsigned long)data;
393 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
394 	unsigned long start = kaddr >> PAGE_SHIFT;
395 	const int nr_pages = end - start;
396 	bool is_vmalloc = is_vmalloc_addr(data);
397 	struct page *page;
398 	int offset, i;
399 	struct bio *bio;
400 
401 	bio = bio_kmalloc(nr_pages, gfp_mask);
402 	if (!bio)
403 		return ERR_PTR(-ENOMEM);
404 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
405 
406 	if (is_vmalloc) {
407 		flush_kernel_vmap_range(data, len);
408 		bio->bi_private = data;
409 	}
410 
411 	offset = offset_in_page(kaddr);
412 	for (i = 0; i < nr_pages; i++) {
413 		unsigned int bytes = PAGE_SIZE - offset;
414 
415 		if (len <= 0)
416 			break;
417 
418 		if (bytes > len)
419 			bytes = len;
420 
421 		if (!is_vmalloc)
422 			page = virt_to_page(data);
423 		else
424 			page = vmalloc_to_page(data);
425 		if (bio_add_pc_page(q, bio, page, bytes,
426 				    offset) < bytes) {
427 			/* we don't support partial mappings */
428 			bio_uninit(bio);
429 			kfree(bio);
430 			return ERR_PTR(-EINVAL);
431 		}
432 
433 		data += bytes;
434 		len -= bytes;
435 		offset = 0;
436 	}
437 
438 	bio->bi_end_io = bio_map_kern_endio;
439 	return bio;
440 }
441 
442 static void bio_copy_kern_endio(struct bio *bio)
443 {
444 	bio_free_pages(bio);
445 	bio_uninit(bio);
446 	kfree(bio);
447 }
448 
449 static void bio_copy_kern_endio_read(struct bio *bio)
450 {
451 	char *p = bio->bi_private;
452 	struct bio_vec *bvec;
453 	struct bvec_iter_all iter_all;
454 
455 	bio_for_each_segment_all(bvec, bio, iter_all) {
456 		memcpy_from_bvec(p, bvec);
457 		p += bvec->bv_len;
458 	}
459 
460 	bio_copy_kern_endio(bio);
461 }
462 
463 /**
464  *	bio_copy_kern	-	copy kernel address into bio
465  *	@q: the struct request_queue for the bio
466  *	@data: pointer to buffer to copy
467  *	@len: length in bytes
468  *	@gfp_mask: allocation flags for bio and page allocation
469  *	@reading: data direction is READ
470  *
471  *	Copy the kernel address into a bio suitable for io to a block
472  *	device. Returns an error pointer in case of error.
473  */
474 static struct bio *bio_copy_kern(struct request_queue *q, void *data,
475 		unsigned int len, gfp_t gfp_mask, int reading)
476 {
477 	unsigned long kaddr = (unsigned long)data;
478 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
479 	unsigned long start = kaddr >> PAGE_SHIFT;
480 	struct bio *bio;
481 	void *p = data;
482 	int nr_pages = 0;
483 
484 	/*
485 	 * Overflow, abort
486 	 */
487 	if (end < start)
488 		return ERR_PTR(-EINVAL);
489 
490 	nr_pages = end - start;
491 	bio = bio_kmalloc(nr_pages, gfp_mask);
492 	if (!bio)
493 		return ERR_PTR(-ENOMEM);
494 	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
495 
496 	while (len) {
497 		struct page *page;
498 		unsigned int bytes = PAGE_SIZE;
499 
500 		if (bytes > len)
501 			bytes = len;
502 
503 		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
504 		if (!page)
505 			goto cleanup;
506 
507 		if (!reading)
508 			memcpy(page_address(page), p, bytes);
509 
510 		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
511 			break;
512 
513 		len -= bytes;
514 		p += bytes;
515 	}
516 
517 	if (reading) {
518 		bio->bi_end_io = bio_copy_kern_endio_read;
519 		bio->bi_private = data;
520 	} else {
521 		bio->bi_end_io = bio_copy_kern_endio;
522 	}
523 
524 	return bio;
525 
526 cleanup:
527 	bio_free_pages(bio);
528 	bio_uninit(bio);
529 	kfree(bio);
530 	return ERR_PTR(-ENOMEM);
531 }
532 
533 /*
534  * Append a bio to a passthrough request.  Only works if the bio can be merged
535  * into the request based on the driver constraints.
536  */
537 int blk_rq_append_bio(struct request *rq, struct bio *bio)
538 {
539 	struct bvec_iter iter;
540 	struct bio_vec bv;
541 	unsigned int nr_segs = 0;
542 
543 	bio_for_each_bvec(bv, bio, iter)
544 		nr_segs++;
545 
546 	if (!rq->bio) {
547 		blk_rq_bio_prep(rq, bio, nr_segs);
548 	} else {
549 		if (!ll_back_merge_fn(rq, bio, nr_segs))
550 			return -EINVAL;
551 		rq->biotail->bi_next = bio;
552 		rq->biotail = bio;
553 		rq->__data_len += (bio)->bi_iter.bi_size;
554 		bio_crypt_free_ctx(bio);
555 	}
556 
557 	return 0;
558 }
559 EXPORT_SYMBOL(blk_rq_append_bio);
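/*
 * Illustrative sketch (assumed caller code, not part of this file): a
 * driver that has already built a bio for a passthrough request can
 * attach it as follows; the helper either starts the request's bio list
 * or back-merges within the queue limits:
 *
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (ret)
 *		goto free_bio;
 *
 * A nonzero return means the bio could not be merged within the queue
 * limits and the caller must unwind it itself.
 */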
560 
561 /* Prepare bio for passthrough IO given ITER_BVEC iter */
562 static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
563 {
564 	struct request_queue *q = rq->q;
565 	size_t nr_iter = iov_iter_count(iter);
566 	size_t nr_segs = iter->nr_segs;
567 	struct bio_vec *bvecs, *bvprvp = NULL;
568 	const struct queue_limits *lim = &q->limits;
569 	unsigned int nsegs = 0, bytes = 0;
570 	struct bio *bio;
571 	size_t i;
572 
573 	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
574 		return -EINVAL;
575 	if (nr_segs > queue_max_segments(q))
576 		return -EINVAL;
577 
578 	/* no iovecs to alloc, as we already have a BVEC iterator */
579 	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
580 	if (bio == NULL)
581 		return -ENOMEM;
582 
583 	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
584 	blk_rq_bio_prep(rq, bio, nr_segs);
585 
586 	/* loop to perform a bunch of sanity checks */
587 	bvecs = (struct bio_vec *)iter->bvec;
588 	for (i = 0; i < nr_segs; i++) {
589 		struct bio_vec *bv = &bvecs[i];
590 
591 		/*
592 		 * If the queue doesn't support SG gaps and adding this
593 		 * offset would create a gap, fallback to copy.
594 		 */
595 		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
596 			blk_mq_map_bio_put(bio);
597 			return -EREMOTEIO;
598 		}
599 		/* check full condition */
600 		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
601 			goto put_bio;
602 		if (bytes + bv->bv_len > nr_iter)
603 			goto put_bio;
604 		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
605 			goto put_bio;
606 
607 		nsegs++;
608 		bytes += bv->bv_len;
609 		bvprvp = bv;
610 	}
611 	return 0;
612 put_bio:
613 	blk_mq_map_bio_put(bio);
614 	return -EINVAL;
615 }
616 
617 /**
618  * blk_rq_map_user_iov - map user data to a request, for passthrough requests
619  * @q:		request queue where request should be inserted
620  * @rq:		request to map data to
621  * @map_data:   pointer to the rq_map_data holding pages (if necessary)
622  * @iter:	iovec iterator
623  * @gfp_mask:	memory allocation flags
624  *
625  * Description:
626  *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
627  *    a kernel bounce buffer is used.
628  *
629  *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
630  *    still in process context.
631  */
632 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
633 			struct rq_map_data *map_data,
634 			const struct iov_iter *iter, gfp_t gfp_mask)
635 {
636 	bool copy = false, map_bvec = false;
637 	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
638 	struct bio *bio = NULL;
639 	struct iov_iter i;
640 	int ret = -EINVAL;
641 
642 	if (map_data)
643 		copy = true;
644 	else if (blk_queue_may_bounce(q))
645 		copy = true;
646 	else if (iov_iter_alignment(iter) & align)
647 		copy = true;
648 	else if (iov_iter_is_bvec(iter))
649 		map_bvec = true;
650 	else if (!user_backed_iter(iter))
651 		copy = true;
652 	else if (queue_virt_boundary(q))
653 		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
654 
655 	if (map_bvec) {
656 		ret = blk_rq_map_user_bvec(rq, iter);
657 		if (!ret)
658 			return 0;
659 		if (ret != -EREMOTEIO)
660 			goto fail;
661 		/* fall back to copying the data on limits mismatches */
662 		copy = true;
663 	}
664 
665 	i = *iter;
666 	do {
667 		if (copy)
668 			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
669 		else
670 			ret = bio_map_user_iov(rq, &i, gfp_mask);
671 		if (ret)
672 			goto unmap_rq;
673 		if (!bio)
674 			bio = rq->bio;
675 	} while (iov_iter_count(&i));
676 
677 	return 0;
678 
679 unmap_rq:
680 	blk_rq_unmap_user(bio);
681 fail:
682 	rq->bio = NULL;
683 	return ret;
684 }
685 EXPORT_SYMBOL(blk_rq_map_user_iov);
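/*
 * Illustrative sketch (assumed caller code, not part of this file):
 * mapping a user iovec array for a passthrough read, mirroring what
 * blk_rq_map_user_io() below does internally:
 *
 *	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(ITER_DEST, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(req->q, req, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 *
 * uvec/nr_segs are hypothetical caller-supplied values.
 */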
686 
687 int blk_rq_map_user(struct request_queue *q, struct request *rq,
688 		    struct rq_map_data *map_data, void __user *ubuf,
689 		    unsigned long len, gfp_t gfp_mask)
690 {
691 	struct iov_iter i;
692 	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
693 
694 	if (unlikely(ret < 0))
695 		return ret;
696 
697 	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
698 }
699 EXPORT_SYMBOL(blk_rq_map_user);
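/*
 * Illustrative round-trip sketch (assumed caller code, not part of this
 * file; error handling abbreviated). The bio is captured right after
 * mapping, because completion may change rq->bio and blk_rq_unmap_user()
 * needs the original:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(rq, false);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_mq_free_request(rq);
 *
 * ubuf/len are hypothetical caller-supplied values.
 */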
700 
701 int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
702 		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
703 		bool vec, int iov_count, bool check_iter_count, int rw)
704 {
705 	int ret = 0;
706 
707 	if (vec) {
708 		struct iovec fast_iov[UIO_FASTIOV];
709 		struct iovec *iov = fast_iov;
710 		struct iov_iter iter;
711 
712 		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
713 				UIO_FASTIOV, &iov, &iter);
714 		if (ret < 0)
715 			return ret;
716 
717 		if (iov_count) {
718 			/* SG_IO howto says that the shorter of the two wins */
719 			iov_iter_truncate(&iter, buf_len);
720 			if (check_iter_count && !iov_iter_count(&iter)) {
721 				kfree(iov);
722 				return -EINVAL;
723 			}
724 		}
725 
726 		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
727 				gfp_mask);
728 		kfree(iov);
729 	} else if (buf_len) {
730 		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
731 				gfp_mask);
732 	}
733 	return ret;
734 }
735 EXPORT_SYMBOL(blk_rq_map_user_io);
736 
737 /**
738  * blk_rq_unmap_user - unmap a request with user data
739  * @bio:	       start of bio list
740  *
741  * Description:
742  *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
743  *    supply the original rq->bio from the blk_rq_map_user() return, since
744  *    the I/O completion may have changed rq->bio.
745  */
746 int blk_rq_unmap_user(struct bio *bio)
747 {
748 	struct bio *next_bio;
749 	int ret = 0, ret2;
750 
751 	while (bio) {
752 		if (bio->bi_private) {
753 			ret2 = bio_uncopy_user(bio);
754 			if (ret2 && !ret)
755 				ret = ret2;
756 		} else {
757 			bio_release_pages(bio, bio_data_dir(bio) == READ);
758 		}
759 
760 		next_bio = bio;
761 		bio = bio->bi_next;
762 		blk_mq_map_bio_put(next_bio);
763 	}
764 
765 	return ret;
766 }
767 EXPORT_SYMBOL(blk_rq_unmap_user);
768 
769 /**
770  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
771  * @q:		request queue where request should be inserted
772  * @rq:		request to fill
773  * @kbuf:	the kernel buffer
774  * @len:	length of user data
775  * @gfp_mask:	memory allocation flags
776  *
777  * Description:
778  *    Data will be mapped directly if possible. Otherwise a bounce
779  *    buffer is used. Can be called multiple times to append multiple
780  *    buffers.
781  */
782 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
783 		    unsigned int len, gfp_t gfp_mask)
784 {
785 	int reading = rq_data_dir(rq) == READ;
786 	unsigned long addr = (unsigned long) kbuf;
787 	struct bio *bio;
788 	int ret;
789 
790 	if (len > (queue_max_hw_sectors(q) << 9))
791 		return -EINVAL;
792 	if (!len || !kbuf)
793 		return -EINVAL;
794 
795 	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
796 	    blk_queue_may_bounce(q))
797 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
798 	else
799 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
800 
801 	if (IS_ERR(bio))
802 		return PTR_ERR(bio);
803 
804 	bio->bi_opf &= ~REQ_OP_MASK;
805 	bio->bi_opf |= req_op(rq);
806 
807 	ret = blk_rq_append_bio(rq, bio);
808 	if (unlikely(ret)) {
809 		bio_uninit(bio);
810 		kfree(bio);
811 	}
812 	return ret;
813 }
814 EXPORT_SYMBOL(blk_rq_map_kern);
815
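/*
 * Illustrative sketch (assumed caller code, not part of this file):
 * attaching a kernel buffer to a passthrough request; the helper picks
 * the direct-map or bounce path internally:
 *
 *	ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *	if (ret)
 *		goto out_free_rq;
 *	blk_execute_rq(rq, false);
 *
 * buffer/bufflen are hypothetical caller-supplied values.
 */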