1 /*
2 * linux/drivers/block/loop.c
3 *
4 * Written by Theodore Ts'o, 3/29/93
5 *
6 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
7 * permitted under the GNU General Public License.
8 *
9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
11 *
12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
14 *
15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
16 *
17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
18 *
19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
20 *
21 * Loadable modules and other fixes by AK, 1998
22 *
23 * Make real block number available to downstream transfer functions, enables
24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
25 * Reed H. Petty, rhp@draper.net
26 *
27 * Maximum number of loop devices now dynamic via max_loop module parameter.
28 * Russell Kroll <rkroll@exploits.org> 19990701
29 *
30 * Maximum number of loop devices when compiled-in now selectable by passing
31 * max_loop=<1-255> to the kernel on boot.
32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
33 *
34 * Completely rewrite request handling to be make_request_fn style and
35 * non blocking, pushing work to a helper thread. Lots of fixes from
36 * Al Viro too.
37 * Jens Axboe <axboe@suse.de>, Nov 2000
38 *
39 * Support up to 256 loop devices
40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
41 *
42 * Support for falling back on the write file operation when the address space
43 * operations write_begin is not available on the backing filesystem.
44 * Anton Altaparmakov, 16 Feb 2005
45 *
46 * Still To Fix:
47 * - Advisory locking is ignored here.
48 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
49 *
50 */
51
52 #include <linux/module.h>
53 #include <linux/moduleparam.h>
54 #include <linux/sched.h>
55 #include <linux/fs.h>
56 #include <linux/pagemap.h>
57 #include <linux/file.h>
58 #include <linux/stat.h>
59 #include <linux/errno.h>
60 #include <linux/major.h>
61 #include <linux/wait.h>
62 #include <linux/blkdev.h>
63 #include <linux/blkpg.h>
64 #include <linux/init.h>
65 #include <linux/swap.h>
66 #include <linux/slab.h>
67 #include <linux/compat.h>
68 #include <linux/suspend.h>
69 #include <linux/freezer.h>
70 #include <linux/mutex.h>
71 #include <linux/writeback.h>
72 #include <linux/completion.h>
73 #include <linux/highmem.h>
74 #include <linux/kthread.h>
75 #include <linux/splice.h>
76 #include <linux/sysfs.h>
77 #include <linux/miscdevice.h>
78 #include <linux/falloc.h>
79 #include <linux/uio.h>
80 #include <linux/ioprio.h>
81 #include <linux/blk-cgroup.h>
82
83 #include "loop.h"
84
85 #include <linux/uaccess.h>
86
87 static DEFINE_IDR(loop_index_idr);
88 static DEFINE_MUTEX(loop_ctl_mutex);
89
90 static int max_part;
91 static int part_shift;
92
93 static int transfer_xor(struct loop_device *lo, int cmd,
94 struct page *raw_page, unsigned raw_off,
95 struct page *loop_page, unsigned loop_off,
96 int size, sector_t real_block)
97 {
98 char *raw_buf = kmap_atomic(raw_page) + raw_off;
99 char *loop_buf = kmap_atomic(loop_page) + loop_off;
100 char *in, *out, *key;
101 int i, keysize;
102
103 if (cmd == READ) {
104 in = raw_buf;
105 out = loop_buf;
106 } else {
107 in = loop_buf;
108 out = raw_buf;
109 }
110
111 key = lo->lo_encrypt_key;
112 keysize = lo->lo_encrypt_key_size;
113 for (i = 0; i < size; i++)
114 *out++ = *in++ ^ key[(i & 511) % keysize];
115
116 kunmap_atomic(loop_buf);
117 kunmap_atomic(raw_buf);
118 cond_resched();
119 return 0;
120 }
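
/*
 * Illustrative sketch (not part of the driver): the XOR transform above is
 * its own inverse, so applying it twice with the same key restores the
 * original data. A minimal userspace rendering of the same keystream
 * indexing, assuming a flat buffer instead of kmapped pages:
 *
 *	static void xor_buf(char *buf, int size, const char *key, int keysize)
 *	{
 *		int i;
 *
 *		for (i = 0; i < size; i++)
 *			buf[i] ^= key[(i & 511) % keysize];
 *	}
 *
 * Calling xor_buf() twice over the same data is a no-op, which is why READ
 * and WRITE share transfer_xor() with only 'in' and 'out' swapped.
 */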
121
122 static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
123 {
124 if (unlikely(info->lo_encrypt_key_size <= 0))
125 return -EINVAL;
126 return 0;
127 }
128
129 static struct loop_func_table none_funcs = {
130 .number = LO_CRYPT_NONE,
131 };
132
133 static struct loop_func_table xor_funcs = {
134 .number = LO_CRYPT_XOR,
135 .transfer = transfer_xor,
136 .init = xor_init
137 };
138
139 /* xfer_funcs[0] is special - its release function is never called */
140 static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
141 &none_funcs,
142 &xor_funcs
143 };
144
145 static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
146 {
147 loff_t loopsize;
148
149 /* Compute loopsize in bytes */
150 loopsize = i_size_read(file->f_mapping->host);
151 if (offset > 0)
152 loopsize -= offset;
153 /* offset is beyond i_size, weird but possible */
154 if (loopsize < 0)
155 return 0;
156
157 if (sizelimit > 0 && sizelimit < loopsize)
158 loopsize = sizelimit;
159 /*
160 * Unfortunately, if we want to do I/O on the device,
161 * the number of 512-byte sectors has to fit into a sector_t.
162 */
163 return loopsize >> 9;
164 }
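
/*
 * Worked example (illustrative only): a 1 GiB backing file with
 * lo_offset = 4096 and no sizelimit yields
 * (1073741824 - 4096) >> 9 = 2097144 usable 512-byte sectors.
 */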
165
166 static loff_t get_loop_size(struct loop_device *lo, struct file *file)
167 {
168 return get_size(lo->lo_offset, lo->lo_sizelimit, file);
169 }
170
171 static void __loop_update_dio(struct loop_device *lo, bool dio)
172 {
173 struct file *file = lo->lo_backing_file;
174 struct address_space *mapping = file->f_mapping;
175 struct inode *inode = mapping->host;
176 unsigned short sb_bsize = 0;
177 unsigned dio_align = 0;
178 bool use_dio;
179
180 if (inode->i_sb->s_bdev) {
181 sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
182 dio_align = sb_bsize - 1;
183 }
184
185 /*
186 * We support direct I/O only if lo_offset is aligned with the
187 * logical I/O size of backing device, and the logical block
188 * size of loop is bigger than the backing device's and the loop
189 * needn't transform transfer.
190 *
191 * TODO: the above condition may be relaxed in the future, and
192 * direct I/O may be switched runtime at that time because most
193 * of requests in sane applications should be PAGE_SIZE aligned
194 */
195 if (dio) {
196 if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
197 !(lo->lo_offset & dio_align) &&
198 mapping->a_ops->direct_IO &&
199 !lo->transfer)
200 use_dio = true;
201 else
202 use_dio = false;
203 } else {
204 use_dio = false;
205 }
206
207 if (lo->use_dio == use_dio)
208 return;
209
210 /* flush dirty pages before changing direct IO */
211 vfs_fsync(file, 0);
212
213 /*
214 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
215 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
216 * will get updated by ioctl(LOOP_GET_STATUS)
217 */
218 if (lo->lo_state == Lo_bound)
219 blk_mq_freeze_queue(lo->lo_queue);
220 lo->use_dio = use_dio;
221 if (use_dio) {
222 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
223 lo->lo_flags |= LO_FLAGS_DIRECT_IO;
224 } else {
225 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
226 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
227 }
228 if (lo->lo_state == Lo_bound)
229 blk_mq_unfreeze_queue(lo->lo_queue);
230 }
231
232 /**
233 * loop_validate_block_size() - validates the passed in block size
234 * @bsize: size to validate
235 */
236 static int
237 loop_validate_block_size(unsigned short bsize)
238 {
239 if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
240 return -EINVAL;
241
242 return 0;
243 }
244
245 /**
246 * loop_set_size() - sets device size and notifies userspace
247 * @lo: struct loop_device to set the size for
248 * @size: new size of the loop device
249 *
250 * Callers must validate that the size passed into this function fits into
251 * a sector_t, eg using loop_validate_size()
252 */
253 static void loop_set_size(struct loop_device *lo, loff_t size)
254 {
255 if (!set_capacity_and_notify(lo->lo_disk, size))
256 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
257 }
258
259 static inline int
260 lo_do_transfer(struct loop_device *lo, int cmd,
261 struct page *rpage, unsigned roffs,
262 struct page *lpage, unsigned loffs,
263 int size, sector_t rblock)
264 {
265 int ret;
266
267 ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
268 if (likely(!ret))
269 return 0;
270
271 printk_ratelimited(KERN_ERR
272 "loop: Transfer error at byte offset %llu, length %i.\n",
273 (unsigned long long)rblock << 9, size);
274 return ret;
275 }
276
277 static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
278 {
279 struct iov_iter i;
280 ssize_t bw;
281
282 iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
283
284 file_start_write(file);
285 bw = vfs_iter_write(file, &i, ppos, 0);
286 file_end_write(file);
287
288 if (likely(bw == bvec->bv_len))
289 return 0;
290
291 printk_ratelimited(KERN_ERR
292 "loop: Write error at byte offset %llu, length %i.\n",
293 (unsigned long long)*ppos, bvec->bv_len);
294 if (bw >= 0)
295 bw = -EIO;
296 return bw;
297 }
298
299 static int lo_write_simple(struct loop_device *lo, struct request *rq,
300 loff_t pos)
301 {
302 struct bio_vec bvec;
303 struct req_iterator iter;
304 int ret = 0;
305
306 rq_for_each_segment(bvec, rq, iter) {
307 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
308 if (ret < 0)
309 break;
310 cond_resched();
311 }
312
313 return ret;
314 }
315
316 /*
317 * This is the slow, transforming version that needs to double buffer the
318 * data as it cannot do the transformations in place without having direct
319 * access to the destination pages of the backing file.
320 */
321 static int lo_write_transfer(struct loop_device *lo, struct request *rq,
322 loff_t pos)
323 {
324 struct bio_vec bvec, b;
325 struct req_iterator iter;
326 struct page *page;
327 int ret = 0;
328
329 page = alloc_page(GFP_NOIO);
330 if (unlikely(!page))
331 return -ENOMEM;
332
333 rq_for_each_segment(bvec, rq, iter) {
334 ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
335 bvec.bv_offset, bvec.bv_len, pos >> 9);
336 if (unlikely(ret))
337 break;
338
339 b.bv_page = page;
340 b.bv_offset = 0;
341 b.bv_len = bvec.bv_len;
342 ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
343 if (ret < 0)
344 break;
345 }
346
347 __free_page(page);
348 return ret;
349 }
350
351 static int lo_read_simple(struct loop_device *lo, struct request *rq,
352 loff_t pos)
353 {
354 struct bio_vec bvec;
355 struct req_iterator iter;
356 struct iov_iter i;
357 ssize_t len;
358
359 rq_for_each_segment(bvec, rq, iter) {
360 iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
361 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
362 if (len < 0)
363 return len;
364
365 flush_dcache_page(bvec.bv_page);
366
367 if (len != bvec.bv_len) {
368 struct bio *bio;
369
370 __rq_for_each_bio(bio, rq)
371 zero_fill_bio(bio);
372 break;
373 }
374 cond_resched();
375 }
376
377 return 0;
378 }
379
380 static int lo_read_transfer(struct loop_device *lo, struct request *rq,
381 loff_t pos)
382 {
383 struct bio_vec bvec, b;
384 struct req_iterator iter;
385 struct iov_iter i;
386 struct page *page;
387 ssize_t len;
388 int ret = 0;
389
390 page = alloc_page(GFP_NOIO);
391 if (unlikely(!page))
392 return -ENOMEM;
393
394 rq_for_each_segment(bvec, rq, iter) {
395 loff_t offset = pos;
396
397 b.bv_page = page;
398 b.bv_offset = 0;
399 b.bv_len = bvec.bv_len;
400
401 iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
402 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
403 if (len < 0) {
404 ret = len;
405 goto out_free_page;
406 }
407
408 ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
409 bvec.bv_offset, len, offset >> 9);
410 if (ret)
411 goto out_free_page;
412
413 flush_dcache_page(bvec.bv_page);
414
415 if (len != bvec.bv_len) {
416 struct bio *bio;
417
418 __rq_for_each_bio(bio, rq)
419 zero_fill_bio(bio);
420 break;
421 }
422 }
423
424 ret = 0;
425 out_free_page:
426 __free_page(page);
427 return ret;
428 }
429
430 static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
431 int mode)
432 {
433 /*
434 * We use fallocate to manipulate the space mappings used by the image
435 * a.k.a. discard/zerorange. However we do not support this if
436 * encryption is enabled, because it may give an attacker useful
437 * information.
438 */
439 struct file *file = lo->lo_backing_file;
440 struct request_queue *q = lo->lo_queue;
441 int ret;
442
443 mode |= FALLOC_FL_KEEP_SIZE;
444
445 if (!blk_queue_discard(q)) {
446 ret = -EOPNOTSUPP;
447 goto out;
448 }
449
450 ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
451 if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
452 ret = -EIO;
453 out:
454 return ret;
455 }
456
457 static int lo_req_flush(struct loop_device *lo, struct request *rq)
458 {
459 struct file *file = lo->lo_backing_file;
460 int ret = vfs_fsync(file, 0);
461 if (unlikely(ret && ret != -EINVAL))
462 ret = -EIO;
463
464 return ret;
465 }
466
467 static void lo_complete_rq(struct request *rq)
468 {
469 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
470 blk_status_t ret = BLK_STS_OK;
471
472 if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
473 req_op(rq) != REQ_OP_READ) {
474 if (cmd->ret < 0)
475 ret = errno_to_blk_status(cmd->ret);
476 goto end_io;
477 }
478
479 /*
480 * Short READ - if we got some data, advance our request and
481 * retry it. If we got no data, end the rest with EIO.
482 */
483 if (cmd->ret) {
484 blk_update_request(rq, BLK_STS_OK, cmd->ret);
485 cmd->ret = 0;
486 blk_mq_requeue_request(rq, true);
487 } else {
488 if (cmd->use_aio) {
489 struct bio *bio = rq->bio;
490
491 while (bio) {
492 zero_fill_bio(bio);
493 bio = bio->bi_next;
494 }
495 }
496 ret = BLK_STS_IOERR;
497 end_io:
498 blk_mq_end_request(rq, ret);
499 }
500 }
501
502 static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
503 {
504 struct request *rq = blk_mq_rq_from_pdu(cmd);
505
506 if (!atomic_dec_and_test(&cmd->ref))
507 return;
508 kfree(cmd->bvec);
509 cmd->bvec = NULL;
510 if (likely(!blk_should_fake_timeout(rq->q)))
511 blk_mq_complete_request(rq);
512 }
513
514 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
515 {
516 struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
517
518 if (cmd->css)
519 css_put(cmd->css);
520 cmd->ret = ret;
521 lo_rw_aio_do_completion(cmd);
522 }
523
524 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
525 loff_t pos, bool rw)
526 {
527 struct iov_iter iter;
528 struct req_iterator rq_iter;
529 struct bio_vec *bvec;
530 struct request *rq = blk_mq_rq_from_pdu(cmd);
531 struct bio *bio = rq->bio;
532 struct file *file = lo->lo_backing_file;
533 struct bio_vec tmp;
534 unsigned int offset;
535 int nr_bvec = 0;
536 int ret;
537
538 rq_for_each_bvec(tmp, rq, rq_iter)
539 nr_bvec++;
540
541 if (rq->bio != rq->biotail) {
542
543 bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
544 GFP_NOIO);
545 if (!bvec)
546 return -EIO;
547 cmd->bvec = bvec;
548
549 /*
550 * The bios of the request may be started from the middle of
551 * the 'bvec' because of bio splitting, so we can't directly
552 * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
553 * API will take care of all details for us.
554 */
555 rq_for_each_bvec(tmp, rq, rq_iter) {
556 *bvec = tmp;
557 bvec++;
558 }
559 bvec = cmd->bvec;
560 offset = 0;
561 } else {
562 /*
563 * Same here, this bio may be started from the middle of the
564 * 'bvec' because of bio splitting, so offset from the bvec
565 * must be passed to iov iterator
566 */
567 offset = bio->bi_iter.bi_bvec_done;
568 bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
569 }
570 atomic_set(&cmd->ref, 2);
571
572 iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
573 iter.iov_offset = offset;
574
575 cmd->iocb.ki_pos = pos;
576 cmd->iocb.ki_filp = file;
577 cmd->iocb.ki_complete = lo_rw_aio_complete;
578 cmd->iocb.ki_flags = IOCB_DIRECT;
579 cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
580 if (cmd->css)
581 kthread_associate_blkcg(cmd->css);
582
583 if (rw == WRITE)
584 ret = call_write_iter(file, &cmd->iocb, &iter);
585 else
586 ret = call_read_iter(file, &cmd->iocb, &iter);
587
588 lo_rw_aio_do_completion(cmd);
589 kthread_associate_blkcg(NULL);
590
591 if (ret != -EIOCBQUEUED)
592 cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
593 return 0;
594 }
595
596 static int do_req_filebacked(struct loop_device *lo, struct request *rq)
597 {
598 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
599 loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
600
601 /*
602 * lo_write_simple and lo_read_simple should have been covered
603 * by io submit style function like lo_rw_aio(), one blocker
604 * is that lo_read_simple() need to call flush_dcache_page after
605 * the page is written from kernel, and it isn't easy to handle
606 * this in io submit style function which submits all segments
607 * of the req at one time. And direct read IO doesn't need to
608 * run flush_dcache_page().
609 */
610 switch (req_op(rq)) {
611 case REQ_OP_FLUSH:
612 return lo_req_flush(lo, rq);
613 case REQ_OP_WRITE_ZEROES:
614 /*
615 * If the caller doesn't want deallocation, call zeroout to
616 * write zeroes the range. Otherwise, punch them out.
617 */
618 return lo_fallocate(lo, rq, pos,
619 (rq->cmd_flags & REQ_NOUNMAP) ?
620 FALLOC_FL_ZERO_RANGE :
621 FALLOC_FL_PUNCH_HOLE);
622 case REQ_OP_DISCARD:
623 return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
624 case REQ_OP_WRITE:
625 if (lo->transfer)
626 return lo_write_transfer(lo, rq, pos);
627 else if (cmd->use_aio)
628 return lo_rw_aio(lo, cmd, pos, WRITE);
629 else
630 return lo_write_simple(lo, rq, pos);
631 case REQ_OP_READ:
632 if (lo->transfer)
633 return lo_read_transfer(lo, rq, pos);
634 else if (cmd->use_aio)
635 return lo_rw_aio(lo, cmd, pos, READ);
636 else
637 return lo_read_simple(lo, rq, pos);
638 default:
639 WARN_ON_ONCE(1);
640 return -EIO;
641 }
642 }
643
644 static inline void loop_update_dio(struct loop_device *lo)
645 {
646 __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) |
647 lo->use_dio);
648 }
649
650 static void loop_reread_partitions(struct loop_device *lo,
651 struct block_device *bdev)
652 {
653 int rc;
654
655 mutex_lock(&bdev->bd_mutex);
656 rc = bdev_disk_changed(bdev, false);
657 mutex_unlock(&bdev->bd_mutex);
658 if (rc)
659 pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
660 __func__, lo->lo_number, lo->lo_file_name, rc);
661 }
662
663 static inline int is_loop_device(struct file *file)
664 {
665 struct inode *i = file->f_mapping->host;
666
667 return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR;
668 }
669
670 static int loop_validate_file(struct file *file, struct block_device *bdev)
671 {
672 struct inode *inode = file->f_mapping->host;
673 struct file *f = file;
674
675 /* Avoid recursion */
676 while (is_loop_device(f)) {
677 struct loop_device *l;
678
679 if (f->f_mapping->host->i_rdev == bdev->bd_dev)
680 return -EBADF;
681
682 l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
683 if (l->lo_state != Lo_bound) {
684 return -EINVAL;
685 }
686 f = l->lo_backing_file;
687 }
688 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
689 return -EINVAL;
690 return 0;
691 }
692
693 /*
694 * loop_change_fd switched the backing store of a loopback device to
695 * a new file. This is useful for operating system installers to free up
696 * the original file and in High Availability environments to switch to
697 * an alternative location for the content in case of server meltdown.
698 * This can only work if the loop device is used read-only, and if the
699 * new backing store is the same size and type as the old backing store.
700 */
701 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
702 unsigned int arg)
703 {
704 struct file *file = NULL, *old_file;
705 int error;
706 bool partscan;
707
708 error = mutex_lock_killable(&lo->lo_mutex);
709 if (error)
710 return error;
711 error = -ENXIO;
712 if (lo->lo_state != Lo_bound)
713 goto out_err;
714
715 /* the loop device has to be read-only */
716 error = -EINVAL;
717 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
718 goto out_err;
719
720 error = -EBADF;
721 file = fget(arg);
722 if (!file)
723 goto out_err;
724
725 error = loop_validate_file(file, bdev);
726 if (error)
727 goto out_err;
728
729 old_file = lo->lo_backing_file;
730
731 error = -EINVAL;
732
733 /* size of the new backing store needs to be the same */
734 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
735 goto out_err;
736
737 /* and ... switch */
738 blk_mq_freeze_queue(lo->lo_queue);
739 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
740 lo->lo_backing_file = file;
741 lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
742 mapping_set_gfp_mask(file->f_mapping,
743 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
744 loop_update_dio(lo);
745 blk_mq_unfreeze_queue(lo->lo_queue);
746 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
747 mutex_unlock(&lo->lo_mutex);
748 /*
749 * We must drop file reference outside of lo_mutex as dropping
750 * the file ref can take bd_mutex which creates circular locking
751 * dependency.
752 */
753 fput(old_file);
754 if (partscan)
755 loop_reread_partitions(lo, bdev);
756 return 0;
757
758 out_err:
759 mutex_unlock(&lo->lo_mutex);
760 if (file)
761 fput(file);
762 return error;
763 }
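
/*
 * Usage sketch (userspace, illustrative only): switching the backing file
 * with LOOP_CHANGE_FD. The device node and file path are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	int loop_fd = open("/dev/loop0", O_RDONLY);
 *	int new_fd = open("/path/to/new-backing-file", O_RDONLY);
 *
 *	// Fails with -EINVAL unless the device is read-only and the new
 *	// file matches the size of the current backing file.
 *	if (ioctl(loop_fd, LOOP_CHANGE_FD, new_fd) < 0)
 *		perror("LOOP_CHANGE_FD");
 */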
764
765 /* loop sysfs attributes */
766
767 static ssize_t loop_attr_show(struct device *dev, char *page,
768 ssize_t (*callback)(struct loop_device *, char *))
769 {
770 struct gendisk *disk = dev_to_disk(dev);
771 struct loop_device *lo = disk->private_data;
772
773 return callback(lo, page);
774 }
775
776 #define LOOP_ATTR_RO(_name) \
777 static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \
778 static ssize_t loop_attr_do_show_##_name(struct device *d, \
779 struct device_attribute *attr, char *b) \
780 { \
781 return loop_attr_show(d, b, loop_attr_##_name##_show); \
782 } \
783 static struct device_attribute loop_attr_##_name = \
784 __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);
785
786 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
787 {
788 ssize_t ret;
789 char *p = NULL;
790
791 spin_lock_irq(&lo->lo_lock);
792 if (lo->lo_backing_file)
793 p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
794 spin_unlock_irq(&lo->lo_lock);
795
796 if (IS_ERR_OR_NULL(p))
797 ret = PTR_ERR(p);
798 else {
799 ret = strlen(p);
800 memmove(buf, p, ret);
801 buf[ret++] = '\n';
802 buf[ret] = 0;
803 }
804
805 return ret;
806 }
807
808 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
809 {
810 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
811 }
812
813 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
814 {
815 return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
816 }
817
818 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
819 {
820 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);
821
822 return sprintf(buf, "%s\n", autoclear ? "1" : "0");
823 }
824
825 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
826 {
827 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);
828
829 return sprintf(buf, "%s\n", partscan ? "1" : "0");
830 }
831
832 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
833 {
834 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);
835
836 return sprintf(buf, "%s\n", dio ? "1" : "0");
837 }
838
839 LOOP_ATTR_RO(backing_file);
840 LOOP_ATTR_RO(offset);
841 LOOP_ATTR_RO(sizelimit);
842 LOOP_ATTR_RO(autoclear);
843 LOOP_ATTR_RO(partscan);
844 LOOP_ATTR_RO(dio);
845
846 static struct attribute *loop_attrs[] = {
847 &loop_attr_backing_file.attr,
848 &loop_attr_offset.attr,
849 &loop_attr_sizelimit.attr,
850 &loop_attr_autoclear.attr,
851 &loop_attr_partscan.attr,
852 &loop_attr_dio.attr,
853 NULL,
854 };
855
856 static struct attribute_group loop_attribute_group = {
857 .name = "loop",
858 .attrs= loop_attrs,
859 };
860
861 static void loop_sysfs_init(struct loop_device *lo)
862 {
863 lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
864 &loop_attribute_group);
865 }
866
867 static void loop_sysfs_exit(struct loop_device *lo)
868 {
869 if (lo->sysfs_inited)
870 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
871 &loop_attribute_group);
872 }
873
874 static void loop_config_discard(struct loop_device *lo)
875 {
876 struct file *file = lo->lo_backing_file;
877 struct inode *inode = file->f_mapping->host;
878 struct request_queue *q = lo->lo_queue;
879 u32 granularity, max_discard_sectors;
880
881 /*
882 * If the backing device is a block device, mirror its zeroing
883 * capability. Set the discard sectors to the block device's zeroing
884 * capabilities because loop discards result in blkdev_issue_zeroout(),
885 * not blkdev_issue_discard(). This maintains consistent behavior with
886 * file-backed loop devices: discarded regions read back as zero.
887 */
888 if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
889 struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
890
891 max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
892 granularity = backingq->limits.discard_granularity ?:
893 queue_physical_block_size(backingq);
894
895 /*
896 * We use punch hole to reclaim the free space used by the
897 * image a.k.a. discard. However we do not support discard if
898 * encryption is enabled, because it may give an attacker
899 * useful information.
900 */
901 } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
902 max_discard_sectors = 0;
903 granularity = 0;
904
905 } else {
906 max_discard_sectors = UINT_MAX >> 9;
907 granularity = inode->i_sb->s_blocksize;
908 }
909
910 if (max_discard_sectors) {
911 q->limits.discard_granularity = granularity;
912 blk_queue_max_discard_sectors(q, max_discard_sectors);
913 blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
914 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
915 } else {
916 q->limits.discard_granularity = 0;
917 blk_queue_max_discard_sectors(q, 0);
918 blk_queue_max_write_zeroes_sectors(q, 0);
919 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
920 }
921 q->limits.discard_alignment = 0;
922 }
923
924 static void loop_unprepare_queue(struct loop_device *lo)
925 {
926 kthread_flush_worker(&lo->worker);
927 kthread_stop(lo->worker_task);
928 }
929
930 static int loop_kthread_worker_fn(void *worker_ptr)
931 {
932 current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
933 return kthread_worker_fn(worker_ptr);
934 }
935
936 static int loop_prepare_queue(struct loop_device *lo)
937 {
938 kthread_init_worker(&lo->worker);
939 lo->worker_task = kthread_run(loop_kthread_worker_fn,
940 &lo->worker, "loop%d", lo->lo_number);
941 if (IS_ERR(lo->worker_task))
942 return -ENOMEM;
943 set_user_nice(lo->worker_task, MIN_NICE);
944 return 0;
945 }
946
947 static void loop_update_rotational(struct loop_device *lo)
948 {
949 struct file *file = lo->lo_backing_file;
950 struct inode *file_inode = file->f_mapping->host;
951 struct block_device *file_bdev = file_inode->i_sb->s_bdev;
952 struct request_queue *q = lo->lo_queue;
953 bool nonrot = true;
954
955 /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
956 if (file_bdev)
957 nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
958
959 if (nonrot)
960 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
961 else
962 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
963 }
964
965 static int
966 loop_release_xfer(struct loop_device *lo)
967 {
968 int err = 0;
969 struct loop_func_table *xfer = lo->lo_encryption;
970
971 if (xfer) {
972 if (xfer->release)
973 err = xfer->release(lo);
974 lo->transfer = NULL;
975 lo->lo_encryption = NULL;
976 module_put(xfer->owner);
977 }
978 return err;
979 }
980
981 static int
982 loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
983 const struct loop_info64 *i)
984 {
985 int err = 0;
986
987 if (xfer) {
988 struct module *owner = xfer->owner;
989
990 if (!try_module_get(owner))
991 return -EINVAL;
992 if (xfer->init)
993 err = xfer->init(lo, i);
994 if (err)
995 module_put(owner);
996 else
997 lo->lo_encryption = xfer;
998 }
999 return err;
1000 }
1001
1002 /**
1003 * loop_set_status_from_info - configure device from loop_info
1004 * @lo: struct loop_device to configure
1005 * @info: struct loop_info64 to configure the device with
1006 *
1007 * Configures the loop device parameters according to the passed
1008 * in loop_info64 configuration.
1009 */
1010 static int
1011 loop_set_status_from_info(struct loop_device *lo,
1012 const struct loop_info64 *info)
1013 {
1014 int err;
1015 struct loop_func_table *xfer;
1016 kuid_t uid = current_uid();
1017
1018 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
1019 return -EINVAL;
1020
1021 err = loop_release_xfer(lo);
1022 if (err)
1023 return err;
1024
1025 if (info->lo_encrypt_type) {
1026 unsigned int type = info->lo_encrypt_type;
1027
1028 if (type >= MAX_LO_CRYPT)
1029 return -EINVAL;
1030 xfer = xfer_funcs[type];
1031 if (xfer == NULL)
1032 return -EINVAL;
1033 } else
1034 xfer = NULL;
1035
1036 err = loop_init_xfer(lo, xfer, info);
1037 if (err)
1038 return err;
1039
1040 lo->lo_offset = info->lo_offset;
1041 lo->lo_sizelimit = info->lo_sizelimit;
1042 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
1043 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
1044 lo->lo_file_name[LO_NAME_SIZE-1] = 0;
1045 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
1046
1047 if (!xfer)
1048 xfer = &none_funcs;
1049 lo->transfer = xfer->transfer;
1050 lo->ioctl = xfer->ioctl;
1051
1052 lo->lo_flags = info->lo_flags;
1053
1054 lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
1055 lo->lo_init[0] = info->lo_init[0];
1056 lo->lo_init[1] = info->lo_init[1];
1057 if (info->lo_encrypt_key_size) {
1058 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
1059 info->lo_encrypt_key_size);
1060 lo->lo_key_owner = uid;
1061 }
1062
1063 return 0;
1064 }
1065
1066 static int loop_configure(struct loop_device *lo, fmode_t mode,
1067 struct block_device *bdev,
1068 const struct loop_config *config)
1069 {
1070 struct file *file;
1071 struct inode *inode;
1072 struct address_space *mapping;
1073 int error;
1074 loff_t size;
1075 bool partscan;
1076 unsigned short bsize;
1077
1078 /* This is safe, since we have a reference from open(). */
1079 __module_get(THIS_MODULE);
1080
1081 error = -EBADF;
1082 file = fget(config->fd);
1083 if (!file)
1084 goto out;
1085
1086 /*
1087 * If we don't hold exclusive handle for the device, upgrade to it
1088 * here to avoid changing device under exclusive owner.
1089 */
1090 if (!(mode & FMODE_EXCL)) {
1091 error = bd_prepare_to_claim(bdev, loop_configure);
1092 if (error)
1093 goto out_putf;
1094 }
1095
1096 error = mutex_lock_killable(&lo->lo_mutex);
1097 if (error)
1098 goto out_bdev;
1099
1100 error = -EBUSY;
1101 if (lo->lo_state != Lo_unbound)
1102 goto out_unlock;
1103
1104 error = loop_validate_file(file, bdev);
1105 if (error)
1106 goto out_unlock;
1107
1108 mapping = file->f_mapping;
1109 inode = mapping->host;
1110
1111 if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
1112 error = -EINVAL;
1113 goto out_unlock;
1114 }
1115
1116 if (config->block_size) {
1117 error = loop_validate_block_size(config->block_size);
1118 if (error)
1119 goto out_unlock;
1120 }
1121
1122 error = loop_set_status_from_info(lo, &config->info);
1123 if (error)
1124 goto out_unlock;
1125
1126 if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
1127 !file->f_op->write_iter)
1128 lo->lo_flags |= LO_FLAGS_READ_ONLY;
1129
1130 error = loop_prepare_queue(lo);
1131 if (error)
1132 goto out_unlock;
1133
1134 set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
1135
1136 lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
1137 lo->lo_device = bdev;
1138 lo->lo_backing_file = file;
1139 lo->old_gfp_mask = mapping_gfp_mask(mapping);
1140 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
1141
1142 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
1143 blk_queue_write_cache(lo->lo_queue, true, false);
1144
1145 if (config->block_size)
1146 bsize = config->block_size;
1147 else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev)
1148 /* In case of direct I/O, match underlying block size */
1149 bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
1150 else
1151 bsize = 512;
1152
1153 blk_queue_logical_block_size(lo->lo_queue, bsize);
1154 blk_queue_physical_block_size(lo->lo_queue, bsize);
1155 blk_queue_io_min(lo->lo_queue, bsize);
1156
1157 loop_update_rotational(lo);
1158 loop_update_dio(lo);
1159 loop_sysfs_init(lo);
1160
1161 size = get_loop_size(lo, file);
1162 loop_set_size(lo, size);
1163
1164 lo->lo_state = Lo_bound;
1165 if (part_shift)
1166 lo->lo_flags |= LO_FLAGS_PARTSCAN;
1167 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
1168 if (partscan)
1169 lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1170
1171 /* Grab the block_device to prevent its destruction after we
1172 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
1173 */
1174 bdgrab(bdev);
1175 mutex_unlock(&lo->lo_mutex);
1176 if (partscan)
1177 loop_reread_partitions(lo, bdev);
1178 if (!(mode & FMODE_EXCL))
1179 bd_abort_claiming(bdev, loop_configure);
1180 return 0;
1181
1182 out_unlock:
1183 mutex_unlock(&lo->lo_mutex);
1184 out_bdev:
1185 if (!(mode & FMODE_EXCL))
1186 bd_abort_claiming(bdev, loop_configure);
1187 out_putf:
1188 fput(file);
1189 out:
1190 /* This is safe: open() is still holding a reference. */
1191 module_put(THIS_MODULE);
1192 return error;
1193 }
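
/*
 * Usage sketch (userspace, illustrative only): binding a file in a single
 * LOOP_CONFIGURE call instead of LOOP_SET_FD followed by LOOP_SET_STATUS64.
 * The device node, image path and chosen flags are assumptions.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/loop.h>
 *
 *	struct loop_config cfg;
 *	int loop_fd = open("/dev/loop0", O_RDWR);
 *	int file_fd = open("/path/to/image", O_RDWR);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.fd = file_fd;
 *	cfg.block_size = 4096;			// optional; power of two in [512, PAGE_SIZE]
 *	cfg.info.lo_flags = LO_FLAGS_DIRECT_IO;	// ask for direct I/O to the backing file
 *
 *	if (ioctl(loop_fd, LOOP_CONFIGURE, &cfg) < 0)
 *		perror("LOOP_CONFIGURE");
 */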
1194
1195 static int __loop_clr_fd(struct loop_device *lo, bool release)
1196 {
1197 struct file *filp = NULL;
1198 gfp_t gfp = lo->old_gfp_mask;
1199 struct block_device *bdev = lo->lo_device;
1200 int err = 0;
1201 bool partscan = false;
1202 int lo_number;
1203
1204 mutex_lock(&lo->lo_mutex);
1205 if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
1206 err = -ENXIO;
1207 goto out_unlock;
1208 }
1209
1210 filp = lo->lo_backing_file;
1211 if (filp == NULL) {
1212 err = -EINVAL;
1213 goto out_unlock;
1214 }
1215
1216 if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
1217 blk_queue_write_cache(lo->lo_queue, false, false);
1218
1219 /* freeze request queue during the transition */
1220 blk_mq_freeze_queue(lo->lo_queue);
1221
1222 spin_lock_irq(&lo->lo_lock);
1223 lo->lo_backing_file = NULL;
1224 spin_unlock_irq(&lo->lo_lock);
1225
1226 loop_release_xfer(lo);
1227 lo->transfer = NULL;
1228 lo->ioctl = NULL;
1229 lo->lo_device = NULL;
1230 lo->lo_encryption = NULL;
1231 lo->lo_offset = 0;
1232 lo->lo_sizelimit = 0;
1233 lo->lo_encrypt_key_size = 0;
1234 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
1235 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
1236 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
1237 blk_queue_logical_block_size(lo->lo_queue, 512);
1238 blk_queue_physical_block_size(lo->lo_queue, 512);
1239 blk_queue_io_min(lo->lo_queue, 512);
1240 if (bdev) {
1241 bdput(bdev);
1242 invalidate_bdev(bdev);
1243 bdev->bd_inode->i_mapping->wb_err = 0;
1244 }
1245 set_capacity(lo->lo_disk, 0);
1246 loop_sysfs_exit(lo);
1247 if (bdev) {
1248 /* let user-space know about this change */
1249 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1250 }
1251 mapping_set_gfp_mask(filp->f_mapping, gfp);
1252 /* This is safe: open() is still holding a reference. */
1253 module_put(THIS_MODULE);
1254 blk_mq_unfreeze_queue(lo->lo_queue);
1255
1256 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
1257 lo_number = lo->lo_number;
1258 loop_unprepare_queue(lo);
1259 out_unlock:
1260 mutex_unlock(&lo->lo_mutex);
1261 if (partscan) {
1262 /*
1263 * bd_mutex has been held already in release path, so don't
1264 * acquire it if this function is called in such case.
1265 *
1266 * If the reread partition isn't from release path, lo_refcnt
1267 * must be at least one and it can only become zero when the
1268 * current holder is released.
1269 */
1270 if (!release)
1271 mutex_lock(&bdev->bd_mutex);
1272 err = bdev_disk_changed(bdev, false);
1273 if (!release)
1274 mutex_unlock(&bdev->bd_mutex);
1275 if (err)
1276 pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
1277 __func__, lo_number, err);
1278 /* Device is gone, no point in returning error */
1279 err = 0;
1280 }
1281
1282 /*
1283 * lo->lo_state is set to Lo_unbound here after above partscan has
1284 * finished.
1285 *
1286 * There cannot be anybody else entering __loop_clr_fd() as
1287 * lo->lo_backing_file is already cleared and Lo_rundown state
1288 * protects us from all the other places trying to change the 'lo'
1289 * device.
1290 */
1291 mutex_lock(&lo->lo_mutex);
1292 lo->lo_flags = 0;
1293 if (!part_shift)
1294 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1295 lo->lo_state = Lo_unbound;
1296 mutex_unlock(&lo->lo_mutex);
1297
1298 /*
1299 * Need not hold lo_mutex to fput backing file. Calling fput holding
1300 * lo_mutex triggers a circular lock dependency possibility warning as
1301 * fput can take bd_mutex which is usually taken before lo_mutex.
1302 */
1303 if (filp)
1304 fput(filp);
1305 return err;
1306 }
1307
1308 static int loop_clr_fd(struct loop_device *lo)
1309 {
1310 int err;
1311
1312 err = mutex_lock_killable(&lo->lo_mutex);
1313 if (err)
1314 return err;
1315 if (lo->lo_state != Lo_bound) {
1316 mutex_unlock(&lo->lo_mutex);
1317 return -ENXIO;
1318 }
1319 /*
1320 * If we've explicitly asked to tear down the loop device,
1321 * and it has an elevated reference count, set it for auto-teardown when
1322 * the last reference goes away. This stops $!~#$@ udev from
1323 * preventing teardown because it decided that it needs to run blkid on
1324 * the loopback device whenever they appear. xfstests is notorious for
1325 * failing tests because blkid via udev races with a losetup
1326 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
1327 * command to fail with EBUSY.
1328 */
1329 if (atomic_read(&lo->lo_refcnt) > 1) {
1330 lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
1331 mutex_unlock(&lo->lo_mutex);
1332 return 0;
1333 }
1334 lo->lo_state = Lo_rundown;
1335 mutex_unlock(&lo->lo_mutex);
1336
1337 return __loop_clr_fd(lo, false);
1338 }
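
/*
 * Usage sketch (userspace, illustrative only): detaching a device with
 * LOOP_CLR_FD. If another opener (e.g. udev running blkid) still holds a
 * reference, the ioctl returns 0 and the detach is deferred through
 * LO_FLAGS_AUTOCLEAR until the last reference is dropped.
 *
 *	if (ioctl(loop_fd, LOOP_CLR_FD, 0) < 0)
 *		perror("LOOP_CLR_FD");
 */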
1339
1340 static int
1341 loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1342 {
1343 int err;
1344 struct block_device *bdev;
1345 kuid_t uid = current_uid();
1346 int prev_lo_flags;
1347 bool partscan = false;
1348 bool size_changed = false;
1349
1350 err = mutex_lock_killable(&lo->lo_mutex);
1351 if (err)
1352 return err;
1353 if (lo->lo_encrypt_key_size &&
1354 !uid_eq(lo->lo_key_owner, uid) &&
1355 !capable(CAP_SYS_ADMIN)) {
1356 err = -EPERM;
1357 goto out_unlock;
1358 }
1359 if (lo->lo_state != Lo_bound) {
1360 err = -ENXIO;
1361 goto out_unlock;
1362 }
1363
1364 if (lo->lo_offset != info->lo_offset ||
1365 lo->lo_sizelimit != info->lo_sizelimit) {
1366 size_changed = true;
1367 sync_blockdev(lo->lo_device);
1368 invalidate_bdev(lo->lo_device);
1369 }
1370
1371 /* I/O need to be drained during transfer transition */
1372 blk_mq_freeze_queue(lo->lo_queue);
1373
1374 if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
1375 /* If any pages were dirtied after invalidate_bdev(), try again */
1376 err = -EAGAIN;
1377 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1378 __func__, lo->lo_number, lo->lo_file_name,
1379 lo->lo_device->bd_inode->i_mapping->nrpages);
1380 goto out_unfreeze;
1381 }
1382
1383 prev_lo_flags = lo->lo_flags;
1384
1385 err = loop_set_status_from_info(lo, info);
1386 if (err)
1387 goto out_unfreeze;
1388
1389 /* Mask out flags that can't be set using LOOP_SET_STATUS. */
1390 lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
1391 /* For those flags, use the previous values instead */
1392 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
1393 /* For flags that can't be cleared, use previous values too */
1394 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
1395
1396 if (size_changed) {
1397 loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
1398 lo->lo_backing_file);
1399 loop_set_size(lo, new_size);
1400 }
1401
1402 loop_config_discard(lo);
1403
1404 /* update dio if lo_offset or transfer is changed */
1405 __loop_update_dio(lo, lo->use_dio);
1406
1407 out_unfreeze:
1408 blk_mq_unfreeze_queue(lo->lo_queue);
1409
1410 if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
1411 !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
1412 lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
1413 bdev = lo->lo_device;
1414 partscan = true;
1415 }
1416 out_unlock:
1417 mutex_unlock(&lo->lo_mutex);
1418 if (partscan)
1419 loop_reread_partitions(lo, bdev);
1420
1421 return err;
1422 }
1423
1424 static int
1425 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
1426 {
1427 struct path path;
1428 struct kstat stat;
1429 int ret;
1430
1431 ret = mutex_lock_killable(&lo->lo_mutex);
1432 if (ret)
1433 return ret;
1434 if (lo->lo_state != Lo_bound) {
1435 mutex_unlock(&lo->lo_mutex);
1436 return -ENXIO;
1437 }
1438
1439 memset(info, 0, sizeof(*info));
1440 info->lo_number = lo->lo_number;
1441 info->lo_offset = lo->lo_offset;
1442 info->lo_sizelimit = lo->lo_sizelimit;
1443 info->lo_flags = lo->lo_flags;
1444 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1445 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1446 info->lo_encrypt_type =
1447 lo->lo_encryption ? lo->lo_encryption->number : 0;
1448 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1449 info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1450 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1451 lo->lo_encrypt_key_size);
1452 }
1453
1454 /* Drop lo_mutex while we call into the filesystem. */
1455 path = lo->lo_backing_file->f_path;
1456 path_get(&path);
1457 mutex_unlock(&lo->lo_mutex);
1458 ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1459 if (!ret) {
1460 info->lo_device = huge_encode_dev(stat.dev);
1461 info->lo_inode = stat.ino;
1462 info->lo_rdevice = huge_encode_dev(stat.rdev);
1463 }
1464 path_put(&path);
1465 return ret;
1466 }
1467
1468 static void
1469 loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1470 {
1471 memset(info64, 0, sizeof(*info64));
1472 info64->lo_number = info->lo_number;
1473 info64->lo_device = info->lo_device;
1474 info64->lo_inode = info->lo_inode;
1475 info64->lo_rdevice = info->lo_rdevice;
1476 info64->lo_offset = info->lo_offset;
1477 info64->lo_sizelimit = 0;
1478 info64->lo_encrypt_type = info->lo_encrypt_type;
1479 info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1480 info64->lo_flags = info->lo_flags;
1481 info64->lo_init[0] = info->lo_init[0];
1482 info64->lo_init[1] = info->lo_init[1];
1483 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1484 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1485 else
1486 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1487 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1488 }
1489
1490 static int
1491 loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1492 {
1493 memset(info, 0, sizeof(*info));
1494 info->lo_number = info64->lo_number;
1495 info->lo_device = info64->lo_device;
1496 info->lo_inode = info64->lo_inode;
1497 info->lo_rdevice = info64->lo_rdevice;
1498 info->lo_offset = info64->lo_offset;
1499 info->lo_encrypt_type = info64->lo_encrypt_type;
1500 info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1501 info->lo_flags = info64->lo_flags;
1502 info->lo_init[0] = info64->lo_init[0];
1503 info->lo_init[1] = info64->lo_init[1];
1504 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1505 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1506 else
1507 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1508 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1509
1510 /* error in case values were truncated */
1511 if (info->lo_device != info64->lo_device ||
1512 info->lo_rdevice != info64->lo_rdevice ||
1513 info->lo_inode != info64->lo_inode ||
1514 info->lo_offset != info64->lo_offset)
1515 return -EOVERFLOW;
1516
1517 return 0;
1518 }
1519
1520 static int
1521 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1522 {
1523 struct loop_info info;
1524 struct loop_info64 info64;
1525
1526 if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1527 return -EFAULT;
1528 loop_info64_from_old(&info, &info64);
1529 return loop_set_status(lo, &info64);
1530 }
1531
1532 static int
1533 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1534 {
1535 struct loop_info64 info64;
1536
1537 if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1538 return -EFAULT;
1539 return loop_set_status(lo, &info64);
1540 }
1541
1542 static int
1543 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1544 struct loop_info info;
1545 struct loop_info64 info64;
1546 int err;
1547
1548 if (!arg)
1549 return -EINVAL;
1550 err = loop_get_status(lo, &info64);
1551 if (!err)
1552 err = loop_info64_to_old(&info64, &info);
1553 if (!err && copy_to_user(arg, &info, sizeof(info)))
1554 err = -EFAULT;
1555
1556 return err;
1557 }
1558
1559 static int
1560 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1561 struct loop_info64 info64;
1562 int err;
1563
1564 if (!arg)
1565 return -EINVAL;
1566 err = loop_get_status(lo, &info64);
1567 if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1568 err = -EFAULT;
1569
1570 return err;
1571 }
1572
1573 static int loop_set_capacity(struct loop_device *lo)
1574 {
1575 loff_t size;
1576
1577 if (unlikely(lo->lo_state != Lo_bound))
1578 return -ENXIO;
1579
1580 size = get_loop_size(lo, lo->lo_backing_file);
1581 loop_set_size(lo, size);
1582
1583 return 0;
1584 }
1585
1586 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1587 {
1588 int error = -ENXIO;
1589 if (lo->lo_state != Lo_bound)
1590 goto out;
1591
1592 __loop_update_dio(lo, !!arg);
1593 if (lo->use_dio == !!arg)
1594 return 0;
1595 error = -EINVAL;
1596 out:
1597 return error;
1598 }
1599
1600 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1601 {
1602 int err = 0;
1603
1604 if (lo->lo_state != Lo_bound)
1605 return -ENXIO;
1606
1607 err = loop_validate_block_size(arg);
1608 if (err)
1609 return err;
1610
1611 if (lo->lo_queue->limits.logical_block_size == arg)
1612 return 0;
1613
1614 sync_blockdev(lo->lo_device);
1615 invalidate_bdev(lo->lo_device);
1616
1617 blk_mq_freeze_queue(lo->lo_queue);
1618
1619 /* invalidate_bdev should have truncated all the pages */
1620 if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1621 err = -EAGAIN;
1622 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1623 __func__, lo->lo_number, lo->lo_file_name,
1624 lo->lo_device->bd_inode->i_mapping->nrpages);
1625 goto out_unfreeze;
1626 }
1627
1628 blk_queue_logical_block_size(lo->lo_queue, arg);
1629 blk_queue_physical_block_size(lo->lo_queue, arg);
1630 blk_queue_io_min(lo->lo_queue, arg);
1631 loop_update_dio(lo);
1632 out_unfreeze:
1633 blk_mq_unfreeze_queue(lo->lo_queue);
1634
1635 return err;
1636 }
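
/*
 * Usage sketch (userspace, illustrative only): changing the logical block
 * size of a bound device, assuming loop_fd is an open descriptor for it.
 * The value must satisfy loop_validate_block_size(), i.e. a power of two
 * between 512 and PAGE_SIZE.
 *
 *	if (ioctl(loop_fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
 *		perror("LOOP_SET_BLOCK_SIZE");
 */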
1637
1638 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
1639 unsigned long arg)
1640 {
1641 int err;
1642
1643 err = mutex_lock_killable(&lo->lo_mutex);
1644 if (err)
1645 return err;
1646 switch (cmd) {
1647 case LOOP_SET_CAPACITY:
1648 err = loop_set_capacity(lo);
1649 break;
1650 case LOOP_SET_DIRECT_IO:
1651 err = loop_set_dio(lo, arg);
1652 break;
1653 case LOOP_SET_BLOCK_SIZE:
1654 err = loop_set_block_size(lo, arg);
1655 break;
1656 default:
1657 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1658 }
1659 mutex_unlock(&lo->lo_mutex);
1660 return err;
1661 }
1662
1663 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
1664 unsigned int cmd, unsigned long arg)
1665 {
1666 struct loop_device *lo = bdev->bd_disk->private_data;
1667 void __user *argp = (void __user *) arg;
1668 int err;
1669
1670 switch (cmd) {
1671 case LOOP_SET_FD: {
1672 /*
1673 * Legacy case - pass in a zeroed out struct loop_config with
1674 * only the file descriptor set, which corresponds with the
1675 * default parameters we'd have used otherwise.
1676 */
1677 struct loop_config config;
1678
1679 memset(&config, 0, sizeof(config));
1680 config.fd = arg;
1681
1682 return loop_configure(lo, mode, bdev, &config);
1683 }
1684 case LOOP_CONFIGURE: {
1685 struct loop_config config;
1686
1687 if (copy_from_user(&config, argp, sizeof(config)))
1688 return -EFAULT;
1689
1690 return loop_configure(lo, mode, bdev, &config);
1691 }
1692 case LOOP_CHANGE_FD:
1693 return loop_change_fd(lo, bdev, arg);
1694 case LOOP_CLR_FD:
1695 return loop_clr_fd(lo);
1696 case LOOP_SET_STATUS:
1697 err = -EPERM;
1698 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1699 err = loop_set_status_old(lo, argp);
1700 }
1701 break;
1702 case LOOP_GET_STATUS:
1703 return loop_get_status_old(lo, argp);
1704 case LOOP_SET_STATUS64:
1705 err = -EPERM;
1706 if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
1707 err = loop_set_status64(lo, argp);
1708 }
1709 break;
1710 case LOOP_GET_STATUS64:
1711 return loop_get_status64(lo, argp);
1712 case LOOP_SET_CAPACITY:
1713 case LOOP_SET_DIRECT_IO:
1714 case LOOP_SET_BLOCK_SIZE:
1715 if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
1716 return -EPERM;
1717 fallthrough;
1718 default:
1719 err = lo_simple_ioctl(lo, cmd, arg);
1720 break;
1721 }
1722
1723 return err;
1724 }
1725
1726 #ifdef CONFIG_COMPAT
1727 struct compat_loop_info {
1728 compat_int_t lo_number; /* ioctl r/o */
1729 compat_dev_t lo_device; /* ioctl r/o */
1730 compat_ulong_t lo_inode; /* ioctl r/o */
1731 compat_dev_t lo_rdevice; /* ioctl r/o */
1732 compat_int_t lo_offset;
1733 compat_int_t lo_encrypt_type;
1734 compat_int_t lo_encrypt_key_size; /* ioctl w/o */
1735 compat_int_t lo_flags; /* ioctl r/o */
1736 char lo_name[LO_NAME_SIZE];
1737 unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
1738 compat_ulong_t lo_init[2];
1739 char reserved[4];
1740 };
1741
1742 /*
1743 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
1744 * - noinlined to reduce stack space usage in main part of driver
1745 */
1746 static noinline int
1747 loop_info64_from_compat(const struct compat_loop_info __user *arg,
1748 struct loop_info64 *info64)
1749 {
1750 struct compat_loop_info info;
1751
1752 if (copy_from_user(&info, arg, sizeof(info)))
1753 return -EFAULT;
1754
1755 memset(info64, 0, sizeof(*info64));
1756 info64->lo_number = info.lo_number;
1757 info64->lo_device = info.lo_device;
1758 info64->lo_inode = info.lo_inode;
1759 info64->lo_rdevice = info.lo_rdevice;
1760 info64->lo_offset = info.lo_offset;
1761 info64->lo_sizelimit = 0;
1762 info64->lo_encrypt_type = info.lo_encrypt_type;
1763 info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
1764 info64->lo_flags = info.lo_flags;
1765 info64->lo_init[0] = info.lo_init[0];
1766 info64->lo_init[1] = info.lo_init[1];
1767 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1768 memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
1769 else
1770 memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
1771 memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
1772 return 0;
1773 }
1774
1775 /*
1776 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
1777 * - noinlined to reduce stack space usage in main part of driver
1778 */
1779 static noinline int
1780 loop_info64_to_compat(const struct loop_info64 *info64,
1781 struct compat_loop_info __user *arg)
1782 {
1783 struct compat_loop_info info;
1784
1785 memset(&info, 0, sizeof(info));
1786 info.lo_number = info64->lo_number;
1787 info.lo_device = info64->lo_device;
1788 info.lo_inode = info64->lo_inode;
1789 info.lo_rdevice = info64->lo_rdevice;
1790 info.lo_offset = info64->lo_offset;
1791 info.lo_encrypt_type = info64->lo_encrypt_type;
1792 info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
1793 info.lo_flags = info64->lo_flags;
1794 info.lo_init[0] = info64->lo_init[0];
1795 info.lo_init[1] = info64->lo_init[1];
1796 if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1797 memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1798 else
1799 memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
1800 memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1801
1802 /* error in case values were truncated */
1803 if (info.lo_device != info64->lo_device ||
1804 info.lo_rdevice != info64->lo_rdevice ||
1805 info.lo_inode != info64->lo_inode ||
1806 info.lo_offset != info64->lo_offset ||
1807 info.lo_init[0] != info64->lo_init[0] ||
1808 info.lo_init[1] != info64->lo_init[1])
1809 return -EOVERFLOW;
1810
1811 if (copy_to_user(arg, &info, sizeof(info)))
1812 return -EFAULT;
1813 return 0;
1814 }
1815
1816 static int
1817 loop_set_status_compat(struct loop_device *lo,
1818 const struct compat_loop_info __user *arg)
1819 {
1820 struct loop_info64 info64;
1821 int ret;
1822
1823 ret = loop_info64_from_compat(arg, &info64);
1824 if (ret < 0)
1825 return ret;
1826 return loop_set_status(lo, &info64);
1827 }
1828
1829 static int
1830 loop_get_status_compat(struct loop_device *lo,
1831 struct compat_loop_info __user *arg)
1832 {
1833 struct loop_info64 info64;
1834 int err;
1835
1836 if (!arg)
1837 return -EINVAL;
1838 err = loop_get_status(lo, &info64);
1839 if (!err)
1840 err = loop_info64_to_compat(&info64, arg);
1841 return err;
1842 }
1843
1844 static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1845 unsigned int cmd, unsigned long arg)
1846 {
1847 struct loop_device *lo = bdev->bd_disk->private_data;
1848 int err;
1849
1850 switch(cmd) {
1851 case LOOP_SET_STATUS:
1852 err = loop_set_status_compat(lo,
1853 (const struct compat_loop_info __user *)arg);
1854 break;
1855 case LOOP_GET_STATUS:
1856 err = loop_get_status_compat(lo,
1857 (struct compat_loop_info __user *)arg);
1858 break;
1859 case LOOP_SET_CAPACITY:
1860 case LOOP_CLR_FD:
1861 case LOOP_GET_STATUS64:
1862 case LOOP_SET_STATUS64:
1863 case LOOP_CONFIGURE:
1864 arg = (unsigned long) compat_ptr(arg);
1865 fallthrough;
1866 case LOOP_SET_FD:
1867 case LOOP_CHANGE_FD:
1868 case LOOP_SET_BLOCK_SIZE:
1869 case LOOP_SET_DIRECT_IO:
1870 err = lo_ioctl(bdev, mode, cmd, arg);
1871 break;
1872 default:
1873 err = -ENOIOCTLCMD;
1874 break;
1875 }
1876 return err;
1877 }
1878 #endif
1879
1880 static int lo_open(struct block_device *bdev, fmode_t mode)
1881 {
1882 struct loop_device *lo;
1883 int err;
1884
1885 /*
1886	 * Take loop_ctl_mutex to protect the lo pointer from a race with
1887	 * loop_control_ioctl(LOOP_CTL_REMOVE); however, to reduce contention,
1888	 * release it prior to updating lo->lo_refcnt.
1889 */
1890 err = mutex_lock_killable(&loop_ctl_mutex);
1891 if (err)
1892 return err;
1893 lo = bdev->bd_disk->private_data;
1894 if (!lo) {
1895 mutex_unlock(&loop_ctl_mutex);
1896 return -ENXIO;
1897 }
1898 err = mutex_lock_killable(&lo->lo_mutex);
1899 mutex_unlock(&loop_ctl_mutex);
1900 if (err)
1901 return err;
1902 atomic_inc(&lo->lo_refcnt);
1903 mutex_unlock(&lo->lo_mutex);
1904 return 0;
1905 }
1906
1907 static void lo_release(struct gendisk *disk, fmode_t mode)
1908 {
1909 struct loop_device *lo = disk->private_data;
1910
1911 mutex_lock(&lo->lo_mutex);
1912 if (atomic_dec_return(&lo->lo_refcnt))
1913 goto out_unlock;
1914
1915 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
1916 if (lo->lo_state != Lo_bound)
1917 goto out_unlock;
1918 lo->lo_state = Lo_rundown;
1919 mutex_unlock(&lo->lo_mutex);
1920 /*
1921 * In autoclear mode, stop the loop thread
1922 * and remove configuration after last close.
1923 */
1924 __loop_clr_fd(lo, true);
1925 return;
1926 } else if (lo->lo_state == Lo_bound) {
1927 /*
1928 * Otherwise keep thread (if running) and config,
1929 * but flush possible ongoing bios in thread.
1930 */
1931 blk_mq_freeze_queue(lo->lo_queue);
1932 blk_mq_unfreeze_queue(lo->lo_queue);
1933 }
1934
1935 out_unlock:
1936 mutex_unlock(&lo->lo_mutex);
1937 }
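
/*
 * Illustrative sketch (not part of the original driver): userspace opts in
 * to the autoclear behaviour handled in lo_release() above by setting
 * LO_FLAGS_AUTOCLEAR when configuring the device, e.g. via LOOP_CONFIGURE
 * (the fd names below are hypothetical):
 *
 *	struct loop_config cfg = { .fd = backing_fd };
 *	cfg.info.lo_flags = LO_FLAGS_AUTOCLEAR;
 *	ioctl(loop_fd, LOOP_CONFIGURE, &cfg);
 *
 * With that flag set, the last close of /dev/loopN tears the device down
 * automatically instead of leaving the backing file attached.
 */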
1938
1939 static const struct block_device_operations lo_fops = {
1940 .owner = THIS_MODULE,
1941 .open = lo_open,
1942 .release = lo_release,
1943 .ioctl = lo_ioctl,
1944 #ifdef CONFIG_COMPAT
1945 .compat_ioctl = lo_compat_ioctl,
1946 #endif
1947 };
1948
1949 /*
1950 * And now the module code and kernel interface.
1951 */
1952 static int max_loop;
1953 module_param(max_loop, int, 0444);
1954 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
1955 module_param(max_part, int, 0444);
1956 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
1957 MODULE_LICENSE("GPL");
1958 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1959
1960 int loop_register_transfer(struct loop_func_table *funcs)
1961 {
1962 unsigned int n = funcs->number;
1963
1964 if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1965 return -EINVAL;
1966 xfer_funcs[n] = funcs;
1967 return 0;
1968 }
1969
1970 static int unregister_transfer_cb(int id, void *ptr, void *data)
1971 {
1972 struct loop_device *lo = ptr;
1973 struct loop_func_table *xfer = data;
1974
1975 mutex_lock(&lo->lo_mutex);
1976 if (lo->lo_encryption == xfer)
1977 loop_release_xfer(lo);
1978 mutex_unlock(&lo->lo_mutex);
1979 return 0;
1980 }
1981
1982 int loop_unregister_transfer(int number)
1983 {
1984 unsigned int n = number;
1985 struct loop_func_table *xfer;
1986
1987 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1988 return -EINVAL;
1989
1990 xfer_funcs[n] = NULL;
1991 idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
1992 return 0;
1993 }
1994
1995 EXPORT_SYMBOL(loop_register_transfer);
1996 EXPORT_SYMBOL(loop_unregister_transfer);
1997
1998 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
1999 const struct blk_mq_queue_data *bd)
2000 {
2001 struct request *rq = bd->rq;
2002 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
2003 struct loop_device *lo = rq->q->queuedata;
2004
2005 blk_mq_start_request(rq);
2006
2007 if (lo->lo_state != Lo_bound)
2008 return BLK_STS_IOERR;
2009
2010 switch (req_op(rq)) {
2011 case REQ_OP_FLUSH:
2012 case REQ_OP_DISCARD:
2013 case REQ_OP_WRITE_ZEROES:
2014 cmd->use_aio = false;
2015 break;
2016 default:
2017 cmd->use_aio = lo->use_dio;
2018 break;
2019 }
2020
2021 /* always use the first bio's css */
2022 #ifdef CONFIG_BLK_CGROUP
2023 if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
2024 cmd->css = &bio_blkcg(rq->bio)->css;
2025 css_get(cmd->css);
2026 } else
2027 #endif
2028 cmd->css = NULL;
2029 kthread_queue_work(&lo->worker, &cmd->work);
2030
2031 return BLK_STS_OK;
2032 }
2033
2034 static void loop_handle_cmd(struct loop_cmd *cmd)
2035 {
2036 struct request *rq = blk_mq_rq_from_pdu(cmd);
2037 const bool write = op_is_write(req_op(rq));
2038 struct loop_device *lo = rq->q->queuedata;
2039 int ret = 0;
2040
2041 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
2042 ret = -EIO;
2043 goto failed;
2044 }
2045
2046 ret = do_req_filebacked(lo, rq);
2047 failed:
2048 /* complete non-aio request */
2049 if (!cmd->use_aio || ret) {
2050 if (ret == -EOPNOTSUPP)
2051 cmd->ret = ret;
2052 else
2053 cmd->ret = ret ? -EIO : 0;
2054 if (likely(!blk_should_fake_timeout(rq->q)))
2055 blk_mq_complete_request(rq);
2056 }
2057 }
2058
2059 static void loop_queue_work(struct kthread_work *work)
2060 {
2061 struct loop_cmd *cmd =
2062 container_of(work, struct loop_cmd, work);
2063
2064 loop_handle_cmd(cmd);
2065 }
2066
2067 static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
2068 unsigned int hctx_idx, unsigned int numa_node)
2069 {
2070 struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
2071
2072 kthread_init_work(&cmd->work, loop_queue_work);
2073 return 0;
2074 }
2075
2076 static const struct blk_mq_ops loop_mq_ops = {
2077 .queue_rq = loop_queue_rq,
2078 .init_request = loop_init_request,
2079 .complete = lo_complete_rq,
2080 };
2081
2082 static int loop_add(struct loop_device **l, int i)
2083 {
2084 struct loop_device *lo;
2085 struct gendisk *disk;
2086 int err;
2087
2088 err = -ENOMEM;
2089 lo = kzalloc(sizeof(*lo), GFP_KERNEL);
2090 if (!lo)
2091 goto out;
2092
2093 lo->lo_state = Lo_unbound;
2094
2095	/* allocate an id; if @id >= 0, we're requesting that specific id */
2096 if (i >= 0) {
2097 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
2098 if (err == -ENOSPC)
2099 err = -EEXIST;
2100 } else {
2101 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
2102 }
2103 if (err < 0)
2104 goto out_free_dev;
2105 i = err;
2106
2107 err = -ENOMEM;
2108 lo->tag_set.ops = &loop_mq_ops;
2109 lo->tag_set.nr_hw_queues = 1;
2110 lo->tag_set.queue_depth = 128;
2111 lo->tag_set.numa_node = NUMA_NO_NODE;
2112 lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2113 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
2114 lo->tag_set.driver_data = lo;
2115
2116 err = blk_mq_alloc_tag_set(&lo->tag_set);
2117 if (err)
2118 goto out_free_idr;
2119
2120 lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
2121 if (IS_ERR(lo->lo_queue)) {
2122 err = PTR_ERR(lo->lo_queue);
2123 goto out_cleanup_tags;
2124 }
2125 lo->lo_queue->queuedata = lo;
2126
2127 blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
2128
2129 /*
2130	 * By default, we do buffered I/O, so it doesn't make sense to enable
2131	 * merging: the I/O submitted to the backing file is handled page by
2132	 * page. For direct I/O, merging does help to dispatch bigger requests
2133	 * to the underlying disk, so we enable merging once direct I/O is on.
2134 */
2135 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
2136
2137 err = -ENOMEM;
2138 disk = lo->lo_disk = alloc_disk(1 << part_shift);
2139 if (!disk)
2140 goto out_free_queue;
2141
2142 /*
2143 * Disable partition scanning by default. The in-kernel partition
2144 * scanning can be requested individually per-device during its
2145 * setup. Userspace can always add and remove partitions from all
2146 * devices. The needed partition minors are allocated from the
2147 * extended minor space, the main loop device numbers will continue
2148 * to match the loop minors, regardless of the number of partitions
2149 * used.
2150 *
2151 * If max_part is given, partition scanning is globally enabled for
2152 * all loop devices. The minors for the main loop devices will be
2153 * multiples of max_part.
2154 *
2155 * Note: Global-for-all-devices, set-only-at-init, read-only module
2156	 * parameters like 'max_loop' and 'max_part' make things needlessly
2157	 * complicated, are too static and inflexible, and may surprise
2158	 * userspace tools. Parameters like this should, in general, be avoided.
2159 */
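	/*
	 * Worked example (illustrative, not from the original source): with
	 * max_part=15, loop_init() below computes part_shift=4, so loopN's
	 * main device gets minor N << 4 (loop0 -> 0, loop1 -> 16, ...) and up
	 * to 15 partition minors fit in between; anything beyond that comes
	 * from the extended minor space enabled by GENHD_FL_EXT_DEVT.
	 */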
2160 if (!part_shift)
2161 disk->flags |= GENHD_FL_NO_PART_SCAN;
2162 disk->flags |= GENHD_FL_EXT_DEVT;
2163 atomic_set(&lo->lo_refcnt, 0);
2164 mutex_init(&lo->lo_mutex);
2165 lo->lo_number = i;
2166 spin_lock_init(&lo->lo_lock);
2167 disk->major = LOOP_MAJOR;
2168 disk->first_minor = i << part_shift;
2169 disk->fops = &lo_fops;
2170 disk->private_data = lo;
2171 disk->queue = lo->lo_queue;
2172 sprintf(disk->disk_name, "loop%d", i);
2173 add_disk(disk);
2174 *l = lo;
2175 return lo->lo_number;
2176
2177 out_free_queue:
2178 blk_cleanup_queue(lo->lo_queue);
2179 out_cleanup_tags:
2180 blk_mq_free_tag_set(&lo->tag_set);
2181 out_free_idr:
2182 idr_remove(&loop_index_idr, i);
2183 out_free_dev:
2184 kfree(lo);
2185 out:
2186 return err;
2187 }
2188
2189 static void loop_remove(struct loop_device *lo)
2190 {
2191 del_gendisk(lo->lo_disk);
2192 blk_cleanup_queue(lo->lo_queue);
2193 blk_mq_free_tag_set(&lo->tag_set);
2194 put_disk(lo->lo_disk);
2195 mutex_destroy(&lo->lo_mutex);
2196 kfree(lo);
2197 }
2198
2199 static int find_free_cb(int id, void *ptr, void *data)
2200 {
2201 struct loop_device *lo = ptr;
2202 struct loop_device **l = data;
2203
2204 if (lo->lo_state == Lo_unbound) {
2205 *l = lo;
2206 return 1;
2207 }
2208 return 0;
2209 }
2210
2211 static int loop_lookup(struct loop_device **l, int i)
2212 {
2213 struct loop_device *lo;
2214 int ret = -ENODEV;
2215
2216 if (i < 0) {
2217 int err;
2218
2219 err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
2220 if (err == 1) {
2221 *l = lo;
2222 ret = lo->lo_number;
2223 }
2224 goto out;
2225 }
2226
2227 /* lookup and return a specific i */
2228 lo = idr_find(&loop_index_idr, i);
2229 if (lo) {
2230 *l = lo;
2231 ret = lo->lo_number;
2232 }
2233 out:
2234 return ret;
2235 }
2236
2237 static void loop_probe(dev_t dev)
2238 {
2239 int idx = MINOR(dev) >> part_shift;
2240 struct loop_device *lo;
2241
2242 if (max_loop && idx >= max_loop)
2243 return;
2244
2245 mutex_lock(&loop_ctl_mutex);
2246 if (loop_lookup(&lo, idx) < 0)
2247 loop_add(&lo, idx);
2248 mutex_unlock(&loop_ctl_mutex);
2249 }
2250
2251 static long loop_control_ioctl(struct file *file, unsigned int cmd,
2252 unsigned long parm)
2253 {
2254 struct loop_device *lo;
2255 int ret;
2256
2257 ret = mutex_lock_killable(&loop_ctl_mutex);
2258 if (ret)
2259 return ret;
2260
2261 ret = -ENOSYS;
2262 switch (cmd) {
2263 case LOOP_CTL_ADD:
2264 ret = loop_lookup(&lo, parm);
2265 if (ret >= 0) {
2266 ret = -EEXIST;
2267 break;
2268 }
2269 ret = loop_add(&lo, parm);
2270 break;
2271 case LOOP_CTL_REMOVE:
2272 ret = loop_lookup(&lo, parm);
2273 if (ret < 0)
2274 break;
2275 ret = mutex_lock_killable(&lo->lo_mutex);
2276 if (ret)
2277 break;
2278 if (lo->lo_state != Lo_unbound) {
2279 ret = -EBUSY;
2280 mutex_unlock(&lo->lo_mutex);
2281 break;
2282 }
2283 if (atomic_read(&lo->lo_refcnt) > 0) {
2284 ret = -EBUSY;
2285 mutex_unlock(&lo->lo_mutex);
2286 break;
2287 }
2288 lo->lo_disk->private_data = NULL;
2289 mutex_unlock(&lo->lo_mutex);
2290 idr_remove(&loop_index_idr, lo->lo_number);
2291 loop_remove(lo);
2292 break;
2293 case LOOP_CTL_GET_FREE:
2294 ret = loop_lookup(&lo, -1);
2295 if (ret >= 0)
2296 break;
2297 ret = loop_add(&lo, -1);
2298 }
2299 mutex_unlock(&loop_ctl_mutex);
2300
2301 return ret;
2302 }
2303
2304 static const struct file_operations loop_ctl_fops = {
2305 .open = nonseekable_open,
2306 .unlocked_ioctl = loop_control_ioctl,
2307 .compat_ioctl = loop_control_ioctl,
2308 .owner = THIS_MODULE,
2309 .llseek = noop_llseek,
2310 };
2311
2312 static struct miscdevice loop_misc = {
2313 .minor = LOOP_CTRL_MINOR,
2314 .name = "loop-control",
2315 .fops = &loop_ctl_fops,
2316 };
2317
2318 MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
2319 MODULE_ALIAS("devname:loop-control");
2320
2321 static int __init loop_init(void)
2322 {
2323 int i, nr;
2324 struct loop_device *lo;
2325 int err;
2326
2327 part_shift = 0;
2328 if (max_part > 0) {
2329 part_shift = fls(max_part);
2330
2331 /*
2332		 * Adjust max_part according to part_shift, as it is exported
2333		 * to user space so that users can work out the correct minor
2334		 * number if they want to create more devices.
2335 *
2336 * Note that -1 is required because partition 0 is reserved
2337 * for the whole disk.
2338 */
2339 max_part = (1UL << part_shift) - 1;
2340 }
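	/*
	 * Worked example (illustrative, not in the original source): loading
	 * the module with max_part=4 gives fls(4) = 3, so part_shift = 3 and
	 * max_part is rounded up to (1 << 3) - 1 = 7; each loop device then
	 * spans 8 minors (the whole disk plus up to 7 partitions).
	 */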
2341
2342 if ((1UL << part_shift) > DISK_MAX_PARTS) {
2343 err = -EINVAL;
2344 goto err_out;
2345 }
2346
2347 if (max_loop > 1UL << (MINORBITS - part_shift)) {
2348 err = -EINVAL;
2349 goto err_out;
2350 }
2351
2352 /*
2353 * If max_loop is specified, create that many devices upfront.
2354 * This also becomes a hard limit. If max_loop is not specified,
2355 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
2356 * init time. Loop devices can be requested on-demand with the
2357 * /dev/loop-control interface, or be instantiated by accessing
2358 * a 'dead' device node.
2359 */
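	/*
	 * Hedged usage sketch (illustrative only, not built as part of this
	 * driver; the variable names and backing path are hypothetical): a
	 * userspace program can request a free device through
	 * /dev/loop-control and attach a backing file roughly like this:
	 *
	 *	int ctl = open("/dev/loop-control", O_RDWR);
	 *	int idx = ioctl(ctl, LOOP_CTL_GET_FREE);
	 *	char name[32];
	 *	snprintf(name, sizeof(name), "/dev/loop%d", idx);
	 *	int lfd = open(name, O_RDWR);
	 *	int bfd = open("/path/to/backing.img", O_RDWR);
	 *	ioctl(lfd, LOOP_SET_FD, bfd);
	 *
	 * Error handling is omitted; LOOP_CONFIGURE is the newer single-ioctl
	 * way to attach and configure a device in one step.
	 */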
2360 if (max_loop)
2361 nr = max_loop;
2362 else
2363 nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
2364
2365 err = misc_register(&loop_misc);
2366 if (err < 0)
2367 goto err_out;
2368
2369
2370 if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
2371 err = -EIO;
2372 goto misc_out;
2373 }
2374
2375	/* pre-create the number of devices given by config or max_loop */
2376 mutex_lock(&loop_ctl_mutex);
2377 for (i = 0; i < nr; i++)
2378 loop_add(&lo, i);
2379 mutex_unlock(&loop_ctl_mutex);
2380
2381 printk(KERN_INFO "loop: module loaded\n");
2382 return 0;
2383
2384 misc_out:
2385 misc_deregister(&loop_misc);
2386 err_out:
2387 return err;
2388 }
2389
2390 static int loop_exit_cb(int id, void *ptr, void *data)
2391 {
2392 struct loop_device *lo = ptr;
2393
2394 loop_remove(lo);
2395 return 0;
2396 }
2397
2398 static void __exit loop_exit(void)
2399 {
2400 mutex_lock(&loop_ctl_mutex);
2401
2402 idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
2403 idr_destroy(&loop_index_idr);
2404
2405 unregister_blkdev(LOOP_MAJOR, "loop");
2406
2407 misc_deregister(&loop_misc);
2408
2409 mutex_unlock(&loop_ctl_mutex);
2410 }
2411
2412 module_init(loop_init);
2413 module_exit(loop_exit);
2414
2415 #ifndef MODULE
2416 static int __init max_loop_setup(char *str)
2417 {
2418 max_loop = simple_strtol(str, NULL, 0);
2419 return 1;
2420 }
2421
2422 __setup("max_loop=", max_loop_setup);
2423 #endif
2424