// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"
#include "dm-uevent.h"
#include "dm-ima.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/pr.h>
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

/*
 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
 * dm_io into one list, and reuse bio->bi_private as the list head. Before
 * ending this fs bio, we will recover its ->bi_private.
 */
#define REQ_DM_POLL_LIST REQ_DRV

static const char *_name = DM_NAME;

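/*
 * 'major' is the requested major number (0 means allocate dynamically);
 * '_major' holds the major number actually returned by register_blkdev().
 */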
static unsigned int major;
static unsigned int _major;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

atomic_t dm_global_event_nr = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);

void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);
	wake_up(&dm_global_eventq);
}

DEFINE_STATIC_KEY_FALSE(stats_enabled);
DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
DEFINE_STATIC_KEY_FALSE(zoned_enabled);

/*
 * One of these is allocated (on-stack) per original bio.
 */
struct clone_info {
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned int sector_count;
	bool is_abnormal_io:1;
	bool submit_as_polled:1;
};

static inline struct dm_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}

void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
}
EXPORT_SYMBOL_GPL(dm_per_bio_data);

struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	struct dm_io *io = (struct dm_io *)((char *)data + data_size);

	if (io->magic == DM_IO_MAGIC)
		return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
	BUG_ON(io->magic != DM_TIO_MAGIC);
	return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);

unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);

#define MINOR_ALLOCED ((void *)-1)

#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;

#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
	int latch = READ_ONCE(swap_bios);

	if (unlikely(latch <= 0))
		latch = DEFAULT_SWAP_BIOS;
	return latch;
}

struct table_device {
	struct list_head list;
	refcount_t count;
	struct dm_dev dm_dev;
};

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_BIO_BASED_IOS 16
static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

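/*
 * Clamp a signed module parameter to [min, max]. The clamped value is
 * written back with cmpxchg() so that a racing update isn't clobbered.
 */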
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
	int param = READ_ONCE(*module_param);
	int modified_param = 0;
	bool modified = true;

	if (param < min)
		modified_param = min;
	else if (param > max)
		modified_param = max;
	else
		modified = false;

	if (modified) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

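/*
 * Unsigned variant: 0 selects the default and values above 'max' are
 * capped to it.
 */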
unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{
	unsigned int param = READ_ONCE(*module_param);
	unsigned int modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}

unsigned int dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

static unsigned int dm_get_numa_node(void)
{
	return __dm_get_module_param_int(&dm_numa_node,
					 DM_NUMA_NODE, num_online_nodes() - 1);
}

static int __init local_init(void)
{
	int r;

	r = dm_uevent_init();
	if (r)
		return r;

	deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	if (!_major)
		_major = r;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();

	return r;
}

static void local_exit(void)
{
	destroy_workqueue(deferred_remove_workqueue);

	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
	DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
	       " Duplicate IMA measurements will not be recorded in the IMA log.");
#endif

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

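/*
 * Resolve the block device an ioctl should be forwarded to. Only
 * single-target tables are supported. Retries while the target reports
 * -ENOTCONN (transiently disconnected), unless a fatal signal is pending.
 */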
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
			    struct block_device **bdev)
{
	struct dm_target *ti;
	struct dm_table *map;
	int r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, srcu_idx);
	if (!map || !dm_table_get_size(map))
		return r;

	/* We only support devices that have a single target */
	if (map->num_targets != 1)
		return r;

	ti = dm_table_get_target(map, 0);
	if (!ti->type->prepare_ioctl)
		return r;

	if (dm_suspended_md(md))
		return -EAGAIN;

	r = ti->type->prepare_ioctl(ti, bdev);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		dm_put_live_table(md, *srcu_idx);
		fsleep(10000);
		goto retry;
	}

	return r;
}

static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
{
	dm_put_live_table(md, srcu_idx);
}

static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against a
		 * subset of the parent bdev; require extra privileges.
		 */
		if (!capable(CAP_SYS_RAWIO)) {
			DMDEBUG_LIMIT(
				"%s: sending ioctl %x to DM device without required privilege.",
				current->comm, cmd);
			r = -ENOIOCTLCMD;
			goto out;
		}
	}

	if (!bdev->bd_disk->fops->ioctl)
		r = -ENOTTY;
	else
		r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}

u64 dm_start_time_ns_from_clone(struct bio *bio)
{
	return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);

static inline bool bio_is_flush_with_data(struct bio *bio)
{
	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
}

static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
	/*
	 * If REQ_PREFLUSH set, don't account payload, it will be
	 * submitted (and accounted) after this flush completes.
	 */
	if (bio_is_flush_with_data(bio))
		return 0;
	if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
		return io->sectors;
	return bio_sectors(bio);
}

static void dm_io_acct(struct dm_io *io, bool end)
{
	struct bio *bio = io->orig_bio;

	if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
		if (!end)
			bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
					   io->start_time);
		else
			bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
					 dm_io_sectors(io, bio),
					 io->start_time);
	}

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&io->md->stats))) {
		sector_t sector;

		if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
			sector = bio_end_sector(bio) - io->sector_offset;
		else
			sector = bio->bi_iter.bi_sector;

		dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
				    sector, dm_io_sectors(io, bio),
				    end, io->start_time, &io->stats_aux);
	}
}

static void __dm_start_io_acct(struct dm_io *io)
{
	dm_io_acct(io, false);
}

static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
{
	/*
	 * Ensure IO accounting is only ever started once.
	 */
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		return;

	/* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
	if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
	} else {
		unsigned long flags;
		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
		spin_lock_irqsave(&io->lock, flags);
		if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
			spin_unlock_irqrestore(&io->lock, flags);
			return;
		}
		dm_io_set_flag(io, DM_IO_ACCOUNTED);
		spin_unlock_irqrestore(&io->lock, flags);
	}

	__dm_start_io_acct(io);
}

static void dm_end_io_acct(struct dm_io *io)
{
	dm_io_acct(io, true);
}

static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
{
	struct dm_io *io;
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
	if (unlikely(!clone))
		return NULL;
	tio = clone_to_tio(clone);
	tio->flags = 0;
	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
	tio->io = NULL;

	io = container_of(tio, struct dm_io, tio);
	io->magic = DM_IO_MAGIC;
	io->status = BLK_STS_OK;

	/* one ref is for submission, the other is for completion */
	atomic_set(&io->io_count, 2);
	this_cpu_inc(*md->pending_io);
	io->orig_bio = bio;
	io->md = md;
	spin_lock_init(&io->lock);
	io->start_time = jiffies;
	io->flags = 0;
	if (blk_queue_io_stat(md->queue))
		dm_io_set_flag(io, DM_IO_BLK_STAT);

	if (static_branch_unlikely(&stats_enabled) &&
	    unlikely(dm_stats_used(&md->stats)))
		dm_stats_record_start(&md->stats, &io->stats_aux);

	return io;
}

static void free_io(struct dm_io *io)
{
	bio_put(&io->tio.clone);
}

static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{
	struct mapped_device *md = ci->io->md;
	struct dm_target_io *tio;
	struct bio *clone;

	if (!ci->io->tio.io) {
		/* the dm_target_io embedded in ci->io is available */
		tio = &ci->io->tio;
		/* alloc_io() already initialized embedded clone */
		clone = &tio->clone;
	} else {
		clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
					&md->mempools->bs);
		if (!clone)
			return NULL;

		/* REQ_DM_POLL_LIST shouldn't be inherited */
		clone->bi_opf &= ~REQ_DM_POLL_LIST;

		tio = clone_to_tio(clone);
		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
	}

	tio->magic = DM_TIO_MAGIC;
	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;
	tio->len_ptr = len;
	tio->old_sector = 0;

	/* Set default bdev, but target must bio_set_dev() before issuing IO */
	clone->bi_bdev = md->disk->part0;
	if (unlikely(ti->needs_bio_set_dev))
		bio_set_dev(clone, md->disk->part0);

	if (len) {
		clone->bi_iter.bi_size = to_bytes(*len);
		if (bio_integrity(clone))
			bio_integrity_trim(clone);
	}

	return clone;
}

static void free_tio(struct bio *clone)
{
	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
		return;
	bio_put(clone);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md,
				   int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md,
		       int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

static char *_dm_claim_ptr = "I belong to device-mapper";

/*
 * Open a table device so we can use it as a map destination.
 */
static struct table_device *open_table_device(struct mapped_device *md,
					      dev_t dev, blk_mode_t mode)
{
	struct table_device *td;
	struct file *bdev_file;
	struct block_device *bdev;
	u64 part_off;
	int r;

	td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
	if (!td)
		return ERR_PTR(-ENOMEM);
	refcount_set(&td->count, 1);

	bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
	if (IS_ERR(bdev_file)) {
		r = PTR_ERR(bdev_file);
		goto out_free_td;
	}

	bdev = file_bdev(bdev_file);

	/*
	 * We can be called before the dm disk is added. In that case we can't
	 * register the holder relation here; it will be done once add_disk()
	 * has been called.
	 */
	if (md->disk->slave_dir) {
		r = bd_link_disk_holder(bdev, md->disk);
		if (r)
			goto out_blkdev_put;
	}

	td->dm_dev.mode = mode;
	td->dm_dev.bdev = bdev;
	td->dm_dev.bdev_file = bdev_file;
	td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
						NULL, NULL);
	format_dev_t(td->dm_dev.name, dev);
	list_add(&td->list, &md->table_devices);
	return td;

out_blkdev_put:
	__fput_sync(bdev_file);
out_free_td:
	kfree(td);
	return ERR_PTR(r);
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (md->disk->slave_dir)
		bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);

	/* Leverage async fput() if DMF_DEFERRED_REMOVE set */
	if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		fput(td->dm_dev.bdev_file);
	else
		__fput_sync(td->dm_dev.bdev_file);

	put_dax(td->dm_dev.dax_dev);
	list_del(&td->list);
	kfree(td);
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      blk_mode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
			struct dm_dev **result)
{
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = open_table_device(md, dev, mode);
		if (IS_ERR(td)) {
			mutex_unlock(&md->table_devices_lock);
			return PTR_ERR(td);
		}
	} else {
		refcount_inc(&td->count);
	}
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (refcount_dec_and_test(&td->count))
		close_table_device(td, md);
	mutex_unlock(&md->table_devices_lock);
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMERR("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
{
	struct mapped_device *md = io->md;

	if (first_stage) {
		struct dm_io *next = md->requeue_list;

		md->requeue_list = io;
		io->next = next;
	} else {
		bio_list_add_head(&md->deferred, io->orig_bio);
	}
}

static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
{
	if (first_stage)
		queue_work(md->wq, &md->requeue_work);
	else
		queue_work(md->wq, &md->work);
}

/*
 * Return true if the dm_io's original bio is requeued.
 * io->status is updated with error if requeue disallowed.
 */
static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
	bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
				     (bio->bi_opf & REQ_POLLED));
	struct mapped_device *md = io->md;
	bool requeued = false;

	if (handle_requeue || handle_polled_eagain) {
		unsigned long flags;

		if (bio->bi_opf & REQ_POLLED) {
			/*
			 * Upper layer won't help us poll split bio
			 * (io->orig_bio may only reflect a subset of the
			 * pre-split original) so clear REQ_POLLED.
			 */
			bio_clear_polled(bio);
		}

		/*
		 * Target requested pushing back the I/O or
		 * polled IO hit BLK_STS_AGAIN.
		 */
		spin_lock_irqsave(&md->deferred_lock, flags);
		if ((__noflush_suspending(md) &&
		     !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
		    handle_polled_eagain || first_stage) {
			dm_requeue_add_io(io, first_stage);
			requeued = true;
		} else {
			/*
			 * noflush suspend was interrupted or this is
			 * a write to a zoned target.
			 */
			io->status = BLK_STS_IOERR;
		}
		spin_unlock_irqrestore(&md->deferred_lock, flags);
	}

	if (requeued)
		dm_kick_requeue(md, first_stage);

	return requeued;
}

static void __dm_io_complete(struct dm_io *io, bool first_stage)
{
	struct bio *bio = io->orig_bio;
	struct mapped_device *md = io->md;
	blk_status_t io_error;
	bool requeued;

	requeued = dm_handle_requeue(io, first_stage);
	if (requeued && first_stage)
		return;

	io_error = io->status;
	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
		dm_end_io_acct(io);
	else if (!io_error) {
		/*
		 * Must handle a target that returned DM_MAPIO_SUBMITTED but
		 * then completed the bio with bio_endio() rather than using
		 * dm_submit_bio_remap().
		 */
		__dm_start_io_acct(io);
		dm_end_io_acct(io);
	}
	free_io(io);
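	/* Order free_io() before the pending_io decrement seen by waiters */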
	smp_wmb();
	this_cpu_dec(*md->pending_io);

	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/* Return early if the original bio was requeued */
	if (requeued)
		return;

	if (bio_is_flush_with_data(bio)) {
		/*
		 * Preflush done for flush with data, reissue
		 * without REQ_PREFLUSH.
		 */
		bio->bi_opf &= ~REQ_PREFLUSH;
		queue_io(md, bio);
	} else {
		/* done with normal IO or empty flush */
		if (io_error)
			bio->bi_status = io_error;
		bio_endio(bio);
	}
}

static void dm_wq_requeue_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						requeue_work);
	unsigned long flags;
	struct dm_io *io;

	/* reuse deferred lock to simplify dm_handle_requeue */
	spin_lock_irqsave(&md->deferred_lock, flags);
	io = md->requeue_list;
	md->requeue_list = NULL;
	spin_unlock_irqrestore(&md->deferred_lock, flags);

	while (io) {
		struct dm_io *next = io->next;

		dm_io_rewind(io, &md->disk->bio_split);

		io->next = NULL;
		__dm_io_complete(io, false);
		io = next;
		cond_resched();
	}
}

/*
 * Two staged requeue:
 *
 * 1) io->orig_bio points to the real original bio, and the part mapped to
 *    this io must be requeued, instead of other parts of the original bio.
 *
 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
 */
static void dm_io_complete(struct dm_io *io)
{
	bool first_requeue;

	/*
	 * Only a dm_io that has been split needs the two-stage requeue;
	 * otherwise we may run into a long bio clone chain during suspend
	 * and OOM could be triggered.
	 *
	 * Also, a flush-with-data dm_io won't be marked DM_IO_WAS_SPLIT, so
	 * it isn't handled via the first-stage requeue either.
	 */
	if (dm_io_flagged(io, DM_IO_WAS_SPLIT))
		first_requeue = true;
	else
		first_requeue = false;

	__dm_io_complete(io, first_requeue);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void __dm_io_dec_pending(struct dm_io *io)
{
	if (atomic_dec_and_test(&io->io_count))
		dm_io_complete(io);
}

static void dm_io_set_error(struct dm_io *io, blk_status_t error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	spin_lock_irqsave(&io->lock, flags);
	if (!(io->status == BLK_STS_DM_REQUEUE &&
	      __noflush_suspending(io->md))) {
		io->status = error;
	}
	spin_unlock_irqrestore(&io->lock, flags);
}

static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
{
	if (unlikely(error))
		dm_io_set_error(io, error);

	__dm_io_dec_pending(io);
}

1076 * The queue_limits are only valid as long as you have a reference
1077 * count on 'md'. But _not_ imposing verification to avoid atomic_read(),
1078 */
static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	return &md->queue->limits;
}

void disable_discard(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support DISCARD, disable it */
	limits->max_hw_discard_sectors = 0;
}

void disable_write_zeroes(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE ZEROES, disable it */
	limits->max_write_zeroes_sectors = 0;
}

static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
{
	return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
}

static void clone_endio(struct bio *bio)
{
	blk_status_t error = bio->bi_status;
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_target *ti = tio->ti;
	dm_endio_fn endio = ti->type->end_io;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;

	if (unlikely(error == BLK_STS_TARGET)) {
		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !bdev_max_discard_sectors(bio->bi_bdev))
			disable_discard(md);
		else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
			 !bdev_write_zeroes_sectors(bio->bi_bdev))
			disable_write_zeroes(md);
	}

	if (static_branch_unlikely(&zoned_enabled) &&
	    unlikely(bdev_is_zoned(bio->bi_bdev)))
		dm_zone_endio(io, bio);

	if (endio) {
		int r = endio(ti, bio, &error);

		switch (r) {
		case DM_ENDIO_REQUEUE:
			if (static_branch_unlikely(&zoned_enabled)) {
				/*
				 * Requeuing writes to a sequential zone of a zoned
				 * target will break the sequential write pattern:
				 * fail such IO.
				 */
				if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
					error = BLK_STS_IOERR;
				else
					error = BLK_STS_DM_REQUEUE;
			} else
				error = BLK_STS_DM_REQUEUE;
			fallthrough;
		case DM_ENDIO_DONE:
			break;
		case DM_ENDIO_INCOMPLETE:
			/* The target will handle the io */
			return;
		default:
			DMCRIT("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, bio)))
		up(&md->swap_bios_semaphore);

	free_tio(bio);
	dm_io_dec_pending(io, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
						  sector_t target_offset)
{
	return ti->len - target_offset;
}

static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
			     unsigned int max_granularity,
			     unsigned int max_sectors)
{
	sector_t target_offset = dm_target_offset(ti, sector);
	sector_t len = max_io_len_target_boundary(ti, target_offset);

	/*
	 * Does the target need to split IO even further?
	 * - varied (per target) IO splitting is a tenet of DM; this
	 *   explains why stacked chunk_sectors based splitting via
	 *   bio_split_to_limits() isn't possible here.
	 */
	if (!max_granularity)
		return len;
	return min_t(sector_t, len,
		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
		    blk_chunk_sectors_left(target_offset, max_granularity)));
}

static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
{
	return __max_io_len(ti, sector, ti->max_io_len, 0);
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
						sector_t sector, int *srcu_idx)
	__acquires(md->io_barrier)
{
	struct dm_table *map;
	struct dm_target *ti;

	map = dm_get_live_table(md, srcu_idx);
	if (!map)
		return NULL;

	ti = dm_table_find_target(map, sector);
	if (!ti)
		return NULL;

	return ti;
}

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	long len, ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (!ti->type->direct_access)
		goto out;
	len = max_io_len(ti, sector) / PAGE_SECTORS;
	if (len < 1)
		goto out;
	nr_pages = min(len, nr_pages);
	ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);

out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int ret = -EIO;
	int srcu_idx;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);

	if (!ti)
		goto out;
	if (WARN_ON(!ti->type->dax_zero_page_range)) {
		/*
		 * ->zero_page_range() is mandatory dax operation. If we are
		 * here, something is wrong.
		 */
		goto out;
	}
	ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
out:
	dm_put_live_table(md, srcu_idx);

	return ret;
}

static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct mapped_device *md = dax_get_private(dax_dev);
	sector_t sector = pgoff * PAGE_SECTORS;
	struct dm_target *ti;
	int srcu_idx;
	long ret = 0;

	ti = dm_dax_get_live_target(md, sector, &srcu_idx);
	if (!ti || !ti->type->dax_recovery_write)
		goto out;

	ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
out:
	dm_put_live_table(md, srcu_idx);
	return ret;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine. It is
 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
 * __send_duplicate_bios().
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <----- bio_sectors ----->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
	struct dm_target_io *tio = clone_to_tio(bio);
	struct dm_io *io = tio->io;
	unsigned int bio_sectors = bio_sectors(bio);

	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
	BUG_ON(bio_sectors > *tio->len_ptr);
	BUG_ON(n_sectors > bio_sectors);

	*tio->len_ptr -= bio_sectors - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;

	/*
	 * __split_and_process_bio() may have already saved mapped part
	 * for accounting but it is being reduced so update accordingly.
	 */
	dm_io_set_flag(io, DM_IO_WAS_SPLIT);
	io->sectors = n_sectors;
	io->sector_offset = bio_sectors(io->orig_bio);
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

/*
 * @clone: clone bio that DM core passed to target's .map function
 * @tgt_clone: clone of @clone bio that target needs submitted
 *
 * Targets should use this interface to submit bios they take
 * ownership of when returning DM_MAPIO_SUBMITTED.
 *
 * Target should also enable ti->accounts_remapped_io
 */
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_io *io = tio->io;

	/* establish bio that will get submitted */
	if (!tgt_clone)
		tgt_clone = clone;

	/*
	 * Account io->orig_bio to DM dev on behalf of target
	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
	 */
	dm_start_io_acct(io, clone);

	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
			      tio->old_sector);
	submit_bio_noacct(tgt_clone);
}
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);

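/*
 * Bring md->swap_bios (and the matching semaphore count) in sync with a
 * changed 'swap_bios' limit.
 */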
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
	mutex_lock(&md->swap_bios_lock);
	while (latch < md->swap_bios) {
		cond_resched();
		down(&md->swap_bios_semaphore);
		md->swap_bios--;
	}
	while (latch > md->swap_bios) {
		cond_resched();
		up(&md->swap_bios_semaphore);
		md->swap_bios++;
	}
	mutex_unlock(&md->swap_bios_lock);
}

static void __map_bio(struct bio *clone)
{
	struct dm_target_io *tio = clone_to_tio(clone);
	struct dm_target *ti = tio->ti;
	struct dm_io *io = tio->io;
	struct mapped_device *md = io->md;
	int r;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.
	 */
	tio->old_sector = clone->bi_iter.bi_sector;

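	/*
	 * Throttle swap bios if the target limits them: each in-flight
	 * swap bio holds md->swap_bios_semaphore until clone_endio()
	 * releases it.
	 */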
	if (static_branch_unlikely(&swap_bios_enabled) &&
	    unlikely(swap_bios_limit(ti, clone))) {
		int latch = get_swap_bios();

		if (unlikely(latch != md->swap_bios))
			__set_swap_bios_limit(md, latch);
		down(&md->swap_bios_semaphore);
	}

	if (likely(ti->type->map == linear_map))
		r = linear_map(ti, clone);
	else if (ti->type->map == stripe_map)
		r = stripe_map(ti, clone);
	else
		r = ti->type->map(ti, clone);

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* target has assumed ownership of this io */
		if (!ti->accounts_remapped_io)
			dm_start_io_acct(io, clone);
		break;
	case DM_MAPIO_REMAPPED:
		dm_submit_bio_remap(clone, NULL);
		break;
	case DM_MAPIO_KILL:
	case DM_MAPIO_REQUEUE:
		if (static_branch_unlikely(&swap_bios_enabled) &&
		    unlikely(swap_bios_limit(ti, clone)))
			up(&md->swap_bios_semaphore);
		free_tio(clone);
		if (r == DM_MAPIO_KILL)
			dm_io_dec_pending(io, BLK_STS_IOERR);
		else
			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}
}

static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{
	struct dm_io *io = ci->io;

	if (ci->sector_count > len) {
		/*
		 * Split needed, save the mapped part for accounting.
		 * NOTE: dm_accept_partial_bio() will update accordingly.
		 */
		dm_io_set_flag(io, DM_IO_WAS_SPLIT);
		io->sectors = len;
		io->sector_offset = bio_sectors(ci->bio);
	}
}

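/*
 * Attempt a non-blocking GFP_NOWAIT pass first when allowed; if any
 * allocation fails, free the partial list and retry with GFP_NOIO,
 * taking table_devices_lock when allocating more than one bio.
 */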
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
				struct dm_target *ti, unsigned int num_bios,
				unsigned *len, gfp_t gfp_flag)
{
	struct bio *bio;
	int try = (gfp_flag & GFP_NOWAIT) ? 0 : 1;

	for (; try < 2; try++) {
		int bio_nr;

		if (try && num_bios > 1)
			mutex_lock(&ci->io->md->table_devices_lock);
		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
			bio = alloc_tio(ci, ti, bio_nr, len,
					try ? GFP_NOIO : GFP_NOWAIT);
			if (!bio)
				break;

			bio_list_add(blist, bio);
		}
		if (try && num_bios > 1)
			mutex_unlock(&ci->io->md->table_devices_lock);
		if (bio_nr == num_bios)
			return;

		while ((bio = bio_list_pop(blist)))
			free_tio(bio);
	}
}

static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
					  unsigned int num_bios, unsigned int *len,
					  gfp_t gfp_flag)
{
	struct bio_list blist = BIO_EMPTY_LIST;
	struct bio *clone;
	unsigned int ret = 0;

	if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
		return 0;

	/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
	if (len)
		setup_split_accounting(ci, *len);

	/*
	 * Using alloc_multiple_bios(), even if num_bios is 1, to consistently
	 * support allocating using GFP_NOWAIT with GFP_NOIO fallback.
	 */
	alloc_multiple_bios(&blist, ci, ti, num_bios, len, gfp_flag);
	while ((clone = bio_list_pop(&blist))) {
		if (num_bios > 1)
			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
		__map_bio(clone);
		ret += 1;
	}

	return ret;
}

static void __send_empty_flush(struct clone_info *ci)
{
	struct dm_table *t = ci->map;
	struct bio flush_bio;

	/*
	 * Use an on-stack bio for this, it's safe since we don't
	 * need to reference it after submit. It's just used as
	 * the basis for the clone(s).
	 */
	bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);

	ci->bio = &flush_bio;
	ci->sector_count = 0;
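	/*
	 * The embedded tio clone in ci->io was initialized from the original
	 * bio by alloc_io(): clear its size so the flush clone carries no
	 * payload.
	 */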
1554 ci->io->tio.clone.bi_iter.bi_size = 0;
1555
1556 for (unsigned int i = 0; i < t->num_targets; i++) {
1557 unsigned int bios;
1558 struct dm_target *ti = dm_table_get_target(t, i);
1559
1560 if (unlikely(ti->num_flush_bios == 0))
1561 continue;
1562
1563 atomic_add(ti->num_flush_bios, &ci->io->io_count);
1564 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
1565 NULL, GFP_NOWAIT);
1566 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1567 }
1568
1569 /*
1570 * alloc_io() takes one extra reference for submission, so the
1571 * reference won't reach 0 without the following subtraction
1572 */
1573 atomic_sub(1, &ci->io->io_count);
1574
1575 bio_uninit(ci->bio);
1576 }
1577
__send_abnormal_io(struct clone_info * ci,struct dm_target * ti,unsigned int num_bios,unsigned int max_granularity,unsigned int max_sectors)1578 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1579 unsigned int num_bios, unsigned int max_granularity,
1580 unsigned int max_sectors)
1581 {
1582 unsigned int len, bios;
1583
1584 len = min_t(sector_t, ci->sector_count,
1585 __max_io_len(ti, ci->sector, max_granularity, max_sectors));
1586
1587 atomic_add(num_bios, &ci->io->io_count);
1588 bios = __send_duplicate_bios(ci, ti, num_bios, &len, GFP_NOIO);
1589 /*
1590 * alloc_io() takes one extra reference for submission, so the
1591 * reference won't reach 0 without the following (+1) subtraction
1592 */
1593 atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1594
1595 ci->sector += len;
1596 ci->sector_count -= len;
1597 }
1598
is_abnormal_io(struct bio * bio)1599 static bool is_abnormal_io(struct bio *bio)
1600 {
1601 enum req_op op = bio_op(bio);
1602
1603 if (op != REQ_OP_READ && op != REQ_OP_WRITE && op != REQ_OP_FLUSH) {
1604 switch (op) {
1605 case REQ_OP_DISCARD:
1606 case REQ_OP_SECURE_ERASE:
1607 case REQ_OP_WRITE_ZEROES:
1608 return true;
1609 default:
1610 break;
1611 }
1612 }
1613
1614 return false;
1615 }
1616
__process_abnormal_io(struct clone_info * ci,struct dm_target * ti)1617 static blk_status_t __process_abnormal_io(struct clone_info *ci,
1618 struct dm_target *ti)
1619 {
1620 unsigned int num_bios = 0;
1621 unsigned int max_granularity = 0;
1622 unsigned int max_sectors = 0;
1623 struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
1624
1625 switch (bio_op(ci->bio)) {
1626 case REQ_OP_DISCARD:
1627 num_bios = ti->num_discard_bios;
1628 max_sectors = limits->max_discard_sectors;
1629 if (ti->max_discard_granularity)
1630 max_granularity = max_sectors;
1631 break;
1632 case REQ_OP_SECURE_ERASE:
1633 num_bios = ti->num_secure_erase_bios;
1634 max_sectors = limits->max_secure_erase_sectors;
1635 if (ti->max_secure_erase_granularity)
1636 max_granularity = max_sectors;
1637 break;
1638 case REQ_OP_WRITE_ZEROES:
1639 num_bios = ti->num_write_zeroes_bios;
1640 max_sectors = limits->max_write_zeroes_sectors;
1641 if (ti->max_write_zeroes_granularity)
1642 max_granularity = max_sectors;
1643 break;
1644 default:
1645 break;
1646 }
1647
1648 /*
1649 * Even though the device advertised support for this type of
1650 * request, that does not mean every target supports it, and
1651 * reconfiguration might also have changed that since the
1652 * check was performed.
1653 */
1654 if (unlikely(!num_bios))
1655 return BLK_STS_NOTSUPP;
1656
1657 __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
1658
1659 return BLK_STS_OK;
1660 }
1661
1662 /*
1663 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1664 * associated with this bio, and this bio's bi_private needs to be
1665 * stored in dm_io->data before the reuse.
1666 *
1667 * bio->bi_private is owned by fs or upper layer, so block layer won't
1668 * touch it after splitting. Meantime it won't be changed by anyone after
1669 * bio is submitted. So this reuse is safe.
1670 */
dm_poll_list_head(struct bio * bio)1671 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1672 {
1673 return (struct dm_io **)&bio->bi_private;
1674 }
1675
dm_queue_poll_io(struct bio * bio,struct dm_io * io)1676 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1677 {
1678 struct dm_io **head = dm_poll_list_head(bio);
1679
1680 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1681 bio->bi_opf |= REQ_DM_POLL_LIST;
1682 /*
1683 * Save .bi_private into dm_io, so that we can reuse
1684 * .bi_private as dm_io list head for storing dm_io list
1685 */
1686 io->data = bio->bi_private;
1687
1688 /* tell block layer to poll for completion */
1689 bio->bi_cookie = ~BLK_QC_T_NONE;
1690
1691 io->next = NULL;
1692 } else {
1693 /*
1694 * bio recursed due to split, reuse original poll list,
1695 * and save bio->bi_private too.
1696 */
1697 io->data = (*head)->data;
1698 io->next = *head;
1699 }
1700
1701 *head = io;
1702 }
1703
1704 /*
1705 * Select the correct strategy for processing a non-flush bio.
1706 */
__split_and_process_bio(struct clone_info * ci)1707 static blk_status_t __split_and_process_bio(struct clone_info *ci)
1708 {
1709 struct bio *clone;
1710 struct dm_target *ti;
1711 unsigned int len;
1712
1713 ti = dm_table_find_target(ci->map, ci->sector);
1714 if (unlikely(!ti))
1715 return BLK_STS_IOERR;
1716
1717 if (unlikely(ci->is_abnormal_io))
1718 return __process_abnormal_io(ci, ti);
1719
1720 /*
1721 * Only support bio polling for normal IO, and the target io is
1722 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1723 */
1724 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1725
1726 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1727 setup_split_accounting(ci, len);
1728
1729 if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
1730 if (unlikely(!dm_target_supports_nowait(ti->type)))
1731 return BLK_STS_NOTSUPP;
1732
1733 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
1734 if (unlikely(!clone))
1735 return BLK_STS_AGAIN;
1736 } else {
1737 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1738 }
1739 __map_bio(clone);
1740
1741 ci->sector += len;
1742 ci->sector_count -= len;
1743
1744 return BLK_STS_OK;
1745 }
1746
init_clone_info(struct clone_info * ci,struct dm_io * io,struct dm_table * map,struct bio * bio,bool is_abnormal)1747 static void init_clone_info(struct clone_info *ci, struct dm_io *io,
1748 struct dm_table *map, struct bio *bio, bool is_abnormal)
1749 {
1750 ci->map = map;
1751 ci->io = io;
1752 ci->bio = bio;
1753 ci->is_abnormal_io = is_abnormal;
1754 ci->submit_as_polled = false;
1755 ci->sector = bio->bi_iter.bi_sector;
1756 ci->sector_count = bio_sectors(bio);
1757
1758 /* Shouldn't happen but sector_count was being set to 0 so... */
1759 if (static_branch_unlikely(&zoned_enabled) &&
1760 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1761 ci->sector_count = 0;
1762 }
1763
1764 #ifdef CONFIG_BLK_DEV_ZONED
dm_zone_bio_needs_split(struct mapped_device * md,struct bio * bio)1765 static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
1766 struct bio *bio)
1767 {
1768 /*
1769 * For mapped device that need zone append emulation, we must
1770 * split any large BIO that straddles zone boundaries.
1771 */
1772 return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
1773 !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
1774 }
dm_zone_plug_bio(struct mapped_device * md,struct bio * bio)1775 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1776 {
1777 return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
1778 }
1779 #else
dm_zone_bio_needs_split(struct mapped_device * md,struct bio * bio)1780 static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
1781 struct bio *bio)
1782 {
1783 return false;
1784 }
dm_zone_plug_bio(struct mapped_device * md,struct bio * bio)1785 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1786 {
1787 return false;
1788 }
1789 #endif
1790
1791 /*
1792 * Entry point to split a bio into clones and submit them to the targets.
1793 */
dm_split_and_process_bio(struct mapped_device * md,struct dm_table * map,struct bio * bio)1794 static void dm_split_and_process_bio(struct mapped_device *md,
1795 struct dm_table *map, struct bio *bio)
1796 {
1797 struct clone_info ci;
1798 struct dm_io *io;
1799 blk_status_t error = BLK_STS_OK;
1800 bool is_abnormal, need_split;
1801
1802 need_split = is_abnormal = is_abnormal_io(bio);
1803 if (static_branch_unlikely(&zoned_enabled))
1804 need_split = is_abnormal || dm_zone_bio_needs_split(md, bio);
1805
1806 if (unlikely(need_split)) {
1807 /*
1808 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
1809 * otherwise associated queue_limits won't be imposed.
1810 * Also split the BIO for mapped devices needing zone append
1811 * emulation to ensure that the BIO does not cross zone
1812 * boundaries.
1813 */
1814 bio = bio_split_to_limits(bio);
1815 if (!bio)
1816 return;
1817 }
1818
1819 /*
1820 * Use the block layer zone write plugging for mapped devices that
1821 * need zone append emulation (e.g. dm-crypt).
1822 */
1823 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
1824 return;
1825
1826 /* Only support nowait for normal IO */
1827 if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
1828 io = alloc_io(md, bio, GFP_NOWAIT);
1829 if (unlikely(!io)) {
1830 /* Unable to do anything without dm_io. */
1831 bio_wouldblock_error(bio);
1832 return;
1833 }
1834 } else {
1835 io = alloc_io(md, bio, GFP_NOIO);
1836 }
1837 init_clone_info(&ci, io, map, bio, is_abnormal);
1838
1839 if (bio->bi_opf & REQ_PREFLUSH) {
1840 __send_empty_flush(&ci);
1841 /* dm_io_complete submits any data associated with flush */
1842 goto out;
1843 }
1844
1845 error = __split_and_process_bio(&ci);
1846 if (error || !ci.sector_count)
1847 goto out;
1848 /*
1849 * Remainder must be passed to submit_bio_noacct() so it gets handled
1850 * *after* bios already submitted have been completely processed.
1851 */
1852 bio_trim(bio, io->sectors, ci.sector_count);
1853 trace_block_split(bio, bio->bi_iter.bi_sector);
1854 bio_inc_remaining(bio);
1855 submit_bio_noacct(bio);
1856 out:
1857 /*
1858 * Drop the extra reference count for non-POLLED bio, and hold one
1859 * reference for POLLED bio, which will be released in dm_poll_bio
1860 *
1861 * Add every dm_io instance into the dm_io list head which is stored
1862 * in bio->bi_private, so that dm_poll_bio can poll them all.
1863 */
1864 if (error || !ci.submit_as_polled) {
1865 /*
1866 * In case of submission failure, the extra reference for
1867 * submitting io isn't consumed yet
1868 */
1869 if (error)
1870 atomic_dec(&io->io_count);
1871 dm_io_dec_pending(io, error);
1872 } else
1873 dm_queue_poll_io(bio, io);
1874 }
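
/*
 * Illustrative sketch of the remainder handling above: a common block
 * layer idiom is to trim the original bio down to its unprocessed tail,
 * take an extra "remaining" reference, and resubmit, so the tail is only
 * handled after the clones already issued. "done" and "left" below are
 * placeholders for io->sectors and ci.sector_count:
 *
 *	bio_trim(bio, done, left);
 *	bio_inc_remaining(bio);
 *	submit_bio_noacct(bio);
 */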
1875
1876 static void dm_submit_bio(struct bio *bio)
1877 {
1878 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1879 int srcu_idx;
1880 struct dm_table *map;
1881
1882 map = dm_get_live_table(md, &srcu_idx);
1883
1884 /* If suspended, or map not yet available, queue this IO for later */
1885 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
1886 unlikely(!map)) {
1887 if (bio->bi_opf & REQ_NOWAIT)
1888 bio_wouldblock_error(bio);
1889 else if (bio->bi_opf & REQ_RAHEAD)
1890 bio_io_error(bio);
1891 else
1892 queue_io(md, bio);
1893 goto out;
1894 }
1895
1896 dm_split_and_process_bio(md, map, bio);
1897 out:
1898 dm_put_live_table(md, srcu_idx);
1899 }
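
/*
 * Illustrative sketch of the SRCU-protected table access that every I/O
 * entry point in this file follows; SRCU (sleepable RCU) is used because
 * the read side may block while the table is held:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... use map; this section may sleep ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */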
1900
1901 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
1902 unsigned int flags)
1903 {
1904 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
1905
1906 /* don't poll if the mapped io is done */
1907 if (atomic_read(&io->io_count) > 1)
1908 bio_poll(&io->tio.clone, iob, flags);
1909
1910 /* bio_poll holds the last reference */
1911 return atomic_read(&io->io_count) == 1;
1912 }
1913
1914 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1915 unsigned int flags)
1916 {
1917 struct dm_io **head = dm_poll_list_head(bio);
1918 struct dm_io *list = *head;
1919 struct dm_io *tmp = NULL;
1920 struct dm_io *curr, *next;
1921
1922 /* Only poll a normal bio that was marked with REQ_DM_POLL_LIST */
1923 if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1924 return 0;
1925
1926 WARN_ON_ONCE(!list);
1927
1928 /*
1929 * Restore .bi_private before possibly completing dm_io.
1930 *
1931 * bio_poll() is only possible once @bio has been completely
1932 * submitted via submit_bio_noacct()'s depth-first submission.
1933 * So there is no dm_queue_poll_io() race associated with
1934 * clearing REQ_DM_POLL_LIST here.
1935 */
1936 bio->bi_opf &= ~REQ_DM_POLL_LIST;
1937 bio->bi_private = list->data;
1938
1939 for (curr = list, next = curr->next; curr; curr = next, next =
1940 curr ? curr->next : NULL) {
1941 if (dm_poll_dm_io(curr, iob, flags)) {
1942 /*
1943 * clone_endio() has already occurred, so no
1944 * error handling is needed here.
1945 */
1946 __dm_io_dec_pending(curr);
1947 } else {
1948 curr->next = tmp;
1949 tmp = curr;
1950 }
1951 }
1952
1953 /* Not done? */
1954 if (tmp) {
1955 bio->bi_opf |= REQ_DM_POLL_LIST;
1956 /* Reset bio->bi_private to dm_io list head */
1957 *head = tmp;
1958 return 0;
1959 }
1960 return 1;
1961 }
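
/*
 * Illustrative sketch of the list pruning above, stripped of the DM
 * specifics: finished entries are completed, survivors are pushed onto a
 * new list that becomes the poll list for the next call. io_is_done() is
 * a hypothetical predicate standing in for dm_poll_dm_io():
 *
 *	struct dm_io *tmp = NULL, *curr, *next;
 *
 *	for (curr = list; curr; curr = next) {
 *		next = curr->next;
 *		if (io_is_done(curr)) {
 *			__dm_io_dec_pending(curr);
 *		} else {
 *			curr->next = tmp;
 *			tmp = curr;
 *		}
 *	}
 *	list = tmp;
 */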
1962
1963 /*
1964 *---------------------------------------------------------------
1965 * An IDR is used to keep track of allocated minor numbers.
1966 *---------------------------------------------------------------
1967 */
1968 static void free_minor(int minor)
1969 {
1970 spin_lock(&_minor_lock);
1971 idr_remove(&_minor_idr, minor);
1972 spin_unlock(&_minor_lock);
1973 }
1974
1975 /*
1976 * See if the device with a specific minor # is free.
1977 */
1978 static int specific_minor(int minor)
1979 {
1980 int r;
1981
1982 if (minor >= (1 << MINORBITS))
1983 return -EINVAL;
1984
1985 idr_preload(GFP_KERNEL);
1986 spin_lock(&_minor_lock);
1987
1988 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1989
1990 spin_unlock(&_minor_lock);
1991 idr_preload_end();
1992 if (r < 0)
1993 return r == -ENOSPC ? -EBUSY : r;
1994 return 0;
1995 }
1996
1997 static int next_free_minor(int *minor)
1998 {
1999 int r;
2000
2001 idr_preload(GFP_KERNEL);
2002 spin_lock(&_minor_lock);
2003
2004 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2005
2006 spin_unlock(&_minor_lock);
2007 idr_preload_end();
2008 if (r < 0)
2009 return r;
2010 *minor = r;
2011 return 0;
2012 }
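
/*
 * Both allocators above follow the standard IDR idiom for atomic
 * contexts: preallocate with idr_preload(GFP_KERNEL) while sleeping is
 * still allowed, then allocate under the spinlock with GFP_NOWAIT so the
 * locked section never sleeps. A minimal sketch, with "lock", "idr" and
 * "ptr" as placeholders:
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&lock);
 *	id = idr_alloc(&idr, ptr, start, end, GFP_NOWAIT);
 *	spin_unlock(&lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */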
2013
2014 static const struct block_device_operations dm_blk_dops;
2015 static const struct block_device_operations dm_rq_blk_dops;
2016 static const struct dax_operations dm_dax_ops;
2017
2018 static void dm_wq_work(struct work_struct *work);
2019
2020 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
2021 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
2022 {
2023 dm_destroy_crypto_profile(q->crypto_profile);
2024 }
2025
2026 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
2027
2028 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
2029 {
2030 }
2031 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
2032
2033 static void cleanup_mapped_device(struct mapped_device *md)
2034 {
2035 if (md->wq)
2036 destroy_workqueue(md->wq);
2037 dm_free_md_mempools(md->mempools);
2038
2039 if (md->dax_dev) {
2040 dax_remove_host(md->disk);
2041 kill_dax(md->dax_dev);
2042 put_dax(md->dax_dev);
2043 md->dax_dev = NULL;
2044 }
2045
2046 if (md->disk) {
2047 spin_lock(&_minor_lock);
2048 md->disk->private_data = NULL;
2049 spin_unlock(&_minor_lock);
2050 if (dm_get_md_type(md) != DM_TYPE_NONE) {
2051 struct table_device *td;
2052
2053 dm_sysfs_exit(md);
2054 list_for_each_entry(td, &md->table_devices, list) {
2055 bd_unlink_disk_holder(td->dm_dev.bdev,
2056 md->disk);
2057 }
2058
2059 /*
2060 * Hold the lock to make sure del_gendisk() won't run concurrently
2061 * with open/close_table_device().
2062 */
2063 mutex_lock(&md->table_devices_lock);
2064 del_gendisk(md->disk);
2065 mutex_unlock(&md->table_devices_lock);
2066 }
2067 dm_queue_destroy_crypto_profile(md->queue);
2068 put_disk(md->disk);
2069 }
2070
2071 if (md->pending_io) {
2072 free_percpu(md->pending_io);
2073 md->pending_io = NULL;
2074 }
2075
2076 cleanup_srcu_struct(&md->io_barrier);
2077
2078 mutex_destroy(&md->suspend_lock);
2079 mutex_destroy(&md->type_lock);
2080 mutex_destroy(&md->table_devices_lock);
2081 mutex_destroy(&md->swap_bios_lock);
2082
2083 dm_mq_cleanup_mapped_device(md);
2084 }
2085
2086 /*
2087 * Allocate and initialise a blank device with a given minor.
2088 */
2089 static struct mapped_device *alloc_dev(int minor)
2090 {
2091 int r, numa_node_id = dm_get_numa_node();
2092 struct dax_device *dax_dev;
2093 struct mapped_device *md;
2094 void *old_md;
2095
2096 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2097 if (!md) {
2098 DMERR("unable to allocate device, out of memory.");
2099 return NULL;
2100 }
2101
2102 if (!try_module_get(THIS_MODULE))
2103 goto bad_module_get;
2104
2105 /* get a minor number for the dev */
2106 if (minor == DM_ANY_MINOR)
2107 r = next_free_minor(&minor);
2108 else
2109 r = specific_minor(minor);
2110 if (r < 0)
2111 goto bad_minor;
2112
2113 r = init_srcu_struct(&md->io_barrier);
2114 if (r < 0)
2115 goto bad_io_barrier;
2116
2117 md->numa_node_id = numa_node_id;
2118 md->init_tio_pdu = false;
2119 md->type = DM_TYPE_NONE;
2120 mutex_init(&md->suspend_lock);
2121 mutex_init(&md->type_lock);
2122 mutex_init(&md->table_devices_lock);
2123 spin_lock_init(&md->deferred_lock);
2124 atomic_set(&md->holders, 1);
2125 atomic_set(&md->open_count, 0);
2126 atomic_set(&md->event_nr, 0);
2127 atomic_set(&md->uevent_seq, 0);
2128 INIT_LIST_HEAD(&md->uevent_list);
2129 INIT_LIST_HEAD(&md->table_devices);
2130 spin_lock_init(&md->uevent_lock);
2131
2132 /*
2133 * Default to bio-based until a DM table is loaded and md->type is
2134 * established. If a request-based table is loaded, blk-mq will
2135 * override accordingly.
2136 */
2137 md->disk = blk_alloc_disk(NULL, md->numa_node_id);
2138 if (IS_ERR(md->disk))
2139 goto bad;
2140 md->queue = md->disk->queue;
2141
2142 init_waitqueue_head(&md->wait);
2143 INIT_WORK(&md->work, dm_wq_work);
2144 INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2145 init_waitqueue_head(&md->eventq);
2146 init_completion(&md->kobj_holder.completion);
2147
2148 md->requeue_list = NULL;
2149 md->swap_bios = get_swap_bios();
2150 sema_init(&md->swap_bios_semaphore, md->swap_bios);
2151 mutex_init(&md->swap_bios_lock);
2152
2153 md->disk->major = _major;
2154 md->disk->first_minor = minor;
2155 md->disk->minors = 1;
2156 md->disk->flags |= GENHD_FL_NO_PART;
2157 md->disk->fops = &dm_blk_dops;
2158 md->disk->private_data = md;
2159 sprintf(md->disk->disk_name, "dm-%d", minor);
2160
2161 dax_dev = alloc_dax(md, &dm_dax_ops);
2162 if (IS_ERR(dax_dev)) {
2163 if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
2164 goto bad;
2165 } else {
2166 set_dax_nocache(dax_dev);
2167 set_dax_nomc(dax_dev);
2168 md->dax_dev = dax_dev;
2169 if (dax_add_host(dax_dev, md->disk))
2170 goto bad;
2171 }
2172
2173 format_dev_t(md->name, MKDEV(_major, minor));
2174
2175 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM, 0, md->name);
2176 if (!md->wq)
2177 goto bad;
2178
2179 md->pending_io = alloc_percpu(unsigned long);
2180 if (!md->pending_io)
2181 goto bad;
2182
2183 r = dm_stats_init(&md->stats);
2184 if (r < 0)
2185 goto bad;
2186
2187 /* Populate the mapping, nobody knows we exist yet */
2188 spin_lock(&_minor_lock);
2189 old_md = idr_replace(&_minor_idr, md, minor);
2190 spin_unlock(&_minor_lock);
2191
2192 BUG_ON(old_md != MINOR_ALLOCED);
2193
2194 return md;
2195
2196 bad:
2197 cleanup_mapped_device(md);
2198 bad_io_barrier:
2199 free_minor(minor);
2200 bad_minor:
2201 module_put(THIS_MODULE);
2202 bad_module_get:
2203 kvfree(md);
2204 return NULL;
2205 }
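
/*
 * The error handling above is the usual kernel goto ladder: each failure
 * jumps to the label that undoes only what was set up before it, in
 * reverse order. A reduced sketch of the shape (acquire_a/acquire_b and
 * the labels are placeholders, not names from this file):
 *
 *	if (!acquire_a())
 *		goto bad_a;
 *	if (!acquire_b())
 *		goto bad_b;
 *	return obj;
 *
 * bad_b:
 *	release_a();
 * bad_a:
 *	kvfree(obj);
 *	return NULL;
 */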
2206
2207 static void unlock_fs(struct mapped_device *md);
2208
2209 static void free_dev(struct mapped_device *md)
2210 {
2211 int minor = MINOR(disk_devt(md->disk));
2212
2213 unlock_fs(md);
2214
2215 cleanup_mapped_device(md);
2216
2217 WARN_ON_ONCE(!list_empty(&md->table_devices));
2218 dm_stats_cleanup(&md->stats);
2219 free_minor(minor);
2220
2221 module_put(THIS_MODULE);
2222 kvfree(md);
2223 }
2224
2225 /*
2226 * Bind a table to the device.
2227 */
2228 static void event_callback(void *context)
2229 {
2230 unsigned long flags;
2231 LIST_HEAD(uevents);
2232 struct mapped_device *md = context;
2233
2234 spin_lock_irqsave(&md->uevent_lock, flags);
2235 list_splice_init(&md->uevent_list, &uevents);
2236 spin_unlock_irqrestore(&md->uevent_lock, flags);
2237
2238 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2239
2240 atomic_inc(&md->event_nr);
2241 wake_up(&md->eventq);
2242 dm_issue_global_event();
2243 }
2244
2245 /*
2246 * Returns old map, which caller must destroy.
2247 */
2248 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2249 struct queue_limits *limits)
2250 {
2251 struct dm_table *old_map;
2252 sector_t size;
2253 int ret;
2254
2255 lockdep_assert_held(&md->suspend_lock);
2256
2257 size = dm_table_get_size(t);
2258
2259 /*
2260 * Wipe any geometry if the size of the table changed.
2261 */
2262 if (size != dm_get_size(md))
2263 memset(&md->geometry, 0, sizeof(md->geometry));
2264
2265 set_capacity(md->disk, size);
2266
2267 dm_table_event_callback(t, event_callback, md);
2268
2269 if (dm_table_request_based(t)) {
2270 /*
2271 * Leverage the fact that request-based DM targets are
2272 * immutable singletons - used to optimize dm_mq_queue_rq.
2273 */
2274 md->immutable_target = dm_table_get_immutable_target(t);
2275
2276 /*
2277 * There is no need to reload with request-based dm because the
2278 * size of front_pad doesn't change.
2279 *
2280 * Note for the future: if you are to reload the bioset, prepped
2281 * requests in the queue may refer to bios from the old bioset,
2282 * so you must walk through the queue to unprep them.
2283 */
2284 if (!md->mempools) {
2285 md->mempools = t->mempools;
2286 t->mempools = NULL;
2287 }
2288 } else {
2289 /*
2290 * The md may already have mempools that need changing.
2291 * If so, reload the bioset because front_pad may have changed
2292 * when a different table was loaded.
2293 */
2294 dm_free_md_mempools(md->mempools);
2295 md->mempools = t->mempools;
2296 t->mempools = NULL;
2297 }
2298
2299 ret = dm_table_set_restrictions(t, md->queue, limits);
2300 if (ret) {
2301 old_map = ERR_PTR(ret);
2302 goto out;
2303 }
2304
2305 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2306 rcu_assign_pointer(md->map, (void *)t);
2307 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2308
2309 if (old_map)
2310 dm_sync_table(md);
2311 out:
2312 return old_map;
2313 }
2314
2315 /*
2316 * Returns unbound table for the caller to free.
2317 */
2318 static struct dm_table *__unbind(struct mapped_device *md)
2319 {
2320 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2321
2322 if (!map)
2323 return NULL;
2324
2325 dm_table_event_callback(map, NULL, NULL);
2326 RCU_INIT_POINTER(md->map, NULL);
2327 dm_sync_table(md);
2328
2329 return map;
2330 }
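
/*
 * Illustrative sketch of the RCU publish/retire sequence __bind() and
 * __unbind() use for md->map, all under md->suspend_lock:
 *
 *	old = rcu_dereference_protected(md->map,
 *				lockdep_is_held(&md->suspend_lock));
 *	rcu_assign_pointer(md->map, new);
 *	dm_sync_table(md);
 *
 * dm_sync_table() waits for readers of the old table to drain, after
 * which the caller may safely destroy "old".
 */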
2331
2332 /*
2333 * Constructor for a new device.
2334 */
2335 int dm_create(int minor, struct mapped_device **result)
2336 {
2337 struct mapped_device *md;
2338
2339 md = alloc_dev(minor);
2340 if (!md)
2341 return -ENXIO;
2342
2343 dm_ima_reset_data(md);
2344
2345 *result = md;
2346 return 0;
2347 }
2348
2349 /*
2350 * Functions to manage md->type.
2351 * All are required to hold md->type_lock.
2352 */
2353 void dm_lock_md_type(struct mapped_device *md)
2354 {
2355 mutex_lock(&md->type_lock);
2356 }
2357
2358 void dm_unlock_md_type(struct mapped_device *md)
2359 {
2360 mutex_unlock(&md->type_lock);
2361 }
2362
2363 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
2364 {
2365 BUG_ON(!mutex_is_locked(&md->type_lock));
2366 md->type = type;
2367 }
2368
2369 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2370 {
2371 return md->type;
2372 }
2373
2374 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2375 {
2376 return md->immutable_target_type;
2377 }
2378
2379 /*
2380 * Setup the DM device's queue based on md's type
2381 */
2382 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2383 {
2384 enum dm_queue_mode type = dm_table_get_type(t);
2385 struct queue_limits limits;
2386 struct table_device *td;
2387 int r;
2388
2389 switch (type) {
2390 case DM_TYPE_REQUEST_BASED:
2391 md->disk->fops = &dm_rq_blk_dops;
2392 r = dm_mq_init_request_queue(md, t);
2393 if (r) {
2394 DMERR("Cannot initialize queue for request-based dm mapped device");
2395 return r;
2396 }
2397 break;
2398 case DM_TYPE_BIO_BASED:
2399 case DM_TYPE_DAX_BIO_BASED:
2400 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, md->queue);
2401 break;
2402 case DM_TYPE_NONE:
2403 WARN_ON_ONCE(true);
2404 break;
2405 }
2406
2407 r = dm_calculate_queue_limits(t, &limits);
2408 if (r) {
2409 DMERR("Cannot calculate initial queue limits");
2410 return r;
2411 }
2412 r = dm_table_set_restrictions(t, md->queue, &limits);
2413 if (r)
2414 return r;
2415
2416 /*
2417 * Hold the lock to make sure add_disk() and del_gendisk() won't run
2418 * concurrently with open_table_device() and close_table_device().
2419 */
2420 mutex_lock(&md->table_devices_lock);
2421 r = add_disk(md->disk);
2422 mutex_unlock(&md->table_devices_lock);
2423 if (r)
2424 return r;
2425
2426 /*
2427 * Register the holder relationship for devices added before the disk
2428 * was live.
2429 */
2430 list_for_each_entry(td, &md->table_devices, list) {
2431 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2432 if (r)
2433 goto out_undo_holders;
2434 }
2435
2436 r = dm_sysfs_init(md);
2437 if (r)
2438 goto out_undo_holders;
2439
2440 md->type = type;
2441 return 0;
2442
2443 out_undo_holders:
2444 list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2445 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2446 mutex_lock(&md->table_devices_lock);
2447 del_gendisk(md->disk);
2448 mutex_unlock(&md->table_devices_lock);
2449 return r;
2450 }
2451
2452 struct mapped_device *dm_get_md(dev_t dev)
2453 {
2454 struct mapped_device *md;
2455 unsigned int minor = MINOR(dev);
2456
2457 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2458 return NULL;
2459
2460 spin_lock(&_minor_lock);
2461
2462 md = idr_find(&_minor_idr, minor);
2463 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2464 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2465 md = NULL;
2466 goto out;
2467 }
2468 dm_get(md);
2469 out:
2470 spin_unlock(&_minor_lock);
2471
2472 return md;
2473 }
2474 EXPORT_SYMBOL_GPL(dm_get_md);
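
/*
 * Illustrative sketch of the lookup-with-liveness-check idiom above:
 * the IDR lookup and the reference grab must both happen under
 * _minor_lock, because __dm_destroy() sets DMF_FREEING under the same
 * lock before freeing, so a racing lookup can never hand out a dying md:
 *
 *	spin_lock(&_minor_lock);
 *	md = idr_find(&_minor_idr, minor);
 *	if (md && !test_bit(DMF_FREEING, &md->flags))
 *		dm_get(md);
 *	else
 *		md = NULL;
 *	spin_unlock(&_minor_lock);
 */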
2475
2476 void *dm_get_mdptr(struct mapped_device *md)
2477 {
2478 return md->interface_ptr;
2479 }
2480
2481 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2482 {
2483 md->interface_ptr = ptr;
2484 }
2485
2486 void dm_get(struct mapped_device *md)
2487 {
2488 atomic_inc(&md->holders);
2489 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2490 }
2491
2492 int dm_hold(struct mapped_device *md)
2493 {
2494 spin_lock(&_minor_lock);
2495 if (test_bit(DMF_FREEING, &md->flags)) {
2496 spin_unlock(&_minor_lock);
2497 return -EBUSY;
2498 }
2499 dm_get(md);
2500 spin_unlock(&_minor_lock);
2501 return 0;
2502 }
2503 EXPORT_SYMBOL_GPL(dm_hold);
2504
2505 const char *dm_device_name(struct mapped_device *md)
2506 {
2507 return md->name;
2508 }
2509 EXPORT_SYMBOL_GPL(dm_device_name);
2510
2511 static void __dm_destroy(struct mapped_device *md, bool wait)
2512 {
2513 struct dm_table *map;
2514 int srcu_idx;
2515
2516 might_sleep();
2517
2518 spin_lock(&_minor_lock);
2519 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2520 set_bit(DMF_FREEING, &md->flags);
2521 spin_unlock(&_minor_lock);
2522
2523 blk_mark_disk_dead(md->disk);
2524
2525 /*
2526 * Take suspend_lock so that presuspend and postsuspend methods
2527 * do not race with internal suspend.
2528 */
2529 mutex_lock(&md->suspend_lock);
2530 map = dm_get_live_table(md, &srcu_idx);
2531 if (!dm_suspended_md(md)) {
2532 dm_table_presuspend_targets(map);
2533 set_bit(DMF_SUSPENDED, &md->flags);
2534 set_bit(DMF_POST_SUSPENDING, &md->flags);
2535 dm_table_postsuspend_targets(map);
2536 }
2537 /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2538 dm_put_live_table(md, srcu_idx);
2539 mutex_unlock(&md->suspend_lock);
2540
2541 /*
2542 * Rare, but there may still be I/O requests waiting to complete.
2543 * Wait for all references to disappear.
2544 * No one should increment the reference count of the mapped_device
2545 * after the mapped_device state becomes DMF_FREEING.
2546 */
2547 if (wait)
2548 while (atomic_read(&md->holders))
2549 fsleep(1000);
2550 else if (atomic_read(&md->holders))
2551 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2552 dm_device_name(md), atomic_read(&md->holders));
2553
2554 dm_table_destroy(__unbind(md));
2555 free_dev(md);
2556 }
2557
2558 void dm_destroy(struct mapped_device *md)
2559 {
2560 __dm_destroy(md, true);
2561 }
2562
2563 void dm_destroy_immediate(struct mapped_device *md)
2564 {
2565 __dm_destroy(md, false);
2566 }
2567
2568 void dm_put(struct mapped_device *md)
2569 {
2570 atomic_dec(&md->holders);
2571 }
2572 EXPORT_SYMBOL_GPL(dm_put);
2573
2574 static bool dm_in_flight_bios(struct mapped_device *md)
2575 {
2576 int cpu;
2577 unsigned long sum = 0;
2578
2579 for_each_possible_cpu(cpu)
2580 sum += *per_cpu_ptr(md->pending_io, cpu);
2581
2582 return sum != 0;
2583 }
2584
2585 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2586 {
2587 int r = 0;
2588 DEFINE_WAIT(wait);
2589
2590 while (true) {
2591 prepare_to_wait(&md->wait, &wait, task_state);
2592
2593 if (!dm_in_flight_bios(md))
2594 break;
2595
2596 if (signal_pending_state(task_state, current)) {
2597 r = -EINTR;
2598 break;
2599 }
2600
2601 io_schedule();
2602 }
2603 finish_wait(&md->wait, &wait);
2604
2605 smp_rmb();
2606
2607 return r;
2608 }
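
/*
 * A minimal sketch of the open-coded wait loop above; this is the
 * standard prepare_to_wait()/io_schedule()/finish_wait() idiom for
 * sleeping on a condition with no single wake-up event. "wq" and
 * condition() are placeholders for md->wait and !dm_in_flight_bios():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition())
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */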
2609
2610 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2611 {
2612 int r = 0;
2613
2614 if (!queue_is_mq(md->queue))
2615 return dm_wait_for_bios_completion(md, task_state);
2616
2617 while (true) {
2618 if (!blk_mq_queue_inflight(md->queue))
2619 break;
2620
2621 if (signal_pending_state(task_state, current)) {
2622 r = -EINTR;
2623 break;
2624 }
2625
2626 fsleep(5000);
2627 }
2628
2629 return r;
2630 }
2631
2632 /*
2633 * Process the deferred bios
2634 */
2635 static void dm_wq_work(struct work_struct *work)
2636 {
2637 struct mapped_device *md = container_of(work, struct mapped_device, work);
2638 struct bio *bio;
2639
2640 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2641 spin_lock_irq(&md->deferred_lock);
2642 bio = bio_list_pop(&md->deferred);
2643 spin_unlock_irq(&md->deferred_lock);
2644
2645 if (!bio)
2646 break;
2647
2648 submit_bio_noacct(bio);
2649 cond_resched();
2650 }
2651 }
2652
2653 static void dm_queue_flush(struct mapped_device *md)
2654 {
2655 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2656 smp_mb__after_atomic();
2657 queue_work(md->wq, &md->work);
2658 }
2659
2660 /*
2661 * Swap in a new table, returning the old one for the caller to destroy.
2662 */
2663 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2664 {
2665 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2666 struct queue_limits limits;
2667 int r;
2668
2669 mutex_lock(&md->suspend_lock);
2670
2671 /* device must be suspended */
2672 if (!dm_suspended_md(md))
2673 goto out;
2674
2675 /*
2676 * If the new table has no data devices, retain the existing limits.
2677 * This helps multipath with queue_if_no_path: if all paths disappear,
2678 * new I/O is queued based on these limits, and then some paths
2679 * reappear.
2680 */
2681 if (dm_table_has_no_data_devices(table)) {
2682 live_map = dm_get_live_table_fast(md);
2683 if (live_map)
2684 limits = md->queue->limits;
2685 dm_put_live_table_fast(md);
2686 }
2687
2688 if (!live_map) {
2689 r = dm_calculate_queue_limits(table, &limits);
2690 if (r) {
2691 map = ERR_PTR(r);
2692 goto out;
2693 }
2694 }
2695
2696 map = __bind(md, table, &limits);
2697 dm_issue_global_event();
2698
2699 out:
2700 mutex_unlock(&md->suspend_lock);
2701 return map;
2702 }
2703
2704 /*
2705 * Functions to lock and unlock any filesystem running on the
2706 * device.
2707 */
2708 static int lock_fs(struct mapped_device *md)
2709 {
2710 int r;
2711
2712 WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2713
2714 r = bdev_freeze(md->disk->part0);
2715 if (!r)
2716 set_bit(DMF_FROZEN, &md->flags);
2717 return r;
2718 }
2719
2720 static void unlock_fs(struct mapped_device *md)
2721 {
2722 if (!test_bit(DMF_FROZEN, &md->flags))
2723 return;
2724 bdev_thaw(md->disk->part0);
2725 clear_bit(DMF_FROZEN, &md->flags);
2726 }
2727
2728 /*
2729 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2730 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2731 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2732 *
2733 * If __dm_suspend returns 0, the device is completely quiescent
2734 * now. There is no request-processing activity. All new requests
2735 * are being added to md->deferred list.
2736 */
2737 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2738 unsigned int suspend_flags, unsigned int task_state,
2739 int dmf_suspended_flag)
2740 {
2741 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2742 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2743 int r;
2744
2745 lockdep_assert_held(&md->suspend_lock);
2746
2747 /*
2748 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2749 * This flag is cleared before dm_suspend returns.
2750 */
2751 if (noflush)
2752 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2753 else
2754 DMDEBUG("%s: suspending with flush", dm_device_name(md));
2755
2756 /*
2757 * This gets reverted if there's an error later and the targets
2758 * provide the .presuspend_undo hook.
2759 */
2760 dm_table_presuspend_targets(map);
2761
2762 /*
2763 * Flush I/O to the device.
2764 * Any I/O submitted after lock_fs() may not be flushed.
2765 * noflush takes precedence over do_lockfs.
2766 * (lock_fs() flushes I/Os and waits for them to complete.)
2767 */
2768 if (!noflush && do_lockfs) {
2769 r = lock_fs(md);
2770 if (r) {
2771 dm_table_presuspend_undo_targets(map);
2772 return r;
2773 }
2774 }
2775
2776 /*
2777 * Here we must make sure that no processes are submitting requests
2778 * to target drivers i.e. no one may be executing
2779 * dm_split_and_process_bio from dm_submit_bio.
2780 *
2781 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2782 * we take the write lock. To prevent any process from reentering
2783 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2784 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2785 * flush_workqueue(md->wq).
2786 */
2787 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2788 if (map)
2789 synchronize_srcu(&md->io_barrier);
2790
2791 /*
2792 * Stop md->queue before flushing md->wq in case request-based
2793 * dm defers requests to md->wq from md->queue.
2794 */
2795 if (dm_request_based(md))
2796 dm_stop_queue(md->queue);
2797
2798 flush_workqueue(md->wq);
2799
2800 /*
2801 * At this point no more requests are entering target request routines.
2802 * We call dm_wait_for_completion to wait for all existing requests
2803 * to finish.
2804 */
2805 r = dm_wait_for_completion(md, task_state);
2806 if (!r)
2807 set_bit(dmf_suspended_flag, &md->flags);
2808
2809 if (noflush)
2810 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2811 if (map)
2812 synchronize_srcu(&md->io_barrier);
2813
2814 /* were we interrupted? */
2815 if (r < 0) {
2816 dm_queue_flush(md);
2817
2818 if (dm_request_based(md))
2819 dm_start_queue(md->queue);
2820
2821 unlock_fs(md);
2822 dm_table_presuspend_undo_targets(map);
2823 /* pushback list is already flushed, so skip flush */
2824 }
2825
2826 return r;
2827 }
2828
2829 /*
2830 * We need to be able to change a mapping table under a mounted
2831 * filesystem. For example we might want to move some data in
2832 * the background. Before the table can be swapped with
2833 * dm_bind_table, dm_suspend must be called to flush any in
2834 * flight bios and ensure that any further io gets deferred.
2835 */
2836 /*
2837 * Suspend mechanism in request-based dm.
2838 *
2839 * 1. Flush all I/Os by lock_fs() if needed.
2840 * 2. Stop dispatching any I/O by stopping the request_queue.
2841 * 3. Wait for all in-flight I/Os to be completed or requeued.
2842 *
2843 * To abort suspend, start the request_queue.
2844 */
2845 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
2846 {
2847 struct dm_table *map = NULL;
2848 int r = 0;
2849
2850 retry:
2851 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2852
2853 if (dm_suspended_md(md)) {
2854 r = -EINVAL;
2855 goto out_unlock;
2856 }
2857
2858 if (dm_suspended_internally_md(md)) {
2859 /* already internally suspended, wait for internal resume */
2860 mutex_unlock(&md->suspend_lock);
2861 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2862 if (r)
2863 return r;
2864 goto retry;
2865 }
2866
2867 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2868 if (!map) {
2869 /* avoid deadlock with fs/namespace.c:do_mount() */
2870 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
2871 }
2872
2873 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
2874 if (r)
2875 goto out_unlock;
2876
2877 set_bit(DMF_POST_SUSPENDING, &md->flags);
2878 dm_table_postsuspend_targets(map);
2879 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2880
2881 out_unlock:
2882 mutex_unlock(&md->suspend_lock);
2883 return r;
2884 }
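
/*
 * Example pairing (illustrative only; the real caller is the DM ioctl
 * layer and may differ in detail): suspend, swap the table, resume.
 *
 *	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	if (!r) {
 *		old = dm_swap_table(md, new_table);
 *		if (!IS_ERR(old))
 *			r = dm_resume(md);
 *	}
 *
 * The old table returned by dm_swap_table() must then be destroyed by
 * the caller, per the comment above dm_swap_table().
 */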
2885
2886 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
2887 {
2888 if (map) {
2889 int r = dm_table_resume_targets(map);
2890
2891 if (r)
2892 return r;
2893 }
2894
2895 dm_queue_flush(md);
2896
2897 /*
2898 * Flushing deferred I/Os must be done after targets are resumed
2899 * so that mapping of targets can work correctly.
2900 * Request-based dm is queueing the deferred I/Os in its request_queue.
2901 */
2902 if (dm_request_based(md))
2903 dm_start_queue(md->queue);
2904
2905 unlock_fs(md);
2906
2907 return 0;
2908 }
2909
2910 int dm_resume(struct mapped_device *md)
2911 {
2912 int r;
2913 struct dm_table *map = NULL;
2914
2915 retry:
2916 r = -EINVAL;
2917 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
2918
2919 if (!dm_suspended_md(md))
2920 goto out;
2921
2922 if (dm_suspended_internally_md(md)) {
2923 /* already internally suspended, wait for internal resume */
2924 mutex_unlock(&md->suspend_lock);
2925 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
2926 if (r)
2927 return r;
2928 goto retry;
2929 }
2930
2931 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2932 if (!map || !dm_table_get_size(map))
2933 goto out;
2934
2935 r = __dm_resume(md, map);
2936 if (r)
2937 goto out;
2938
2939 clear_bit(DMF_SUSPENDED, &md->flags);
2940 out:
2941 mutex_unlock(&md->suspend_lock);
2942
2943 return r;
2944 }
2945
2946 /*
2947 * Internal suspend/resume works like userspace-driven suspend. It waits
2948 * until all bios finish and prevents issuing new bios to the target drivers.
2949 * It may be used only from the kernel.
2950 */
2951
2952 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
2953 {
2954 struct dm_table *map = NULL;
2955
2956 lockdep_assert_held(&md->suspend_lock);
2957
2958 if (md->internal_suspend_count++)
2959 return; /* nested internal suspend */
2960
2961 if (dm_suspended_md(md)) {
2962 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
2963 return; /* nested suspend */
2964 }
2965
2966 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2967
2968 /*
2969 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
2970 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
2971 * would require changing .presuspend to return an error -- avoid this
2972 * until there is a need for more elaborate variants of internal suspend.
2973 */
2974 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
2975 DMF_SUSPENDED_INTERNALLY);
2976
2977 set_bit(DMF_POST_SUSPENDING, &md->flags);
2978 dm_table_postsuspend_targets(map);
2979 clear_bit(DMF_POST_SUSPENDING, &md->flags);
2980 }
2981
2982 static void __dm_internal_resume(struct mapped_device *md)
2983 {
2984 int r;
2985 struct dm_table *map;
2986
2987 BUG_ON(!md->internal_suspend_count);
2988
2989 if (--md->internal_suspend_count)
2990 return; /* resume from nested internal suspend */
2991
2992 if (dm_suspended_md(md))
2993 goto done; /* resume from nested suspend */
2994
2995 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2996 r = __dm_resume(md, map);
2997 if (r) {
2998 /*
2999 * If a preresume method of some target failed, we are in a
3000 * tricky situation. We can't return an error to the caller. We
3001 * can't fake success because then the "resume" and
3002 * "postsuspend" methods would not be paired correctly, and it
3003 * would break various targets, for example it would cause list
3004 * corruption in the "origin" target.
3005 *
3006 * So, we fake normal suspend here, to make sure that the
3007 * "resume" and "postsuspend" methods will be paired correctly.
3008 */
3009 DMERR("Preresume method failed: %d", r);
3010 set_bit(DMF_SUSPENDED, &md->flags);
3011 }
3012 done:
3013 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3014 smp_mb__after_atomic();
3015 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3016 }
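
/*
 * Sketch of the bit-wait handshake ending here: dm_suspend() and
 * dm_resume() sleep with
 *
 *	wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
 *
 * and the resume path above pairs that with
 *
 *	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
 *
 * The barrier orders the bit clear before the wake-up, so a woken
 * waiter is guaranteed to observe the cleared bit.
 */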
3017
3018 void dm_internal_suspend_noflush(struct mapped_device *md)
3019 {
3020 mutex_lock(&md->suspend_lock);
3021 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3022 mutex_unlock(&md->suspend_lock);
3023 }
3024 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3025
3026 void dm_internal_resume(struct mapped_device *md)
3027 {
3028 mutex_lock(&md->suspend_lock);
3029 __dm_internal_resume(md);
3030 mutex_unlock(&md->suspend_lock);
3031 }
3032 EXPORT_SYMBOL_GPL(dm_internal_resume);
3033
3034 /*
3035 * Fast variants of internal suspend/resume hold md->suspend_lock,
3036 * which prevents interaction with userspace-driven suspend.
3037 */
3038
3039 void dm_internal_suspend_fast(struct mapped_device *md)
3040 {
3041 mutex_lock(&md->suspend_lock);
3042 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3043 return;
3044
3045 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3046 synchronize_srcu(&md->io_barrier);
3047 flush_workqueue(md->wq);
3048 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3049 }
3050 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3051
3052 void dm_internal_resume_fast(struct mapped_device *md)
3053 {
3054 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3055 goto done;
3056
3057 dm_queue_flush(md);
3058
3059 done:
3060 mutex_unlock(&md->suspend_lock);
3061 }
3062 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3063
3064 /*
3065 *---------------------------------------------------------------
3066 * Event notification.
3067 *---------------------------------------------------------------
3068 */
3069 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3070 unsigned int cookie, bool need_resize_uevent)
3071 {
3072 int r;
3073 unsigned int noio_flag;
3074 char udev_cookie[DM_COOKIE_LENGTH];
3075 char *envp[3] = { NULL, NULL, NULL };
3076 char **envpp = envp;
3077 if (cookie) {
3078 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3079 DM_COOKIE_ENV_VAR_NAME, cookie);
3080 *envpp++ = udev_cookie;
3081 }
3082 if (need_resize_uevent) {
3083 *envpp++ = "RESIZE=1";
3084 }
3085
3086 noio_flag = memalloc_noio_save();
3087
3088 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3089
3090 memalloc_noio_restore(noio_flag);
3091
3092 return r;
3093 }
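
/*
 * A minimal sketch of the environment array handed to
 * kobject_uevent_env() above, assuming a cookie of 1234 and a resize
 * (the values are illustrative):
 *
 *	char *envp[3] = { "DM_COOKIE=1234", "RESIZE=1", NULL };
 *
 *	kobject_uevent_env(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE, envp);
 *
 * envp has one more slot than variables so the array stays
 * NULL-terminated whichever optional entries are filled in.
 */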
3094
3095 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3096 {
3097 return atomic_add_return(1, &md->uevent_seq);
3098 }
3099
3100 uint32_t dm_get_event_nr(struct mapped_device *md)
3101 {
3102 return atomic_read(&md->event_nr);
3103 }
3104
3105 int dm_wait_event(struct mapped_device *md, int event_nr)
3106 {
3107 return wait_event_interruptible(md->eventq,
3108 (event_nr != atomic_read(&md->event_nr)));
3109 }
3110
3111 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3112 {
3113 unsigned long flags;
3114
3115 spin_lock_irqsave(&md->uevent_lock, flags);
3116 list_add(elist, &md->uevent_list);
3117 spin_unlock_irqrestore(&md->uevent_lock, flags);
3118 }
3119
3120 /*
3121 * The gendisk is only valid as long as you have a reference
3122 * count on 'md'.
3123 */
3124 struct gendisk *dm_disk(struct mapped_device *md)
3125 {
3126 return md->disk;
3127 }
3128 EXPORT_SYMBOL_GPL(dm_disk);
3129
3130 struct kobject *dm_kobject(struct mapped_device *md)
3131 {
3132 return &md->kobj_holder.kobj;
3133 }
3134
3135 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3136 {
3137 struct mapped_device *md;
3138
3139 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3140
3141 spin_lock(&_minor_lock);
3142 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3143 md = NULL;
3144 goto out;
3145 }
3146 dm_get(md);
3147 out:
3148 spin_unlock(&_minor_lock);
3149
3150 return md;
3151 }
3152
3153 int dm_suspended_md(struct mapped_device *md)
3154 {
3155 return test_bit(DMF_SUSPENDED, &md->flags);
3156 }
3157
3158 static int dm_post_suspending_md(struct mapped_device *md)
3159 {
3160 return test_bit(DMF_POST_SUSPENDING, &md->flags);
3161 }
3162
3163 int dm_suspended_internally_md(struct mapped_device *md)
3164 {
3165 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3166 }
3167
3168 int dm_test_deferred_remove_flag(struct mapped_device *md)
3169 {
3170 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3171 }
3172
3173 int dm_suspended(struct dm_target *ti)
3174 {
3175 return dm_suspended_md(ti->table->md);
3176 }
3177 EXPORT_SYMBOL_GPL(dm_suspended);
3178
3179 int dm_post_suspending(struct dm_target *ti)
3180 {
3181 return dm_post_suspending_md(ti->table->md);
3182 }
3183 EXPORT_SYMBOL_GPL(dm_post_suspending);
3184
3185 int dm_noflush_suspending(struct dm_target *ti)
3186 {
3187 return __noflush_suspending(ti->table->md);
3188 }
3189 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3190
3191 void dm_free_md_mempools(struct dm_md_mempools *pools)
3192 {
3193 if (!pools)
3194 return;
3195
3196 bioset_exit(&pools->bs);
3197 bioset_exit(&pools->io_bs);
3198
3199 kfree(pools);
3200 }
3201
3202 struct dm_pr {
3203 u64 old_key;
3204 u64 new_key;
3205 u32 flags;
3206 bool abort;
3207 bool fail_early;
3208 int ret;
3209 enum pr_type type;
3210 struct pr_keys *read_keys;
3211 struct pr_held_reservation *rsv;
3212 };
3213
3214 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3215 struct dm_pr *pr)
3216 {
3217 struct mapped_device *md = bdev->bd_disk->private_data;
3218 struct dm_table *table;
3219 struct dm_target *ti;
3220 int ret = -ENOTTY, srcu_idx;
3221
3222 table = dm_get_live_table(md, &srcu_idx);
3223 if (!table || !dm_table_get_size(table))
3224 goto out;
3225
3226 /* We only support devices that have a single target */
3227 if (table->num_targets != 1)
3228 goto out;
3229 ti = dm_table_get_target(table, 0);
3230
3231 if (dm_suspended_md(md)) {
3232 ret = -EAGAIN;
3233 goto out;
3234 }
3235
3236 ret = -EINVAL;
3237 if (!ti->type->iterate_devices)
3238 goto out;
3239
3240 ti->type->iterate_devices(ti, fn, pr);
3241 ret = 0;
3242 out:
3243 dm_put_live_table(md, srcu_idx);
3244 return ret;
3245 }
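
/*
 * Sketch of the callback contract dm_call_pr() relies on: each
 * __dm_pr_*() helper below is an iterate_devices_callout_fn, called once
 * per underlying device of the single target, and returns non-zero to
 * stop the iteration early. my_pr_fn() and do_pr_op() are hypothetical
 * stand-ins, not names from this file:
 *
 *	static int my_pr_fn(struct dm_target *ti, struct dm_dev *dev,
 *			    sector_t start, sector_t len, void *data)
 *	{
 *		struct dm_pr *pr = data;
 *
 *		pr->ret = do_pr_op(dev->bdev);
 *		return pr->ret ? -1 : 0;
 *	}
 */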
3246
3247 /*
3248 * For register / unregister we need to manually call out to every path.
3249 */
3250 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3251 sector_t start, sector_t len, void *data)
3252 {
3253 struct dm_pr *pr = data;
3254 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3255 int ret;
3256
3257 if (!ops || !ops->pr_register) {
3258 pr->ret = -EOPNOTSUPP;
3259 return -1;
3260 }
3261
3262 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3263 if (!ret)
3264 return 0;
3265
3266 if (!pr->ret)
3267 pr->ret = ret;
3268
3269 if (pr->fail_early)
3270 return -1;
3271
3272 return 0;
3273 }
3274
3275 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3276 u32 flags)
3277 {
3278 struct dm_pr pr = {
3279 .old_key = old_key,
3280 .new_key = new_key,
3281 .flags = flags,
3282 .fail_early = true,
3283 .ret = 0,
3284 };
3285 int ret;
3286
3287 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3288 if (ret) {
3289 /* Didn't even get to register a path */
3290 return ret;
3291 }
3292
3293 if (!pr.ret)
3294 return 0;
3295 ret = pr.ret;
3296
3297 if (!new_key)
3298 return ret;
3299
3300 /* unregister all paths if we failed to register any path */
3301 pr.old_key = new_key;
3302 pr.new_key = 0;
3303 pr.flags = 0;
3304 pr.fail_early = false;
3305 (void) dm_call_pr(bdev, __dm_pr_register, &pr);
3306 return ret;
3307 }
3308
3309
3310 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3311 sector_t start, sector_t len, void *data)
3312 {
3313 struct dm_pr *pr = data;
3314 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3315
3316 if (!ops || !ops->pr_reserve) {
3317 pr->ret = -EOPNOTSUPP;
3318 return -1;
3319 }
3320
3321 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3322 if (!pr->ret)
3323 return -1;
3324
3325 return 0;
3326 }
3327
3328 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3329 u32 flags)
3330 {
3331 struct dm_pr pr = {
3332 .old_key = key,
3333 .flags = flags,
3334 .type = type,
3335 .fail_early = false,
3336 .ret = 0,
3337 };
3338 int ret;
3339
3340 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3341 if (ret)
3342 return ret;
3343
3344 return pr.ret;
3345 }
3346
3347 /*
3348 * If there is a non-All Registrants type of reservation, the release must be
3349 * sent down the holding path. In the cases where there is no reservation, or
3350 * the path is not the holder, the device will also return success, so we must
3351 * try each path to make sure we hit the correct one.
3352 */
3353 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3354 sector_t start, sector_t len, void *data)
3355 {
3356 struct dm_pr *pr = data;
3357 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3358
3359 if (!ops || !ops->pr_release) {
3360 pr->ret = -EOPNOTSUPP;
3361 return -1;
3362 }
3363
3364 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3365 if (pr->ret)
3366 return -1;
3367
3368 return 0;
3369 }
3370
3371 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3372 {
3373 struct dm_pr pr = {
3374 .old_key = key,
3375 .type = type,
3376 .fail_early = false,
3377 };
3378 int ret;
3379
3380 ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3381 if (ret)
3382 return ret;
3383
3384 return pr.ret;
3385 }
3386
3387 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3388 sector_t start, sector_t len, void *data)
3389 {
3390 struct dm_pr *pr = data;
3391 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3392
3393 if (!ops || !ops->pr_preempt) {
3394 pr->ret = -EOPNOTSUPP;
3395 return -1;
3396 }
3397
3398 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3399 pr->abort);
3400 if (!pr->ret)
3401 return -1;
3402
3403 return 0;
3404 }
3405
3406 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3407 enum pr_type type, bool abort)
3408 {
3409 struct dm_pr pr = {
3410 .new_key = new_key,
3411 .old_key = old_key,
3412 .type = type,
3413 .fail_early = false,
3414 };
3415 int ret;
3416
3417 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3418 if (ret)
3419 return ret;
3420
3421 return pr.ret;
3422 }
3423
3424 static int dm_pr_clear(struct block_device *bdev, u64 key)
3425 {
3426 struct mapped_device *md = bdev->bd_disk->private_data;
3427 const struct pr_ops *ops;
3428 int r, srcu_idx;
3429
3430 r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
3431 if (r < 0)
3432 goto out;
3433
3434 ops = bdev->bd_disk->fops->pr_ops;
3435 if (ops && ops->pr_clear)
3436 r = ops->pr_clear(bdev, key);
3437 else
3438 r = -EOPNOTSUPP;
3439 out:
3440 dm_unprepare_ioctl(md, srcu_idx);
3441 return r;
3442 }
3443
3444 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
3445 sector_t start, sector_t len, void *data)
3446 {
3447 struct dm_pr *pr = data;
3448 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3449
3450 if (!ops || !ops->pr_read_keys) {
3451 pr->ret = -EOPNOTSUPP;
3452 return -1;
3453 }
3454
3455 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
3456 if (!pr->ret)
3457 return -1;
3458
3459 return 0;
3460 }
3461
3462 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
3463 {
3464 struct dm_pr pr = {
3465 .read_keys = keys,
3466 };
3467 int ret;
3468
3469 ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3470 if (ret)
3471 return ret;
3472
3473 return pr.ret;
3474 }
3475
3476 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
3477 sector_t start, sector_t len, void *data)
3478 {
3479 struct dm_pr *pr = data;
3480 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3481
3482 if (!ops || !ops->pr_read_reservation) {
3483 pr->ret = -EOPNOTSUPP;
3484 return -1;
3485 }
3486
3487 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
3488 if (!pr->ret)
3489 return -1;
3490
3491 return 0;
3492 }
3493
3494 static int dm_pr_read_reservation(struct block_device *bdev,
3495 struct pr_held_reservation *rsv)
3496 {
3497 struct dm_pr pr = {
3498 .rsv = rsv,
3499 };
3500 int ret;
3501
3502 ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3503 if (ret)
3504 return ret;
3505
3506 return pr.ret;
3507 }
3508
3509 static const struct pr_ops dm_pr_ops = {
3510 .pr_register = dm_pr_register,
3511 .pr_reserve = dm_pr_reserve,
3512 .pr_release = dm_pr_release,
3513 .pr_preempt = dm_pr_preempt,
3514 .pr_clear = dm_pr_clear,
3515 .pr_read_keys = dm_pr_read_keys,
3516 .pr_read_reservation = dm_pr_read_reservation,
3517 };
3518
3519 static const struct block_device_operations dm_blk_dops = {
3520 .submit_bio = dm_submit_bio,
3521 .poll_bio = dm_poll_bio,
3522 .open = dm_blk_open,
3523 .release = dm_blk_close,
3524 .ioctl = dm_blk_ioctl,
3525 .getgeo = dm_blk_getgeo,
3526 .report_zones = dm_blk_report_zones,
3527 .pr_ops = &dm_pr_ops,
3528 .owner = THIS_MODULE
3529 };
3530
3531 static const struct block_device_operations dm_rq_blk_dops = {
3532 .open = dm_blk_open,
3533 .release = dm_blk_close,
3534 .ioctl = dm_blk_ioctl,
3535 .getgeo = dm_blk_getgeo,
3536 .pr_ops = &dm_pr_ops,
3537 .owner = THIS_MODULE
3538 };
3539
3540 static const struct dax_operations dm_dax_ops = {
3541 .direct_access = dm_dax_direct_access,
3542 .zero_page_range = dm_dax_zero_page_range,
3543 .recovery_write = dm_dax_recovery_write,
3544 };
3545
3546 /*
3547 * module hooks
3548 */
3549 module_init(dm_init);
3550 module_exit(dm_exit);
3551
3552 module_param(major, uint, 0);
3553 MODULE_PARM_DESC(major, "The major number of the device mapper");
3554
3555 module_param(reserved_bio_based_ios, uint, 0644);
3556 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3557
3558 module_param(dm_numa_node, int, 0644);
3559 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3560
3561 module_param(swap_bios, int, 0644);
3562 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3563
3564 MODULE_DESCRIPTION(DM_NAME " driver");
3565 MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
3566 MODULE_LICENSE("GPL");
3567