/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_DAX_BIO_BASED	 = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

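/*
 * A minimal constructor/destructor sketch for a hypothetical "example"
 * target that opens a single underlying device (the struct, names and
 * error strings are illustrative, not part of this API):
 *
 *	struct example_ctx {
 *		struct dm_dev *dev;
 *	};
 *
 *	static int example_ctr(struct dm_target *ti,
 *			       unsigned int argc, char **argv)
 *	{
 *		struct example_ctx *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */
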
/*
 * The map function must return:
 * < 0: error
 * = 0: the target will handle the io by resubmitting it later (DM_MAPIO_SUBMITTED)
 * = 1: simple remap complete (DM_MAPIO_REMAPPED)
 * = 2: the target wants to push back the io (DM_MAPIO_REQUEUE)
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

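/*
 * A map sketch in the style of dm-linear: remap the bio to the underlying
 * device and let device-mapper resubmit it (example_ctx is the hypothetical
 * context from the constructor sketch above; DM_MAPIO_* and
 * dm_target_offset() are defined at the bottom of this header):
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_ctx *ec = ti->private;
 *
 *		bio_set_dev(bio, ec->dev->bdev);
 *		bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
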
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully (DM_ENDIO_DONE)
 * 1   : the io has still not completed for some reason, e.g. the
 *       multipath target might want to requeue a failed io
 *       (DM_ENDIO_INCOMPLETE)
 * 2   : the target wants to push back the io (DM_ENDIO_REQUEUE)
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

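/*
 * A trivial end_io sketch: inspect (and possibly override) the completion
 * status in *error, then tell device-mapper the io is finished. The status
 * remap below is purely illustrative:
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		if (*error == BLK_STS_NOTSUPP)
 *			*error = BLK_STS_IOERR;	// illustrative remap
 *		return DM_ENDIO_DONE;
 *	}
 */
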
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
			      char *result, unsigned maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

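/*
 * A status sketch using the DMEMIT() macro defined at the bottom of this
 * header (DMEMIT assumes local variables named sz, result and maxlen;
 * example_ctx is the hypothetical context from the sketches above):
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned status_flags, char *result,
 *				   unsigned maxlen)
 *	{
 *		struct example_ctx *ec = ti->private;
 *		size_t sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s", ec->dev->name);
 *			break;
 *		}
 *	}
 */
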
#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

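/*
 * A callout sketch that reports whether a device sits on a rotational
 * queue; the combining logic ("does any device satisfy X?") lives in the
 * caller that passes this to iterate_devices:
 *
 *	static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev,
 *					sector_t start, sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && !blk_queue_nonrot(q);
 *	}
 */
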
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);
typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	fmode_t mode;
	char name[16];
};

dev_t dm_get_dev_t(const char *path);

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_copy_iter_fn dax_copy_from_iter;
	dm_dax_copy_iter_fn dax_copy_to_iter;
	dm_dax_zero_page_range_fn dax_zero_page_range;

	/* For internal device-mapper use. */
	struct list_head list;
};

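/*
 * A sketch of how a target module typically fills this in, hooking up the
 * hypothetical example_* functions from the sketches above (hooks left
 * unset stay NULL and are simply never called):
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *		.status  = example_status,
 *	};
 */
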
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target, even immutable ones.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target passes through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_secure_erase_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned dm_bio_get_target_bio_nr(const struct bio *bio);

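/*
 * Per-bio data sketch: a target sets ti->per_io_data_size in its ctr and
 * can then carry private state alongside every bio it maps (the struct
 * name here is illustrative):
 *
 *	struct example_per_bio {
 *		sector_t orig_sector;
 *	};
 *
 *	In the ctr:
 *		ti->per_io_data_size = sizeof(struct example_per_bio);
 *
 *	In the map function:
 *		struct example_per_bio *pb =
 *			dm_per_bio_data(bio, sizeof(struct example_per_bio));
 *		pb->orig_sector = bio->bi_iter.bi_sector;
 */
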
u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

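/*
 * Registration sketch for a target module, pairing dm_register_target()
 * with dm_unregister_target(); example_target is the hypothetical
 * target_type initializer sketched above, and a real module also defines
 * DM_MSG_PREFIX for the DM* log macros below:
 *
 *	static int __init dm_example_init(void)
 *	{
 *		int r = dm_register_target(&example_target);
 *
 *		if (r < 0)
 *			DMERR("register failed %d", r);
 *		return r;
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */
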
/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);

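/*
 * Argument-parsing sketch for a ctr body: wrap argc/argv in a dm_arg_set,
 * pull off positional arguments with dm_shift_arg(), and validate numeric
 * ones with dm_read_arg() (the bounds and messages are illustrative):
 *
 *	static const struct dm_arg _args[] = {
 *		{ 1, 64, "invalid queue depth" },
 *	};
 *
 *	struct dm_arg_set as;
 *	const char *devname;
 *	unsigned depth;
 *	int r;
 *
 *	as.argc = argc;
 *	as.argv = argv;
 *
 *	devname = dm_shift_arg(&as);
 *	if (!devname) {
 *		ti->error = "Missing device argument";
 *		return -EINVAL;
 *	}
 *	r = dm_read_arg(_args, &as, &depth, &ti->error);
 *	if (r)
 *		return r;
 */
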
/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets that support both bio-based
 * and request-based operation.
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

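/*
 * The full table lifecycle described above, sketched for an in-kernel
 * caller (error handling elided; the "linear" target and its parameter
 * string are illustrative):
 *
 *	struct dm_table *t;
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	dm_table_add_target(t, "linear", 0, len, "/dev/sda 0");
 *	dm_table_complete(t);
 *	...
 *	dm_table_destroy(t);
 */
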
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table keyslot manager functions
 */
void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */