/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
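/*
 * Example (hypothetical device and sizes): the table line
 *
 *   0 2097152 linear /dev/sdb 2048
 *
 * maps a 1 GiB (2097152-sector) target onto /dev/sdb starting at sector
 * 2048, e.g. loaded with
 * "dmsetup create lin --table '0 2097152 linear /dev/sdb 2048'".
 */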
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return ret;
}

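/*
 * Destroy a linear mapping: drop the device reference taken in the
 * constructor and free the per-target context.
 */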
static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

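/* Translate a target-relative sector into a sector on the underlying device. */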
static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

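/*
 * Redirect a bio to the underlying device. The start sector is only
 * remapped for bios that carry data or are zone management requests;
 * empty flushes are passed through unchanged.
 */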
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || op_is_zone_mgmt(bio_op(bio)))
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

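/* Map callback: remap the bio and let dm core resubmit it. */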
static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

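/*
 * Status callback: nothing to report for STATUSTYPE_INFO; a table query
 * returns the backing device and start offset given at construction.
 */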
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
				(unsigned long long)lc->start);
		break;
	}
}

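/*
 * Hand the underlying block device back to dm core for ioctl forwarding.
 * A non-zero return tells the core that this target does not cover the
 * whole device.
 */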
static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
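/*
 * Forward a zone report to the underlying zoned device; args->start lets
 * dm_report_zones_cb translate the reported zones back into the dm
 * device's sector range.
 */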
static int linear_report_zones(struct dm_target *ti,
		struct dm_report_zones_args *args, unsigned int nr_zones)
{
	struct linear_c *lc = ti->private;
	sector_t sector = linear_map_sector(ti, args->next_sector);

	args->start = lc->start;
	return blkdev_report_zones(lc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
#else
#define linear_report_zones NULL
#endif

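/* Report the single underlying device extent to the dm core callout. */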
static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

#if IS_ENABLED(CONFIG_DAX_DRIVER)
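/*
 * DAX operations: translate the page offset onto the underlying device
 * and forward the request to its dax_device.
 */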
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}

static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
				      size_t nr_pages)
{
	int ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}

#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif

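/*
 * Target descriptor registered with dm core. The feature flags advertise
 * pass-through of integrity and inline-crypto contexts, non-blocking
 * (REQ_NOWAIT) submission and support for host-managed zoned devices.
 */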
static struct target_type linear_target = {
	.name   = "linear",
	.version = {1, 4, 0},
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
		    DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO,
	.report_zones = linear_report_zones,
	.module = THIS_MODULE,
	.ctr    = linear_ctr,
	.dtr    = linear_dtr,
	.map    = linear_map,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
	.dax_copy_to_iter = linear_dax_copy_to_iter,
	.dax_zero_page_range = linear_dax_zero_page_range,
};

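/*
 * Registration entry points: the linear target is registered and
 * unregistered from device-mapper core initialisation (it is built into
 * dm rather than using module_init()/module_exit() of its own).
 */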
int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}