xref: /linux/drivers/md/dm-ebs-target.c (revision fa34e589)
13bd94003SHeinz Mauelshagen // SPDX-License-Identifier: GPL-2.0-only
2d3c7b35cSHeinz Mauelshagen /*
3d3c7b35cSHeinz Mauelshagen  * Copyright (C) 2020 Red Hat GmbH
4d3c7b35cSHeinz Mauelshagen  *
5d3c7b35cSHeinz Mauelshagen  * This file is released under the GPL.
6d3c7b35cSHeinz Mauelshagen  *
7d3c7b35cSHeinz Mauelshagen  * Device-mapper target to emulate smaller logical block
8d3c7b35cSHeinz Mauelshagen  * size on backing devices exposing (natively) larger ones.
9d3c7b35cSHeinz Mauelshagen  *
10d3c7b35cSHeinz Mauelshagen  * E.g. 512 byte sector emulation on 4K native disks.
11d3c7b35cSHeinz Mauelshagen  */
12d3c7b35cSHeinz Mauelshagen 
13d3c7b35cSHeinz Mauelshagen #include "dm.h"
14d3c7b35cSHeinz Mauelshagen #include <linux/module.h>
15d3c7b35cSHeinz Mauelshagen #include <linux/workqueue.h>
16d3c7b35cSHeinz Mauelshagen #include <linux/dm-bufio.h>
17d3c7b35cSHeinz Mauelshagen 
18d3c7b35cSHeinz Mauelshagen #define DM_MSG_PREFIX "ebs"
19d3c7b35cSHeinz Mauelshagen 
20d3c7b35cSHeinz Mauelshagen static void ebs_dtr(struct dm_target *ti);
21d3c7b35cSHeinz Mauelshagen 
/*
 * Emulated block size context — per-target state, allocated in ebs_ctr()
 * and stored in ti->private.
 */
struct ebs_c {
	struct dm_dev *dev;		/* Underlying device to emulate block size on. */
	struct dm_bufio_client *bufio;	/* Use dm-bufio for read and read-modify-write processing. */
	struct workqueue_struct *wq;	/* Workqueue for ^ processing of bios. */
	struct work_struct ws;		/* Work item used for ^. */
	struct bio_list bios_in;	/* Worker bios input list. */
	spinlock_t lock;		/* Guard bios input list above. */
	sector_t start;			/* <start> table line argument, see ebs_ctr below. */
	unsigned int e_bs;		/* Emulated block size in sectors exposed to upper layer. */
	unsigned int u_bs;		/* Underlying block size in sectors retrieved from/set on lower layer device. */
	unsigned char block_shift;	/* bitshift sectors -> blocks used in dm-bufio API. */
	bool u_bs_set:1;		/* Flag to indicate underlying block size is set on table line. */
};
36d3c7b35cSHeinz Mauelshagen 
/* Convert a 512-byte sector number into an underlying-block number. */
static inline sector_t __sector_to_block(struct ebs_c *ec, sector_t sector)
{
	return sector >> ec->block_shift;
}
41d3c7b35cSHeinz Mauelshagen 
/*
 * Offset of @sector within a block of @bs sectors.
 * @bs must be a power of 2 (enforced by __ebs_check_bs() in the ctr).
 */
static inline sector_t __block_mod(sector_t sector, unsigned int bs)
{
	return sector & (bs - 1);
}
46d3c7b35cSHeinz Mauelshagen 
471c72e023SBhaskar Chowdhury /* Return number of blocks for a bio, accounting for misalignment of start and end sectors. */
__nr_blocks(struct ebs_c * ec,struct bio * bio)48d3c7b35cSHeinz Mauelshagen static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
49d3c7b35cSHeinz Mauelshagen {
50d3c7b35cSHeinz Mauelshagen 	sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);
51d3c7b35cSHeinz Mauelshagen 
52d3c7b35cSHeinz Mauelshagen 	return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
53d3c7b35cSHeinz Mauelshagen }
54d3c7b35cSHeinz Mauelshagen 
__ebs_check_bs(unsigned int bs)55d3c7b35cSHeinz Mauelshagen static inline bool __ebs_check_bs(unsigned int bs)
56d3c7b35cSHeinz Mauelshagen {
57d3c7b35cSHeinz Mauelshagen 	return bs && is_power_of_2(bs);
58d3c7b35cSHeinz Mauelshagen }
59d3c7b35cSHeinz Mauelshagen 
/*
 * READ/WRITE:
 *
 * copy blocks between bufio blocks and bio vector's (partial/overlapping) pages.
 *
 * On error, keeps processing the remaining blocks so that all possible data
 * is issued, and returns the last error encountered (0 on full success).
 */
static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
			 struct bvec_iter *iter)
{
	int r = 0;
	unsigned char *ba, *pa;
	unsigned int cur_len;
	unsigned int bv_len = bv->bv_len;
	/* Byte offset into the first underlying block (non-zero on misaligned start). */
	unsigned int buf_off = to_bytes(__block_mod(iter->bi_sector, ec->u_bs));
	sector_t block = __sector_to_block(ec, iter->bi_sector);
	struct dm_buffer *b;

	if (unlikely(!bv->bv_page || !bv_len))
		return -EIO;

	pa = bvec_virt(bv);

	/* Handle overlapping page <-> blocks */
	while (bv_len) {
		cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);

		/* Avoid reading for writes in case bio vector's page overwrites block completely. */
		if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
			ba = dm_bufio_read(ec->bufio, block, &b);
		else
			ba = dm_bufio_new(ec->bufio, block, &b);

		if (IS_ERR(ba)) {
			/*
			 * Carry on with next buffer, if any, to issue all possible
			 * data but return error.
			 */
			r = PTR_ERR(ba);
		} else {
			/* Copy data to/from bio to buffer if read/new was successful above. */
			ba += buf_off;
			if (op == REQ_OP_READ) {
				memcpy(pa, ba, cur_len);
				flush_dcache_page(bv->bv_page);
			} else {
				flush_dcache_page(bv->bv_page);
				memcpy(ba, pa, cur_len);
				/* Only the touched byte range is marked dirty for writeback. */
				dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
			}

			dm_bufio_release(b);
		}

		pa += cur_len;
		bv_len -= cur_len;
		buf_off = 0;	/* Only the first block can start misaligned. */
		block++;
	}

	return r;
}
120d3c7b35cSHeinz Mauelshagen 
121d3c7b35cSHeinz Mauelshagen /* READ/WRITE: iterate bio vector's copying between (partial) pages and bufio blocks. */
__ebs_rw_bio(struct ebs_c * ec,enum req_op op,struct bio * bio)12267a7b9a5SBart Van Assche static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
123d3c7b35cSHeinz Mauelshagen {
124d3c7b35cSHeinz Mauelshagen 	int r = 0, rr;
125d3c7b35cSHeinz Mauelshagen 	struct bio_vec bv;
126d3c7b35cSHeinz Mauelshagen 	struct bvec_iter iter;
127d3c7b35cSHeinz Mauelshagen 
128d3c7b35cSHeinz Mauelshagen 	bio_for_each_bvec(bv, bio, iter) {
12967a7b9a5SBart Van Assche 		rr = __ebs_rw_bvec(ec, op, &bv, &iter);
130d3c7b35cSHeinz Mauelshagen 		if (rr)
131d3c7b35cSHeinz Mauelshagen 			r = rr;
132d3c7b35cSHeinz Mauelshagen 	}
133d3c7b35cSHeinz Mauelshagen 
134d3c7b35cSHeinz Mauelshagen 	return r;
135d3c7b35cSHeinz Mauelshagen }
136d3c7b35cSHeinz Mauelshagen 
137a5089a95SHeinz Mauelshagen /*
138a5089a95SHeinz Mauelshagen  * Discard bio's blocks, i.e. pass discards down.
139a5089a95SHeinz Mauelshagen  *
140a5089a95SHeinz Mauelshagen  * Avoid discarding partial blocks at beginning and end;
141a5089a95SHeinz Mauelshagen  * return 0 in case no blocks can be discarded as a result.
142a5089a95SHeinz Mauelshagen  */
__ebs_discard_bio(struct ebs_c * ec,struct bio * bio)143a5089a95SHeinz Mauelshagen static int __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
144a5089a95SHeinz Mauelshagen {
145a5089a95SHeinz Mauelshagen 	sector_t block, blocks, sector = bio->bi_iter.bi_sector;
146a5089a95SHeinz Mauelshagen 
147a5089a95SHeinz Mauelshagen 	block = __sector_to_block(ec, sector);
148a5089a95SHeinz Mauelshagen 	blocks = __nr_blocks(ec, bio);
149a5089a95SHeinz Mauelshagen 
150a5089a95SHeinz Mauelshagen 	/*
151a5089a95SHeinz Mauelshagen 	 * Partial first underlying block (__nr_blocks() may have
152a5089a95SHeinz Mauelshagen 	 * resulted in one block).
153a5089a95SHeinz Mauelshagen 	 */
154a5089a95SHeinz Mauelshagen 	if (__block_mod(sector, ec->u_bs)) {
155a5089a95SHeinz Mauelshagen 		block++;
156a5089a95SHeinz Mauelshagen 		blocks--;
157a5089a95SHeinz Mauelshagen 	}
158a5089a95SHeinz Mauelshagen 
159a5089a95SHeinz Mauelshagen 	/* Partial last underlying block if any. */
160a5089a95SHeinz Mauelshagen 	if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))
161a5089a95SHeinz Mauelshagen 		blocks--;
162a5089a95SHeinz Mauelshagen 
163a5089a95SHeinz Mauelshagen 	return blocks ? dm_bufio_issue_discard(ec->bufio, block, blocks) : 0;
164a5089a95SHeinz Mauelshagen }
165a5089a95SHeinz Mauelshagen 
166a5089a95SHeinz Mauelshagen /* Release blocks them from the bufio cache. */
__ebs_forget_bio(struct ebs_c * ec,struct bio * bio)167a5089a95SHeinz Mauelshagen static void __ebs_forget_bio(struct ebs_c *ec, struct bio *bio)
168d3c7b35cSHeinz Mauelshagen {
169d3c7b35cSHeinz Mauelshagen 	sector_t blocks, sector = bio->bi_iter.bi_sector;
170d3c7b35cSHeinz Mauelshagen 
171d3c7b35cSHeinz Mauelshagen 	blocks = __nr_blocks(ec, bio);
172334b4fc1SMikulas Patocka 
173334b4fc1SMikulas Patocka 	dm_bufio_forget_buffers(ec->bufio, __sector_to_block(ec, sector), blocks);
174d3c7b35cSHeinz Mauelshagen }
175d3c7b35cSHeinz Mauelshagen 
/*
 * Worker function to process incoming bios.
 *
 * Runs on the target's ordered workqueue: drains the input list under the
 * lock, prefetches the needed buffers, performs the copies/discards, writes
 * dirty buffers out (honoring REQ_FUA/REQ_SYNC semantics) and only then
 * ends the bios.
 */
static void __ebs_process_bios(struct work_struct *ws)
{
	int r;
	bool write = false;
	sector_t block1, block2;
	struct ebs_c *ec = container_of(ws, struct ebs_c, ws);
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	/* Take the whole input list in one go to minimize lock hold time. */
	spin_lock_irq(&ec->lock);
	bios = ec->bios_in;
	bio_list_init(&ec->bios_in);
	spin_unlock_irq(&ec->lock);

	/* Prefetch all read and any mis-aligned write buffers */
	bio_list_for_each(bio, &bios) {
		block1 = __sector_to_block(ec, bio->bi_iter.bi_sector);
		if (bio_op(bio) == REQ_OP_READ)
			dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio));
		else if (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
			block2 = __sector_to_block(ec, bio_end_sector(bio));
			/* Writes only need the (partial) first/last blocks read in. */
			if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
				dm_bufio_prefetch(ec->bufio, block1, 1);
			if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
				dm_bufio_prefetch(ec->bufio, block2, 1);
		}
	}

	bio_list_for_each(bio, &bios) {
		r = -EIO;	/* Unsupported ops fail the bio. */
		if (bio_op(bio) == REQ_OP_READ)
			r = __ebs_rw_bio(ec, REQ_OP_READ, bio);
		else if (bio_op(bio) == REQ_OP_WRITE) {
			write = true;
			r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
		} else if (bio_op(bio) == REQ_OP_DISCARD) {
			/* Drop cached content before passing the discard down. */
			__ebs_forget_bio(ec, bio);
			r = __ebs_discard_bio(ec, bio);
		}

		if (r < 0)
			bio->bi_status = errno_to_blk_status(r);
	}

	/*
	 * We write dirty buffers after processing I/O on them
	 * but before we endio thus addressing REQ_FUA/REQ_SYNC.
	 */
	r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;

	while ((bio = bio_list_pop(&bios))) {
		/* Any other request is endioed. */
		if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
			bio_io_error(bio);
		else
			bio_endio(bio);
	}
}
237d3c7b35cSHeinz Mauelshagen 
/*
 * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
 *
 * <dev_path>: path of the underlying device
 * <offset>: offset in 512 bytes sectors into <dev_path>
 * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
 * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
 *	    optional, if not supplied, retrieve logical block size from underlying device
 *
 * Returns 0 on success, negative errno on failure; any partially-built
 * context is torn down via ebs_dtr() on the error path.
 */
static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned short tmp1;
	unsigned long long tmp;
	char dummy;	/* Catches trailing garbage after the parsed number. */
	struct ebs_c *ec;

	if (argc < 3 || argc > 4) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = ti->private = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate ebs context";
		return -ENOMEM;
	}

	r = -EINVAL;
	/* <offset>: must fit in sector_t and lie within the target length. */
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 ||
	    tmp != (sector_t)tmp ||
	    (sector_t)tmp >= ti->len) {
		ti->error = "Invalid device offset sector";
		goto bad;
	}
	ec->start = tmp;

	/* <ebs>: power-of-2 sector count, at most one page worth of bytes. */
	if (sscanf(argv[2], "%hu%c", &tmp1, &dummy) != 1 ||
	    !__ebs_check_bs(tmp1) ||
	    to_bytes(tmp1) > PAGE_SIZE) {
		ti->error = "Invalid emulated block size";
		goto bad;
	}
	ec->e_bs = tmp1;

	/* Optional [<ubs>]; otherwise derived from the device below. */
	if (argc > 3) {
		if (sscanf(argv[3], "%hu%c", &tmp1, &dummy) != 1 || !__ebs_check_bs(tmp1)) {
			ti->error = "Invalid underlying block size";
			goto bad;
		}
		ec->u_bs = tmp1;
		ec->u_bs_set = true;
	} else
		ec->u_bs_set = false;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		ec->dev = NULL;	/* Keep ebs_dtr() from releasing a bogus dev. */
		goto bad;
	}

	r = -EINVAL;
	if (!ec->u_bs_set) {
		ec->u_bs = to_sector(bdev_logical_block_size(ec->dev->bdev));
		if (!__ebs_check_bs(ec->u_bs)) {
			ti->error = "Invalid retrieved underlying block size";
			goto bad;
		}
	}

	if (!ec->u_bs_set && ec->e_bs == ec->u_bs)
		DMINFO("Emulation superfluous: emulated equal to underlying block size");

	if (__block_mod(ec->start, ec->u_bs)) {
		ti->error = "Device offset must be multiple of underlying block size";
		goto bad;
	}

	ec->bufio = dm_bufio_client_create(ec->dev->bdev, to_bytes(ec->u_bs), 1,
					   0, NULL, NULL, 0);
	if (IS_ERR(ec->bufio)) {
		ti->error = "Cannot create dm bufio client";
		r = PTR_ERR(ec->bufio);
		ec->bufio = NULL;	/* Keep ebs_dtr() from destroying ERR_PTR. */
		goto bad;
	}

	/* Ordered workqueue serializes __ebs_process_bios() invocations. */
	ec->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!ec->wq) {
		ti->error = "Cannot create dm-" DM_MSG_PREFIX " workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ec->block_shift = __ffs(ec->u_bs);	/* u_bs is a power of 2, so __ffs == log2. */
	INIT_WORK(&ec->ws, &__ebs_process_bios);
	bio_list_init(&ec->bios_in);
	spin_lock_init(&ec->lock);

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_secure_erase_bios = 0;
	ti->num_write_zeroes_bios = 0;
	return 0;
bad:
	ebs_dtr(ti);
	return r;
}
347d3c7b35cSHeinz Mauelshagen 
/*
 * Destructor — also used by ebs_ctr()'s error path on a partially-built
 * context, hence the NULL checks.  Order matters: flush/stop the worker
 * before destroying the bufio client it uses, then release the device.
 */
static void ebs_dtr(struct dm_target *ti)
{
	struct ebs_c *ec = ti->private;

	if (ec->wq)
		destroy_workqueue(ec->wq);
	if (ec->bufio)
		dm_bufio_client_destroy(ec->bufio);
	if (ec->dev)
		dm_put_device(ti, ec->dev);
	kfree(ec);
}
360d3c7b35cSHeinz Mauelshagen 
ebs_map(struct dm_target * ti,struct bio * bio)361d3c7b35cSHeinz Mauelshagen static int ebs_map(struct dm_target *ti, struct bio *bio)
362d3c7b35cSHeinz Mauelshagen {
363d3c7b35cSHeinz Mauelshagen 	struct ebs_c *ec = ti->private;
364d3c7b35cSHeinz Mauelshagen 
365d3c7b35cSHeinz Mauelshagen 	bio_set_dev(bio, ec->dev->bdev);
366d3c7b35cSHeinz Mauelshagen 	bio->bi_iter.bi_sector = ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
367d3c7b35cSHeinz Mauelshagen 
3684cb6f226SJohn Dorminy 	if (unlikely(bio_op(bio) == REQ_OP_FLUSH))
369d3c7b35cSHeinz Mauelshagen 		return DM_MAPIO_REMAPPED;
370d3c7b35cSHeinz Mauelshagen 	/*
371d3c7b35cSHeinz Mauelshagen 	 * Only queue for bufio processing in case of partial or overlapping buffers
372d3c7b35cSHeinz Mauelshagen 	 * -or-
373d3c7b35cSHeinz Mauelshagen 	 * emulation with ebs == ubs aiming for tests of dm-bufio overhead.
374d3c7b35cSHeinz Mauelshagen 	 */
375d3c7b35cSHeinz Mauelshagen 	if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
376d3c7b35cSHeinz Mauelshagen 		   __block_mod(bio_end_sector(bio), ec->u_bs) ||
377d3c7b35cSHeinz Mauelshagen 		   ec->e_bs == ec->u_bs)) {
378d3c7b35cSHeinz Mauelshagen 		spin_lock_irq(&ec->lock);
379d3c7b35cSHeinz Mauelshagen 		bio_list_add(&ec->bios_in, bio);
380d3c7b35cSHeinz Mauelshagen 		spin_unlock_irq(&ec->lock);
381d3c7b35cSHeinz Mauelshagen 
382d3c7b35cSHeinz Mauelshagen 		queue_work(ec->wq, &ec->ws);
383d3c7b35cSHeinz Mauelshagen 
384d3c7b35cSHeinz Mauelshagen 		return DM_MAPIO_SUBMITTED;
385d3c7b35cSHeinz Mauelshagen 	}
386d3c7b35cSHeinz Mauelshagen 
387d3c7b35cSHeinz Mauelshagen 	/* Forget any buffer content relative to this direct backing device I/O. */
388d3c7b35cSHeinz Mauelshagen 	__ebs_forget_bio(ec, bio);
389d3c7b35cSHeinz Mauelshagen 
390d3c7b35cSHeinz Mauelshagen 	return DM_MAPIO_REMAPPED;
391d3c7b35cSHeinz Mauelshagen }
392d3c7b35cSHeinz Mauelshagen 
/* Report target status; only STATUSTYPE_TABLE emits the table-line arguments. */
static void ebs_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct ebs_c *ec = ti->private;

	switch (type) {
	case STATUSTYPE_TABLE:
		/* Echo <ubs> only when it was given on the table line. */
		if (ec->u_bs_set)
			snprintf(result, maxlen, "%s %llu %u %u", ec->dev->name,
				 (unsigned long long) ec->start, ec->e_bs, ec->u_bs);
		else
			snprintf(result, maxlen, "%s %llu %u", ec->dev->name,
				 (unsigned long long) ec->start, ec->e_bs);
		break;
	case STATUSTYPE_INFO:
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}
411d3c7b35cSHeinz Mauelshagen 
ebs_prepare_ioctl(struct dm_target * ti,struct block_device ** bdev)412d3c7b35cSHeinz Mauelshagen static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
413d3c7b35cSHeinz Mauelshagen {
414d3c7b35cSHeinz Mauelshagen 	struct ebs_c *ec = ti->private;
415d3c7b35cSHeinz Mauelshagen 	struct dm_dev *dev = ec->dev;
416d3c7b35cSHeinz Mauelshagen 
417d3c7b35cSHeinz Mauelshagen 	/*
418d3c7b35cSHeinz Mauelshagen 	 * Only pass ioctls through if the device sizes match exactly.
419d3c7b35cSHeinz Mauelshagen 	 */
420d3c7b35cSHeinz Mauelshagen 	*bdev = dev->bdev;
4216dcbb52cSChristoph Hellwig 	return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
422d3c7b35cSHeinz Mauelshagen }
423d3c7b35cSHeinz Mauelshagen 
ebs_io_hints(struct dm_target * ti,struct queue_limits * limits)424d3c7b35cSHeinz Mauelshagen static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
425d3c7b35cSHeinz Mauelshagen {
426d3c7b35cSHeinz Mauelshagen 	struct ebs_c *ec = ti->private;
427d3c7b35cSHeinz Mauelshagen 
428d3c7b35cSHeinz Mauelshagen 	limits->logical_block_size = to_bytes(ec->e_bs);
429d3c7b35cSHeinz Mauelshagen 	limits->physical_block_size = to_bytes(ec->u_bs);
430d3c7b35cSHeinz Mauelshagen 	limits->alignment_offset = limits->physical_block_size;
431d3c7b35cSHeinz Mauelshagen 	blk_limits_io_min(limits, limits->logical_block_size);
432d3c7b35cSHeinz Mauelshagen }
433d3c7b35cSHeinz Mauelshagen 
/* Report the single underlying device/range to the device-mapper core. */
static int ebs_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct ebs_c *ec = ti->private;

	return fn(ti, ec->dev, ec->start, ti->len, data);
}
441d3c7b35cSHeinz Mauelshagen 
442d3c7b35cSHeinz Mauelshagen static struct target_type ebs_target = {
443d3c7b35cSHeinz Mauelshagen 	.name		 = "ebs",
444a5089a95SHeinz Mauelshagen 	.version	 = {1, 0, 1},
445d3c7b35cSHeinz Mauelshagen 	.features	 = DM_TARGET_PASSES_INTEGRITY,
446d3c7b35cSHeinz Mauelshagen 	.module		 = THIS_MODULE,
447d3c7b35cSHeinz Mauelshagen 	.ctr		 = ebs_ctr,
448d3c7b35cSHeinz Mauelshagen 	.dtr		 = ebs_dtr,
449d3c7b35cSHeinz Mauelshagen 	.map		 = ebs_map,
450d3c7b35cSHeinz Mauelshagen 	.status		 = ebs_status,
451d3c7b35cSHeinz Mauelshagen 	.io_hints	 = ebs_io_hints,
452d3c7b35cSHeinz Mauelshagen 	.prepare_ioctl	 = ebs_prepare_ioctl,
453d3c7b35cSHeinz Mauelshagen 	.iterate_devices = ebs_iterate_devices,
454d3c7b35cSHeinz Mauelshagen };
/* Generate module init/exit that (un)register the ebs target. */
module_dm(ebs);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " emulated block size target");
MODULE_LICENSE("GPL");
460