xref: /linux/drivers/md/dm-verity-target.c (revision c375b223)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.
4  *
5  * Author: Mikulas Patocka <mpatocka@redhat.com>
6  *
7  * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
8  *
9  * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
10  * the default prefetch value. Data are read in "prefetch_cluster" chunks from the
11  * hash device. Setting this greatly improves performance when data and hash
12  * are on the same disk on different partitions on devices with poor random
13  * access behavior.
14  */
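
/*
 * For example (a sketch, not taken from this file): assuming the module is
 * loaded as dm_verity, as the sysfs path above suggests, the prefetch size
 * could be tuned at runtime with
 *
 *	echo 1048576 > /sys/module/dm_verity/parameters/prefetch_cluster
 *
 * or set at load time with "modprobe dm_verity prefetch_cluster=1048576".
 * The default is DM_VERITY_DEFAULT_PREFETCH_SIZE (262144 bytes).
 */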
15 
16 #include "dm-verity.h"
17 #include "dm-verity-fec.h"
18 #include "dm-verity-verify-sig.h"
19 #include "dm-audit.h"
20 #include <linux/module.h>
21 #include <linux/reboot.h>
22 #include <linux/scatterlist.h>
23 #include <linux/string.h>
24 #include <linux/jump_label.h>
25 
26 #define DM_MSG_PREFIX			"verity"
27 
28 #define DM_VERITY_ENV_LENGTH		42
29 #define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"
30 
31 #define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144
32 
33 #define DM_VERITY_MAX_CORRUPTED_ERRS	100
34 
35 #define DM_VERITY_OPT_LOGGING		"ignore_corruption"
36 #define DM_VERITY_OPT_RESTART		"restart_on_corruption"
37 #define DM_VERITY_OPT_PANIC		"panic_on_corruption"
38 #define DM_VERITY_OPT_IGN_ZEROES	"ignore_zero_blocks"
39 #define DM_VERITY_OPT_AT_MOST_ONCE	"check_at_most_once"
40 #define DM_VERITY_OPT_TASKLET_VERIFY	"try_verify_in_tasklet"
41 
42 #define DM_VERITY_OPTS_MAX		(4 + DM_VERITY_OPTS_FEC + \
43 					 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
44 
45 static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
46 
47 module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
48 
49 static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
50 
51 struct dm_verity_prefetch_work {
52 	struct work_struct work;
53 	struct dm_verity *v;
54 	unsigned short ioprio;
55 	sector_t block;
56 	unsigned int n_blocks;
57 };
58 
59 /*
60  * Auxiliary structure appended to each dm-bufio buffer. If the value
61  * hash_verified is nonzero, the hash of the block has been verified.
62  *
63  * The variable hash_verified is set to 0 when allocating the buffer, then
64  * it can be changed to 1 and it is never reset to 0 again.
65  *
66  * There is no lock around this value; a race can at worst cause multiple
67  * processes to verify the hash of the same buffer simultaneously and all
68  * of them to write 1 to hash_verified.
69  * This condition is harmless, so we don't need locking.
70  */
71 struct buffer_aux {
72 	int hash_verified;
73 };
74 
75 /*
76  * Initialize struct buffer_aux for a freshly created buffer.
77  */
78 static void dm_bufio_alloc_callback(struct dm_buffer *buf)
79 {
80 	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
81 
82 	aux->hash_verified = 0;
83 }
84 
85 /*
86  * Translate input sector number to the sector number on the target device.
87  */
88 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
89 {
90 	return v->data_start + dm_target_offset(v->ti, bi_sector);
91 }
92 
93 /*
94  * Return hash position of a specified block at a specified tree level
95  * (0 is the lowest level).
96  * The lowest "hash_per_block_bits"-bits of the result denote hash position
97  * inside a hash block. The remaining bits denote location of the hash block.
98  */
99 static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
100 					 int level)
101 {
102 	return block >> (level * v->hash_per_block_bits);
103 }
104 
105 static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
106 				const u8 *data, size_t len,
107 				struct crypto_wait *wait)
108 {
109 	struct scatterlist sg;
110 
111 	if (likely(!is_vmalloc_addr(data))) {
112 		sg_init_one(&sg, data, len);
113 		ahash_request_set_crypt(req, &sg, NULL, len);
114 		return crypto_wait_req(crypto_ahash_update(req), wait);
115 	}
116 
117 	do {
118 		int r;
119 		size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
120 
121 		flush_kernel_vmap_range((void *)data, this_step);
122 		sg_init_table(&sg, 1);
123 		sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
124 		ahash_request_set_crypt(req, &sg, NULL, this_step);
125 		r = crypto_wait_req(crypto_ahash_update(req), wait);
126 		if (unlikely(r))
127 			return r;
128 		data += this_step;
129 		len -= this_step;
130 	} while (len);
131 
132 	return 0;
133 }
134 
135 /*
136  * Wrapper for crypto_ahash_init, which handles verity salting.
137  */
138 static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
139 				struct crypto_wait *wait, bool may_sleep)
140 {
141 	int r;
142 
143 	ahash_request_set_tfm(req, v->tfm);
144 	ahash_request_set_callback(req,
145 		may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
146 		crypto_req_done, (void *)wait);
147 	crypto_init_wait(wait);
148 
149 	r = crypto_wait_req(crypto_ahash_init(req), wait);
150 
151 	if (unlikely(r < 0)) {
152 		if (r != -ENOMEM)
153 			DMERR("crypto_ahash_init failed: %d", r);
154 		return r;
155 	}
156 
157 	if (likely(v->salt_size && (v->version >= 1)))
158 		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
159 
160 	return r;
161 }
162 
163 static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
164 			     u8 *digest, struct crypto_wait *wait)
165 {
166 	int r;
167 
168 	if (unlikely(v->salt_size && (!v->version))) {
169 		r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
170 
171 		if (r < 0) {
172 			DMERR("%s failed updating salt: %d", __func__, r);
173 			goto out;
174 		}
175 	}
176 
177 	ahash_request_set_crypt(req, NULL, digest, 0);
178 	r = crypto_wait_req(crypto_ahash_final(req), wait);
179 out:
180 	return r;
181 }
182 
183 int verity_hash(struct dm_verity *v, struct ahash_request *req,
184 		const u8 *data, size_t len, u8 *digest, bool may_sleep)
185 {
186 	int r;
187 	struct crypto_wait wait;
188 
189 	r = verity_hash_init(v, req, &wait, may_sleep);
190 	if (unlikely(r < 0))
191 		goto out;
192 
193 	r = verity_hash_update(v, req, data, len, &wait);
194 	if (unlikely(r < 0))
195 		goto out;
196 
197 	r = verity_hash_final(v, req, digest, &wait);
198 
199 out:
200 	return r;
201 }
202 
203 static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
204 				 sector_t *hash_block, unsigned int *offset)
205 {
206 	sector_t position = verity_position_at_level(v, block, level);
207 	unsigned int idx;
208 
209 	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
210 
211 	if (!offset)
212 		return;
213 
214 	idx = position & ((1 << v->hash_per_block_bits) - 1);
215 	if (!v->version)
216 		*offset = idx * v->digest_size;
217 	else
218 		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
219 }
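
/*
 * Worked example for the two helpers above (illustrative numbers only):
 * with 4096-byte hash blocks and a 32-byte digest such as sha256,
 * hash_per_block_bits = __fls(4096 / 32) = 7, i.e. 128 hashes per hash block.
 * For data block 1000 at level 0:
 *
 *	position   = 1000 >> (0 * 7) = 1000
 *	hash_block = v->hash_level_block[0] + (1000 >> 7)	(7th block of level 0)
 *	idx        = 1000 & 127 = 104
 *	offset     = 104 * 32 = 3328			(version 0)
 *	offset     = 104 << (12 - 7) = 3328		(version >= 1)
 */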
220 
221 /*
222  * Handle verification errors.
223  */
224 static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
225 			     unsigned long long block)
226 {
227 	char verity_env[DM_VERITY_ENV_LENGTH];
228 	char *envp[] = { verity_env, NULL };
229 	const char *type_str = "";
230 	struct mapped_device *md = dm_table_get_md(v->ti->table);
231 
232 	/* Corruption should be visible in device status in all modes */
233 	v->hash_failed = true;
234 
235 	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
236 		goto out;
237 
238 	v->corrupted_errs++;
239 
240 	switch (type) {
241 	case DM_VERITY_BLOCK_TYPE_DATA:
242 		type_str = "data";
243 		break;
244 	case DM_VERITY_BLOCK_TYPE_METADATA:
245 		type_str = "metadata";
246 		break;
247 	default:
248 		BUG();
249 	}
250 
251 	DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
252 		    type_str, block);
253 
254 	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) {
255 		DMERR("%s: reached maximum errors", v->data_dev->name);
256 		dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0);
257 	}
258 
259 	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
260 		DM_VERITY_ENV_VAR_NAME, type, block);
261 
262 	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);
263 
264 out:
265 	if (v->mode == DM_VERITY_MODE_LOGGING)
266 		return 0;
267 
268 	if (v->mode == DM_VERITY_MODE_RESTART)
269 		kernel_restart("dm-verity device corrupted");
270 
271 	if (v->mode == DM_VERITY_MODE_PANIC)
272 		panic("dm-verity device corrupted");
273 
274 	return 1;
275 }
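
/*
 * The uevent sent above carries a single "name=value" environment string.
 * As a sketch (assuming DM_VERITY_BLOCK_TYPE_DATA has numeric value 0), a
 * corrupted data block 12345 would produce
 *
 *	DM_VERITY_ERR_BLOCK_NR=0,12345
 *
 * delivered with a KOBJ_CHANGE uevent on the mapped device.
 */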
276 
277 /*
278  * Verify hash of a metadata block pertaining to the specified data block
279  * ("block" argument) at a specified level ("level" argument).
280  *
281  * On successful return, verity_io_want_digest(v, io) contains the hash value
282  * for a lower tree level or for the data block (if we're at the lowest level).
283  *
284  * If "skip_unverified" is true, an unverified buffer is skipped and 1 is returned.
285  * If "skip_unverified" is false, an unverified buffer is hashed and verified
286  * against the current value of verity_io_want_digest(v, io).
287  */
288 static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
289 			       sector_t block, int level, bool skip_unverified,
290 			       u8 *want_digest)
291 {
292 	struct dm_buffer *buf;
293 	struct buffer_aux *aux;
294 	u8 *data;
295 	int r;
296 	sector_t hash_block;
297 	unsigned int offset;
298 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
299 
300 	verity_hash_at_level(v, block, level, &hash_block, &offset);
301 
302 	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
303 		data = dm_bufio_get(v->bufio, hash_block, &buf);
304 		if (data == NULL) {
305 			/*
306 			 * We are in BH context and the hash was not in the bufio
307 			 * cache. Return early and resume execution from a
308 			 * work-queue to read the hash from disk.
309 			 */
310 			return -EAGAIN;
311 		}
312 	} else {
313 		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
314 						&buf, bio_prio(bio));
315 	}
316 
317 	if (IS_ERR(data))
318 		return PTR_ERR(data);
319 
320 	aux = dm_bufio_get_aux_data(buf);
321 
322 	if (!aux->hash_verified) {
323 		if (skip_unverified) {
324 			r = 1;
325 			goto release_ret_r;
326 		}
327 
328 		r = verity_hash(v, verity_io_hash_req(v, io),
329 				data, 1 << v->hash_dev_block_bits,
330 				verity_io_real_digest(v, io), !io->in_bh);
331 		if (unlikely(r < 0))
332 			goto release_ret_r;
333 
334 		if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
335 				  v->digest_size) == 0))
336 			aux->hash_verified = 1;
337 		else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
338 			/*
339 			 * Error handling code (FEC included) cannot be run in a
340 			 * tasklet since it may sleep, so fall back to the work-queue.
341 			 */
342 			r = -EAGAIN;
343 			goto release_ret_r;
344 		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
345 					     hash_block, data, NULL) == 0)
346 			aux->hash_verified = 1;
347 		else if (verity_handle_err(v,
348 					   DM_VERITY_BLOCK_TYPE_METADATA,
349 					   hash_block)) {
350 			struct bio *bio =
351 				dm_bio_from_per_bio_data(io,
352 							 v->ti->per_io_data_size);
353 			dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
354 					 block, 0);
355 			r = -EIO;
356 			goto release_ret_r;
357 		}
358 	}
359 
360 	data += offset;
361 	memcpy(want_digest, data, v->digest_size);
362 	r = 0;
363 
364 release_ret_r:
365 	dm_bufio_release(buf);
366 	return r;
367 }
368 
369 /*
370  * Find a hash for a given block, write it to digest and verify the integrity
371  * of the hash tree if necessary.
372  */
373 int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
374 			  sector_t block, u8 *digest, bool *is_zero)
375 {
376 	int r = 0, i;
377 
378 	if (likely(v->levels)) {
379 		/*
380 		 * First, we try to get the requested hash for
381 		 * the current block. If the hash block itself is
382 		 * verified, zero is returned. If it isn't, this
383 		 * function returns 1 and we fall back to whole
384 		 * chain verification.
385 		 */
386 		r = verity_verify_level(v, io, block, 0, true, digest);
387 		if (likely(r <= 0))
388 			goto out;
389 	}
390 
391 	memcpy(digest, v->root_digest, v->digest_size);
392 
393 	for (i = v->levels - 1; i >= 0; i--) {
394 		r = verity_verify_level(v, io, block, i, false, digest);
395 		if (unlikely(r))
396 			goto out;
397 	}
398 out:
399 	if (!r && v->zero_digest)
400 		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
401 	else
402 		*is_zero = false;
403 
404 	return r;
405 }
406 
407 /*
408  * Hash one data block of the given bio, starting at *iter
409  */
410 static int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
411 			       struct bvec_iter *iter, struct crypto_wait *wait)
412 {
413 	unsigned int todo = 1 << v->data_dev_block_bits;
414 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
415 	struct scatterlist sg;
416 	struct ahash_request *req = verity_io_hash_req(v, io);
417 
418 	do {
419 		int r;
420 		unsigned int len;
421 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
422 
423 		sg_init_table(&sg, 1);
424 
425 		len = bv.bv_len;
426 
427 		if (likely(len >= todo))
428 			len = todo;
429 		/*
430 		 * Operating on a single page at a time looks suboptimal
431 		 * until you consider the typical block size is 4,096B.
432 		 * Going through this loop twice should be very rare.
433 		 */
434 		sg_set_page(&sg, bv.bv_page, len, bv.bv_offset);
435 		ahash_request_set_crypt(req, &sg, NULL, len);
436 		r = crypto_wait_req(crypto_ahash_update(req), wait);
437 
438 		if (unlikely(r < 0)) {
439 			DMERR("%s crypto op failed: %d", __func__, r);
440 			return r;
441 		}
442 
443 		bio_advance_iter(bio, iter, len);
444 		todo -= len;
445 	} while (todo);
446 
447 	return 0;
448 }
449 
450 /*
451  * Calls function process for 1 << v->data_dev_block_bits bytes in the bio_vec
452  * starting from iter.
453  */
454 int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
455 			struct bvec_iter *iter,
456 			int (*process)(struct dm_verity *v,
457 				       struct dm_verity_io *io, u8 *data,
458 				       size_t len))
459 {
460 	unsigned int todo = 1 << v->data_dev_block_bits;
461 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
462 
463 	do {
464 		int r;
465 		u8 *page;
466 		unsigned int len;
467 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
468 
469 		page = bvec_kmap_local(&bv);
470 		len = bv.bv_len;
471 
472 		if (likely(len >= todo))
473 			len = todo;
474 
475 		r = process(v, io, page, len);
476 		kunmap_local(page);
477 
478 		if (r < 0)
479 			return r;
480 
481 		bio_advance_iter(bio, iter, len);
482 		todo -= len;
483 	} while (todo);
484 
485 	return 0;
486 }
487 
488 static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
489 			       u8 *data, size_t len)
490 {
491 	memcpy(data, io->recheck_buffer, len);
492 	io->recheck_buffer += len;
493 
494 	return 0;
495 }
496 
497 static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
498 				   struct bvec_iter start, sector_t cur_block)
499 {
500 	struct page *page;
501 	void *buffer;
502 	int r;
503 	struct dm_io_request io_req;
504 	struct dm_io_region io_loc;
505 
506 	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
507 	buffer = page_to_virt(page);
508 
509 	io_req.bi_opf = REQ_OP_READ;
510 	io_req.mem.type = DM_IO_KMEM;
511 	io_req.mem.ptr.addr = buffer;
512 	io_req.notify.fn = NULL;
513 	io_req.client = v->io;
514 	io_loc.bdev = v->data_dev->bdev;
515 	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
516 	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
517 	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
518 	if (unlikely(r))
519 		goto free_ret;
520 
521 	r = verity_hash(v, verity_io_hash_req(v, io), buffer,
522 			1 << v->data_dev_block_bits,
523 			verity_io_real_digest(v, io), true);
524 	if (unlikely(r))
525 		goto free_ret;
526 
527 	if (memcmp(verity_io_real_digest(v, io),
528 		   verity_io_want_digest(v, io), v->digest_size)) {
529 		r = -EIO;
530 		goto free_ret;
531 	}
532 
533 	io->recheck_buffer = buffer;
534 	r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
535 	if (unlikely(r))
536 		goto free_ret;
537 
538 	r = 0;
539 free_ret:
540 	mempool_free(page, &v->recheck_pool);
541 
542 	return r;
543 }
544 
545 static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
546 			  u8 *data, size_t len)
547 {
548 	memset(data, 0, len);
549 	return 0;
550 }
551 
552 /*
553  * Moves the bio iter one data block forward.
554  */
555 static inline void verity_bv_skip_block(struct dm_verity *v,
556 					struct dm_verity_io *io,
557 					struct bvec_iter *iter)
558 {
559 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
560 
561 	bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
562 }
563 
564 /*
565  * Verify one "dm_verity_io" structure.
566  */
567 static int verity_verify_io(struct dm_verity_io *io)
568 {
569 	bool is_zero;
570 	struct dm_verity *v = io->v;
571 	struct bvec_iter start;
572 	struct bvec_iter iter_copy;
573 	struct bvec_iter *iter;
574 	struct crypto_wait wait;
575 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
576 	unsigned int b;
577 
578 	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
579 		/*
580 		 * Copy the iterator in case we need to restart
581 		 * verification in a work-queue.
582 		 */
583 		iter_copy = io->iter;
584 		iter = &iter_copy;
585 	} else
586 		iter = &io->iter;
587 
588 	for (b = 0; b < io->n_blocks; b++) {
589 		int r;
590 		sector_t cur_block = io->block + b;
591 		struct ahash_request *req = verity_io_hash_req(v, io);
592 
593 		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
594 		    likely(test_bit(cur_block, v->validated_blocks))) {
595 			verity_bv_skip_block(v, io, iter);
596 			continue;
597 		}
598 
599 		r = verity_hash_for_block(v, io, cur_block,
600 					  verity_io_want_digest(v, io),
601 					  &is_zero);
602 		if (unlikely(r < 0))
603 			return r;
604 
605 		if (is_zero) {
606 			/*
607 			 * If we expect a zero block, don't validate, just
608 			 * return zeros.
609 			 */
610 			r = verity_for_bv_block(v, io, iter,
611 						verity_bv_zero);
612 			if (unlikely(r < 0))
613 				return r;
614 
615 			continue;
616 		}
617 
618 		r = verity_hash_init(v, req, &wait, !io->in_bh);
619 		if (unlikely(r < 0))
620 			return r;
621 
622 		start = *iter;
623 		r = verity_for_io_block(v, io, iter, &wait);
624 		if (unlikely(r < 0))
625 			return r;
626 
627 		r = verity_hash_final(v, req, verity_io_real_digest(v, io),
628 					&wait);
629 		if (unlikely(r < 0))
630 			return r;
631 
632 		if (likely(memcmp(verity_io_real_digest(v, io),
633 				  verity_io_want_digest(v, io), v->digest_size) == 0)) {
634 			if (v->validated_blocks)
635 				set_bit(cur_block, v->validated_blocks);
636 			continue;
637 		} else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
638 			/*
639 			 * Error handling code (FEC included) cannot be run in a
640 			 * tasklet since it may sleep, so fall back to the work-queue.
641 			 */
642 			return -EAGAIN;
643 		} else if (verity_recheck(v, io, start, cur_block) == 0) {
644 			if (v->validated_blocks)
645 				set_bit(cur_block, v->validated_blocks);
646 			continue;
647 #if defined(CONFIG_DM_VERITY_FEC)
648 		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
649 					     cur_block, NULL, &start) == 0) {
650 			continue;
651 #endif
652 		} else {
653 			if (bio->bi_status) {
654 				/*
655 				 * Error correction failed; just return an error.
656 				 */
657 				return -EIO;
658 			}
659 			if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
660 					      cur_block)) {
661 				dm_audit_log_bio(DM_MSG_PREFIX, "verify-data",
662 						 bio, cur_block, 0);
663 				return -EIO;
664 			}
665 		}
666 	}
667 
668 	return 0;
669 }
670 
671 /*
672  * Skip verity work in response to I/O error when system is shutting down.
673  */
674 static inline bool verity_is_system_shutting_down(void)
675 {
676 	return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
677 		|| system_state == SYSTEM_RESTART;
678 }
679 
680 /*
681  * End one "io" structure with a given error.
682  */
683 static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
684 {
685 	struct dm_verity *v = io->v;
686 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
687 
688 	bio->bi_end_io = io->orig_bi_end_io;
689 	bio->bi_status = status;
690 
691 	if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
692 		verity_fec_finish_io(io);
693 
694 	bio_endio(bio);
695 }
696 
697 static void verity_work(struct work_struct *w)
698 {
699 	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
700 
701 	io->in_bh = false;
702 
703 	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
704 }
705 
706 static void verity_bh_work(struct work_struct *w)
707 {
708 	struct dm_verity_io *io = container_of(w, struct dm_verity_io, bh_work);
709 	int err;
710 
711 	io->in_bh = true;
712 	err = verity_verify_io(io);
713 	if (err == -EAGAIN || err == -ENOMEM) {
714 		/* fallback to retrying with work-queue */
715 		INIT_WORK(&io->work, verity_work);
716 		queue_work(io->v->verify_wq, &io->work);
717 		return;
718 	}
719 
720 	verity_finish_io(io, errno_to_blk_status(err));
721 }
722 
723 static void verity_end_io(struct bio *bio)
724 {
725 	struct dm_verity_io *io = bio->bi_private;
726 
727 	if (bio->bi_status &&
728 	    (!verity_fec_is_enabled(io->v) ||
729 	     verity_is_system_shutting_down() ||
730 	     (bio->bi_opf & REQ_RAHEAD))) {
731 		verity_finish_io(io, bio->bi_status);
732 		return;
733 	}
734 
735 	if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
736 		INIT_WORK(&io->bh_work, verity_bh_work);
737 		queue_work(system_bh_wq, &io->bh_work);
738 	} else {
739 		INIT_WORK(&io->work, verity_work);
740 		queue_work(io->v->verify_wq, &io->work);
741 	}
742 }
743 
744 /*
745  * Prefetch buffers for the specified io.
746  * The root buffer is not prefetched; it is assumed that it will be cached
747  * all the time.
748  */
749 static void verity_prefetch_io(struct work_struct *work)
750 {
751 	struct dm_verity_prefetch_work *pw =
752 		container_of(work, struct dm_verity_prefetch_work, work);
753 	struct dm_verity *v = pw->v;
754 	int i;
755 
756 	for (i = v->levels - 2; i >= 0; i--) {
757 		sector_t hash_block_start;
758 		sector_t hash_block_end;
759 
760 		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
761 		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
762 
763 		if (!i) {
764 			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
765 
766 			cluster >>= v->data_dev_block_bits;
767 			if (unlikely(!cluster))
768 				goto no_prefetch_cluster;
769 
770 			if (unlikely(cluster & (cluster - 1)))
771 				cluster = 1 << __fls(cluster);
772 
773 			hash_block_start &= ~(sector_t)(cluster - 1);
774 			hash_block_end |= cluster - 1;
775 			if (unlikely(hash_block_end >= v->hash_blocks))
776 				hash_block_end = v->hash_blocks - 1;
777 		}
778 no_prefetch_cluster:
779 		dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
780 					hash_block_end - hash_block_start + 1,
781 					pw->ioprio);
782 	}
783 
784 	kfree(pw);
785 }
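
/*
 * Illustration of the level-0 clustering above (assuming 4096-byte data
 * blocks and the default 262144-byte prefetch_cluster): cluster becomes
 * 262144 >> 12 = 64 blocks, already a power of two, so the prefetch range
 * is simply widened to a 64-block aligned window:
 *
 *	hash_block_start &= ~(sector_t)63;
 *	hash_block_end   |= 63;		(then clamped to v->hash_blocks - 1)
 */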
786 
787 static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
788 				   unsigned short ioprio)
789 {
790 	sector_t block = io->block;
791 	unsigned int n_blocks = io->n_blocks;
792 	struct dm_verity_prefetch_work *pw;
793 
794 	if (v->validated_blocks) {
795 		while (n_blocks && test_bit(block, v->validated_blocks)) {
796 			block++;
797 			n_blocks--;
798 		}
799 		while (n_blocks && test_bit(block + n_blocks - 1,
800 					    v->validated_blocks))
801 			n_blocks--;
802 		if (!n_blocks)
803 			return;
804 	}
805 
806 	pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
807 		GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
808 
809 	if (!pw)
810 		return;
811 
812 	INIT_WORK(&pw->work, verity_prefetch_io);
813 	pw->v = v;
814 	pw->block = block;
815 	pw->n_blocks = n_blocks;
816 	pw->ioprio = ioprio;
817 	queue_work(v->verify_wq, &pw->work);
818 }
819 
820 /*
821  * Bio map function. It allocates dm_verity_io structure and bio vector and
822  * fills them. Then it issues prefetches and the I/O.
823  */
824 static int verity_map(struct dm_target *ti, struct bio *bio)
825 {
826 	struct dm_verity *v = ti->private;
827 	struct dm_verity_io *io;
828 
829 	bio_set_dev(bio, v->data_dev->bdev);
830 	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
831 
832 	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
833 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
834 		DMERR_LIMIT("unaligned io");
835 		return DM_MAPIO_KILL;
836 	}
837 
838 	if (bio_end_sector(bio) >>
839 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
840 		DMERR_LIMIT("io out of range");
841 		return DM_MAPIO_KILL;
842 	}
843 
844 	if (bio_data_dir(bio) == WRITE)
845 		return DM_MAPIO_KILL;
846 
847 	io = dm_per_bio_data(bio, ti->per_io_data_size);
848 	io->v = v;
849 	io->orig_bi_end_io = bio->bi_end_io;
850 	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
851 	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
852 
853 	bio->bi_end_io = verity_end_io;
854 	bio->bi_private = io;
855 	io->iter = bio->bi_iter;
856 
857 	verity_fec_init_io(io);
858 
859 	verity_submit_prefetch(v, io, bio_prio(bio));
860 
861 	submit_bio_noacct(bio);
862 
863 	return DM_MAPIO_SUBMITTED;
864 }
865 
866 /*
867  * Status: V (valid) or C (corruption found)
868  */
869 static void verity_status(struct dm_target *ti, status_type_t type,
870 			  unsigned int status_flags, char *result, unsigned int maxlen)
871 {
872 	struct dm_verity *v = ti->private;
873 	unsigned int args = 0;
874 	unsigned int sz = 0;
875 	unsigned int x;
876 
877 	switch (type) {
878 	case STATUSTYPE_INFO:
879 		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
880 		break;
881 	case STATUSTYPE_TABLE:
882 		DMEMIT("%u %s %s %u %u %llu %llu %s ",
883 			v->version,
884 			v->data_dev->name,
885 			v->hash_dev->name,
886 			1 << v->data_dev_block_bits,
887 			1 << v->hash_dev_block_bits,
888 			(unsigned long long)v->data_blocks,
889 			(unsigned long long)v->hash_start,
890 			v->alg_name
891 			);
892 		for (x = 0; x < v->digest_size; x++)
893 			DMEMIT("%02x", v->root_digest[x]);
894 		DMEMIT(" ");
895 		if (!v->salt_size)
896 			DMEMIT("-");
897 		else
898 			for (x = 0; x < v->salt_size; x++)
899 				DMEMIT("%02x", v->salt[x]);
900 		if (v->mode != DM_VERITY_MODE_EIO)
901 			args++;
902 		if (verity_fec_is_enabled(v))
903 			args += DM_VERITY_OPTS_FEC;
904 		if (v->zero_digest)
905 			args++;
906 		if (v->validated_blocks)
907 			args++;
908 		if (v->use_bh_wq)
909 			args++;
910 		if (v->signature_key_desc)
911 			args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
912 		if (!args)
913 			return;
914 		DMEMIT(" %u", args);
915 		if (v->mode != DM_VERITY_MODE_EIO) {
916 			DMEMIT(" ");
917 			switch (v->mode) {
918 			case DM_VERITY_MODE_LOGGING:
919 				DMEMIT(DM_VERITY_OPT_LOGGING);
920 				break;
921 			case DM_VERITY_MODE_RESTART:
922 				DMEMIT(DM_VERITY_OPT_RESTART);
923 				break;
924 			case DM_VERITY_MODE_PANIC:
925 				DMEMIT(DM_VERITY_OPT_PANIC);
926 				break;
927 			default:
928 				BUG();
929 			}
930 		}
931 		if (v->zero_digest)
932 			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
933 		if (v->validated_blocks)
934 			DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
935 		if (v->use_bh_wq)
936 			DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
937 		sz = verity_fec_status_table(v, sz, result, maxlen);
938 		if (v->signature_key_desc)
939 			DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
940 				" %s", v->signature_key_desc);
941 		break;
942 
943 	case STATUSTYPE_IMA:
944 		DMEMIT_TARGET_NAME_VERSION(ti->type);
945 		DMEMIT(",hash_failed=%c", v->hash_failed ? 'C' : 'V');
946 		DMEMIT(",verity_version=%u", v->version);
947 		DMEMIT(",data_device_name=%s", v->data_dev->name);
948 		DMEMIT(",hash_device_name=%s", v->hash_dev->name);
949 		DMEMIT(",verity_algorithm=%s", v->alg_name);
950 
951 		DMEMIT(",root_digest=");
952 		for (x = 0; x < v->digest_size; x++)
953 			DMEMIT("%02x", v->root_digest[x]);
954 
955 		DMEMIT(",salt=");
956 		if (!v->salt_size)
957 			DMEMIT("-");
958 		else
959 			for (x = 0; x < v->salt_size; x++)
960 				DMEMIT("%02x", v->salt[x]);
961 
962 		DMEMIT(",ignore_zero_blocks=%c", v->zero_digest ? 'y' : 'n');
963 		DMEMIT(",check_at_most_once=%c", v->validated_blocks ? 'y' : 'n');
964 		if (v->signature_key_desc)
965 			DMEMIT(",root_hash_sig_key_desc=%s", v->signature_key_desc);
966 
967 		if (v->mode != DM_VERITY_MODE_EIO) {
968 			DMEMIT(",verity_mode=");
969 			switch (v->mode) {
970 			case DM_VERITY_MODE_LOGGING:
971 				DMEMIT(DM_VERITY_OPT_LOGGING);
972 				break;
973 			case DM_VERITY_MODE_RESTART:
974 				DMEMIT(DM_VERITY_OPT_RESTART);
975 				break;
976 			case DM_VERITY_MODE_PANIC:
977 				DMEMIT(DM_VERITY_OPT_PANIC);
978 				break;
979 			default:
980 				DMEMIT("invalid");
981 			}
982 		}
983 		DMEMIT(";");
984 		break;
985 	}
986 }
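
/*
 * Informal example of the formats above (values are placeholders): the
 * STATUSTYPE_INFO output, as shown by "dmsetup status", is a single
 * character, 'V' or 'C'; the STATUSTYPE_TABLE output, as shown by
 * "dmsetup table", mirrors the constructor arguments, e.g.
 *
 *	1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 <root_digest> <salt>
 *
 * followed by the optional feature arguments, if any were set.
 */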
987 
988 static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
989 {
990 	struct dm_verity *v = ti->private;
991 
992 	*bdev = v->data_dev->bdev;
993 
994 	if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
995 		return 1;
996 	return 0;
997 }
998 
999 static int verity_iterate_devices(struct dm_target *ti,
1000 				  iterate_devices_callout_fn fn, void *data)
1001 {
1002 	struct dm_verity *v = ti->private;
1003 
1004 	return fn(ti, v->data_dev, v->data_start, ti->len, data);
1005 }
1006 
1007 static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
1008 {
1009 	struct dm_verity *v = ti->private;
1010 
1011 	if (limits->logical_block_size < 1 << v->data_dev_block_bits)
1012 		limits->logical_block_size = 1 << v->data_dev_block_bits;
1013 
1014 	if (limits->physical_block_size < 1 << v->data_dev_block_bits)
1015 		limits->physical_block_size = 1 << v->data_dev_block_bits;
1016 
1017 	blk_limits_io_min(limits, limits->logical_block_size);
1018 }
1019 
1020 static void verity_dtr(struct dm_target *ti)
1021 {
1022 	struct dm_verity *v = ti->private;
1023 
1024 	if (v->verify_wq)
1025 		destroy_workqueue(v->verify_wq);
1026 
1027 	mempool_exit(&v->recheck_pool);
1028 	if (v->io)
1029 		dm_io_client_destroy(v->io);
1030 
1031 	if (v->bufio)
1032 		dm_bufio_client_destroy(v->bufio);
1033 
1034 	kvfree(v->validated_blocks);
1035 	kfree(v->salt);
1036 	kfree(v->root_digest);
1037 	kfree(v->zero_digest);
1038 
1039 	if (v->tfm)
1040 		crypto_free_ahash(v->tfm);
1041 
1042 	kfree(v->alg_name);
1043 
1044 	if (v->hash_dev)
1045 		dm_put_device(ti, v->hash_dev);
1046 
1047 	if (v->data_dev)
1048 		dm_put_device(ti, v->data_dev);
1049 
1050 	verity_fec_dtr(v);
1051 
1052 	kfree(v->signature_key_desc);
1053 
1054 	if (v->use_bh_wq)
1055 		static_branch_dec(&use_bh_wq_enabled);
1056 
1057 	kfree(v);
1058 
1059 	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
1060 }
1061 
1062 static int verity_alloc_most_once(struct dm_verity *v)
1063 {
1064 	struct dm_target *ti = v->ti;
1065 
1066 	/* the bitset can only handle INT_MAX blocks */
1067 	if (v->data_blocks > INT_MAX) {
1068 		ti->error = "device too large to use check_at_most_once";
1069 		return -E2BIG;
1070 	}
1071 
1072 	v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
1073 				       sizeof(unsigned long),
1074 				       GFP_KERNEL);
1075 	if (!v->validated_blocks) {
1076 		ti->error = "failed to allocate bitset for check_at_most_once";
1077 		return -ENOMEM;
1078 	}
1079 
1080 	return 0;
1081 }
1082 
1083 static int verity_alloc_zero_digest(struct dm_verity *v)
1084 {
1085 	int r = -ENOMEM;
1086 	struct ahash_request *req;
1087 	u8 *zero_data;
1088 
1089 	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);
1090 
1091 	if (!v->zero_digest)
1092 		return r;
1093 
1094 	req = kmalloc(v->ahash_reqsize, GFP_KERNEL);
1095 
1096 	if (!req)
1097 		return r; /* verity_dtr will free zero_digest */
1098 
1099 	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);
1100 
1101 	if (!zero_data)
1102 		goto out;
1103 
1104 	r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
1105 			v->zero_digest, true);
1106 
1107 out:
1108 	kfree(req);
1109 	kfree(zero_data);
1110 
1111 	return r;
1112 }
1113 
1114 static inline bool verity_is_verity_mode(const char *arg_name)
1115 {
1116 	return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
1117 		!strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
1118 		!strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
1119 }
1120 
1121 static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
1122 {
1123 	if (v->mode)
1124 		return -EINVAL;
1125 
1126 	if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
1127 		v->mode = DM_VERITY_MODE_LOGGING;
1128 	else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
1129 		v->mode = DM_VERITY_MODE_RESTART;
1130 	else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
1131 		v->mode = DM_VERITY_MODE_PANIC;
1132 
1133 	return 0;
1134 }
1135 
1136 static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
1137 				 struct dm_verity_sig_opts *verify_args,
1138 				 bool only_modifier_opts)
1139 {
1140 	int r = 0;
1141 	unsigned int argc;
1142 	struct dm_target *ti = v->ti;
1143 	const char *arg_name;
1144 
1145 	static const struct dm_arg _args[] = {
1146 		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
1147 	};
1148 
1149 	r = dm_read_arg_group(_args, as, &argc, &ti->error);
1150 	if (r)
1151 		return -EINVAL;
1152 
1153 	if (!argc)
1154 		return 0;
1155 
1156 	do {
1157 		arg_name = dm_shift_arg(as);
1158 		argc--;
1159 
1160 		if (verity_is_verity_mode(arg_name)) {
1161 			if (only_modifier_opts)
1162 				continue;
1163 			r = verity_parse_verity_mode(v, arg_name);
1164 			if (r) {
1165 				ti->error = "Conflicting error handling parameters";
1166 				return r;
1167 			}
1168 			continue;
1169 
1170 		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
1171 			if (only_modifier_opts)
1172 				continue;
1173 			r = verity_alloc_zero_digest(v);
1174 			if (r) {
1175 				ti->error = "Cannot allocate zero digest";
1176 				return r;
1177 			}
1178 			continue;
1179 
1180 		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
1181 			if (only_modifier_opts)
1182 				continue;
1183 			r = verity_alloc_most_once(v);
1184 			if (r)
1185 				return r;
1186 			continue;
1187 
1188 		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
1189 			v->use_bh_wq = true;
1190 			static_branch_inc(&use_bh_wq_enabled);
1191 			continue;
1192 
1193 		} else if (verity_is_fec_opt_arg(arg_name)) {
1194 			if (only_modifier_opts)
1195 				continue;
1196 			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
1197 			if (r)
1198 				return r;
1199 			continue;
1200 
1201 		} else if (verity_verify_is_sig_opt_arg(arg_name)) {
1202 			if (only_modifier_opts)
1203 				continue;
1204 			r = verity_verify_sig_parse_opt_args(as, v,
1205 							     verify_args,
1206 							     &argc, arg_name);
1207 			if (r)
1208 				return r;
1209 			continue;
1210 
1211 		} else if (only_modifier_opts) {
1212 			/*
1213 			 * Ignore an unrecognized option; it could easily be an
1214 			 * extra argument to an option whose parsing was skipped.
1215 			 * Normal parsing (@only_modifier_opts=false) will
1216 			 * properly parse all options (and their extra args).
1217 			 */
1218 			continue;
1219 		}
1220 
1221 		DMERR("Unrecognized verity feature request: %s", arg_name);
1222 		ti->error = "Unrecognized verity feature request";
1223 		return -EINVAL;
1224 	} while (argc && !r);
1225 
1226 	return r;
1227 }
1228 
1229 /*
1230  * Target parameters:
1231  *	<version>	The current format is version 1.
1232  *			Version 0 is compatible with the original Chromium OS releases.
1233  *	<data device>
1234  *	<hash device>
1235  *	<data block size>
1236  *	<hash block size>
1237  *	<the number of data blocks>
1238  *	<hash start block>
1239  *	<algorithm>
1240  *	<digest>
1241  *	<salt>		Hex string or "-" if no salt.
1242  */
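
/*
 * Hypothetical example invocation (device names, sizes and digests are
 * placeholders, not taken from this file):
 *
 *	dmsetup create vroot --readonly --table \
 *	  "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 <root_digest> <salt>"
 *
 * 262144 data blocks of 4096 bytes correspond to the 2097152 sectors
 * (262144 * 4096 / 512) covered by the table line.
 */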
1243 static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1244 {
1245 	struct dm_verity *v;
1246 	struct dm_verity_sig_opts verify_args = {0};
1247 	struct dm_arg_set as;
1248 	unsigned int num;
1249 	unsigned long long num_ll;
1250 	int r;
1251 	int i;
1252 	sector_t hash_position;
1253 	char dummy;
1254 	char *root_hash_digest_to_validate;
1255 
1256 	v = kzalloc(sizeof(struct dm_verity), GFP_KERNEL);
1257 	if (!v) {
1258 		ti->error = "Cannot allocate verity structure";
1259 		return -ENOMEM;
1260 	}
1261 	ti->private = v;
1262 	v->ti = ti;
1263 
1264 	r = verity_fec_ctr_alloc(v);
1265 	if (r)
1266 		goto bad;
1267 
1268 	if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
1269 		ti->error = "Device must be readonly";
1270 		r = -EINVAL;
1271 		goto bad;
1272 	}
1273 
1274 	if (argc < 10) {
1275 		ti->error = "Not enough arguments";
1276 		r = -EINVAL;
1277 		goto bad;
1278 	}
1279 
1280 	/* Parse optional parameters that modify primary args */
1281 	if (argc > 10) {
1282 		as.argc = argc - 10;
1283 		as.argv = argv + 10;
1284 		r = verity_parse_opt_args(&as, v, &verify_args, true);
1285 		if (r < 0)
1286 			goto bad;
1287 	}
1288 
1289 	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
1290 	    num > 1) {
1291 		ti->error = "Invalid version";
1292 		r = -EINVAL;
1293 		goto bad;
1294 	}
1295 	v->version = num;
1296 
1297 	r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
1298 	if (r) {
1299 		ti->error = "Data device lookup failed";
1300 		goto bad;
1301 	}
1302 
1303 	r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
1304 	if (r) {
1305 		ti->error = "Hash device lookup failed";
1306 		goto bad;
1307 	}
1308 
1309 	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
1310 	    !num || (num & (num - 1)) ||
1311 	    num < bdev_logical_block_size(v->data_dev->bdev) ||
1312 	    num > PAGE_SIZE) {
1313 		ti->error = "Invalid data device block size";
1314 		r = -EINVAL;
1315 		goto bad;
1316 	}
1317 	v->data_dev_block_bits = __ffs(num);
1318 
1319 	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
1320 	    !num || (num & (num - 1)) ||
1321 	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
1322 	    num > INT_MAX) {
1323 		ti->error = "Invalid hash device block size";
1324 		r = -EINVAL;
1325 		goto bad;
1326 	}
1327 	v->hash_dev_block_bits = __ffs(num);
1328 
1329 	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
1330 	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
1331 	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
1332 		ti->error = "Invalid data blocks";
1333 		r = -EINVAL;
1334 		goto bad;
1335 	}
1336 	v->data_blocks = num_ll;
1337 
1338 	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
1339 		ti->error = "Data device is too small";
1340 		r = -EINVAL;
1341 		goto bad;
1342 	}
1343 
1344 	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
1345 	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
1346 	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
1347 		ti->error = "Invalid hash start";
1348 		r = -EINVAL;
1349 		goto bad;
1350 	}
1351 	v->hash_start = num_ll;
1352 
1353 	v->alg_name = kstrdup(argv[7], GFP_KERNEL);
1354 	if (!v->alg_name) {
1355 		ti->error = "Cannot allocate algorithm name";
1356 		r = -ENOMEM;
1357 		goto bad;
1358 	}
1359 
1360 	v->tfm = crypto_alloc_ahash(v->alg_name, 0,
1361 				    v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
1362 	if (IS_ERR(v->tfm)) {
1363 		ti->error = "Cannot initialize hash function";
1364 		r = PTR_ERR(v->tfm);
1365 		v->tfm = NULL;
1366 		goto bad;
1367 	}
1368 
1369 	/*
1370 	 * dm-verity performance can vary greatly depending on which hash
1371 	 * algorithm implementation is used.  Help people debug performance
1372 	 * problems by logging the ->cra_driver_name.
1373 	 */
1374 	DMINFO("%s using implementation \"%s\"", v->alg_name,
1375 	       crypto_hash_alg_common(v->tfm)->base.cra_driver_name);
1376 
1377 	v->digest_size = crypto_ahash_digestsize(v->tfm);
1378 	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
1379 		ti->error = "Digest size too big";
1380 		r = -EINVAL;
1381 		goto bad;
1382 	}
1383 	v->ahash_reqsize = sizeof(struct ahash_request) +
1384 		crypto_ahash_reqsize(v->tfm);
1385 
1386 	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
1387 	if (!v->root_digest) {
1388 		ti->error = "Cannot allocate root digest";
1389 		r = -ENOMEM;
1390 		goto bad;
1391 	}
1392 	if (strlen(argv[8]) != v->digest_size * 2 ||
1393 	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
1394 		ti->error = "Invalid root digest";
1395 		r = -EINVAL;
1396 		goto bad;
1397 	}
1398 	root_hash_digest_to_validate = argv[8];
1399 
1400 	if (strcmp(argv[9], "-")) {
1401 		v->salt_size = strlen(argv[9]) / 2;
1402 		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
1403 		if (!v->salt) {
1404 			ti->error = "Cannot allocate salt";
1405 			r = -ENOMEM;
1406 			goto bad;
1407 		}
1408 		if (strlen(argv[9]) != v->salt_size * 2 ||
1409 		    hex2bin(v->salt, argv[9], v->salt_size)) {
1410 			ti->error = "Invalid salt";
1411 			r = -EINVAL;
1412 			goto bad;
1413 		}
1414 	}
1415 
1416 	argv += 10;
1417 	argc -= 10;
1418 
1419 	/* Optional parameters */
1420 	if (argc) {
1421 		as.argc = argc;
1422 		as.argv = argv;
1423 		r = verity_parse_opt_args(&as, v, &verify_args, false);
1424 		if (r < 0)
1425 			goto bad;
1426 	}
1427 
1428 	/* Root hash signature is an optional parameter */
1429 	r = verity_verify_root_hash(root_hash_digest_to_validate,
1430 				    strlen(root_hash_digest_to_validate),
1431 				    verify_args.sig,
1432 				    verify_args.sig_size);
1433 	if (r < 0) {
1434 		ti->error = "Root hash verification failed";
1435 		goto bad;
1436 	}
1437 	v->hash_per_block_bits =
1438 		__fls((1 << v->hash_dev_block_bits) / v->digest_size);
1439 
1440 	v->levels = 0;
1441 	if (v->data_blocks)
1442 		while (v->hash_per_block_bits * v->levels < 64 &&
1443 		       (unsigned long long)(v->data_blocks - 1) >>
1444 		       (v->hash_per_block_bits * v->levels))
1445 			v->levels++;
1446 
1447 	if (v->levels > DM_VERITY_MAX_LEVELS) {
1448 		ti->error = "Too many tree levels";
1449 		r = -E2BIG;
1450 		goto bad;
1451 	}
1452 
1453 	hash_position = v->hash_start;
1454 	for (i = v->levels - 1; i >= 0; i--) {
1455 		sector_t s;
1456 
1457 		v->hash_level_block[i] = hash_position;
1458 		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
1459 					>> ((i + 1) * v->hash_per_block_bits);
1460 		if (hash_position + s < hash_position) {
1461 			ti->error = "Hash device offset overflow";
1462 			r = -E2BIG;
1463 			goto bad;
1464 		}
1465 		hash_position += s;
1466 	}
1467 	v->hash_blocks = hash_position;
1468 
1469 	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
1470 	if (unlikely(r)) {
1471 		ti->error = "Cannot allocate mempool";
1472 		goto bad;
1473 	}
1474 
1475 	v->io = dm_io_client_create();
1476 	if (IS_ERR(v->io)) {
1477 		r = PTR_ERR(v->io);
1478 		v->io = NULL;
1479 		ti->error = "Cannot allocate dm io";
1480 		goto bad;
1481 	}
1482 
1483 	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1484 		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
1485 		dm_bufio_alloc_callback, NULL,
1486 		v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
1487 	if (IS_ERR(v->bufio)) {
1488 		ti->error = "Cannot initialize dm-bufio";
1489 		r = PTR_ERR(v->bufio);
1490 		v->bufio = NULL;
1491 		goto bad;
1492 	}
1493 
1494 	if (dm_bufio_get_device_size(v->bufio) < v->hash_blocks) {
1495 		ti->error = "Hash device is too small";
1496 		r = -E2BIG;
1497 		goto bad;
1498 	}
1499 
1500 	/*
1501 	 * Using WQ_HIGHPRI improves throughput and completion latency by
1502 	 * reducing wait times when reading from a dm-verity device.
1503 	 *
1504 	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
1505 	 * allows verify_wq to preempt softirq since verification in BH workqueue
1506 	 * allows verify_wq to preempt softirq processing, since verification in a
1507 	 * BH workqueue will fall back to it for error handling (or if the bufio
1508 	 * cache doesn't have the required hashes).
1509 	v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
1510 	if (!v->verify_wq) {
1511 		ti->error = "Cannot allocate workqueue";
1512 		r = -ENOMEM;
1513 		goto bad;
1514 	}
1515 
1516 	ti->per_io_data_size = sizeof(struct dm_verity_io) +
1517 				v->ahash_reqsize + v->digest_size * 2;
1518 
1519 	r = verity_fec_ctr(v);
1520 	if (r)
1521 		goto bad;
1522 
1523 	ti->per_io_data_size = roundup(ti->per_io_data_size,
1524 				       __alignof__(struct dm_verity_io));
1525 
1526 	verity_verify_sig_opts_cleanup(&verify_args);
1527 
1528 	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
1529 
1530 	return 0;
1531 
1532 bad:
1533 
1534 	verity_verify_sig_opts_cleanup(&verify_args);
1535 	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
1536 	verity_dtr(ti);
1537 
1538 	return r;
1539 }
1540 
1541 /*
1542  * Check whether a DM target is a verity target.
1543  */
1544 bool dm_is_verity_target(struct dm_target *ti)
1545 {
1546 	return ti->type->module == THIS_MODULE;
1547 }
1548 
1549 /*
1550  * Get the verity mode (error behavior) of a verity target.
1551  *
1552  * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
1553  * target.
1554  */
1555 int dm_verity_get_mode(struct dm_target *ti)
1556 {
1557 	struct dm_verity *v = ti->private;
1558 
1559 	if (!dm_is_verity_target(ti))
1560 		return -EINVAL;
1561 
1562 	return v->mode;
1563 }
1564 
1565 /*
1566  * Get the root digest of a verity target.
1567  *
1568  * Returns a copy of the root digest, the caller is responsible for
1569  * freeing the memory of the digest.
1570  */
1571 int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
1572 {
1573 	struct dm_verity *v = ti->private;
1574 
1575 	if (!dm_is_verity_target(ti))
1576 		return -EINVAL;
1577 
1578 	*root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
1579 	if (*root_digest == NULL)
1580 		return -ENOMEM;
1581 
1582 	*digest_size = v->digest_size;
1583 
1584 	return 0;
1585 }
1586 
1587 static struct target_type verity_target = {
1588 	.name		= "verity",
1589 	.features	= DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
1590 	.version	= {1, 10, 0},
1591 	.module		= THIS_MODULE,
1592 	.ctr		= verity_ctr,
1593 	.dtr		= verity_dtr,
1594 	.map		= verity_map,
1595 	.status		= verity_status,
1596 	.prepare_ioctl	= verity_prepare_ioctl,
1597 	.iterate_devices = verity_iterate_devices,
1598 	.io_hints	= verity_io_hints,
1599 };
1600 module_dm(verity);
1601 
1602 MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
1603 MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
1604 MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
1605 MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
1606 MODULE_LICENSE("GPL");
1607