1 /*
2  * CDDL HEADER START
3  *
4  * This file and its contents are supplied under the terms of the
5  * Common Development and Distribution License ("CDDL"), version 1.0.
6  * You may only use this file in accordance with the terms of version
7  * 1.0 of the CDDL.
8  *
9  * A full copy of the text of the CDDL should have accompanied this
10  * source.  A copy of the CDDL is also available via the Internet at
11  * http://www.illumos.org/license/CDDL.
12  *
13  * CDDL HEADER END
14  */
15 
16 /*
17  * Copyright (c) 2017, Datto, Inc. All rights reserved.
18  */
19 
20 #include <sys/zio_crypt.h>
21 #include <sys/dmu.h>
22 #include <sys/dmu_objset.h>
23 #include <sys/dnode.h>
24 #include <sys/fs/zfs.h>
25 #include <sys/zio.h>
26 #include <sys/zil.h>
27 #include <sys/sha2.h>
28 #include <sys/hkdf.h>
29 
30 /*
31  * This file is responsible for handling all of the details of generating
32  * encryption parameters and performing encryption and authentication.
33  *
34  * BLOCK ENCRYPTION PARAMETERS:
35  * Encryption / Authentication Algorithm Suite (crypt):
36  * The encryption algorithm, mode, and key length we are going to use. We
37  * currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
38  * keys. All authentication is currently done with SHA512-HMAC.
39  *
40  * Plaintext:
41  * The unencrypted data that we want to encrypt.
42  *
43  * Initialization Vector (IV):
44  * An initialization vector for the encryption algorithms. This is used to
45  * "tweak" the encryption algorithms so that two blocks of the same data are
46  * encrypted into different ciphertext outputs, thus obfuscating block patterns.
47  * The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
48  * never reused with the same encryption key. This value is stored unencrypted
49  * and must simply be provided to the decryption function. We use a 96 bit IV
50  * (as recommended by NIST) for all block encryption. For non-dedup blocks we
51  * derive the IV randomly. The first 64 bits of the IV are stored in the second
52  * word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
53  * blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
54  * of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
55  * of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
56  * level 0 blocks is the number of allocated dnodes in that block. The on-disk
57  * format supports at most 2^15 slots per L0 dnode block, because the maximum
58  * block size is 16MB (2^24). In either case, for level 0 blocks this number
59  * will still be smaller than UINT32_MAX so it is safe to store the IV in the
60  * top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
61  * for the dnode code.
62  *
63  * Master key:
64  * This is the most important secret data of an encrypted dataset. It is used
65  * along with the salt to generate the actual encryption keys via HKDF. We
66  * do not use the master key to directly encrypt any data because there are
67  * theoretical limits on how much data can actually be safely encrypted with
68  * any encryption mode. The master key is stored encrypted on disk with the
69  * user's wrapping key. Its length is determined by the encryption algorithm.
70  * For details on how this is stored see the block comment in dsl_crypt.c
71  *
72  * Salt:
73  * Used as an input to the HKDF function, along with the master key. We use a
74  * 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
75  * can be used for encrypting many blocks, so we cache the current salt and the
76  * associated derived key in zio_crypt_t so we do not need to derive it again
77  * needlessly.
78  *
79  * Encryption Key:
80  * A secret binary key, generated from an HKDF function used to encrypt and
81  * decrypt data.
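 *
 * Concretely, the derivation used throughout this file is the hkdf_sha512()
 * call made in zio_crypt_key_init() below:
 *
 *	hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
 *	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
 *	    keydata_len);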
82  *
83  * Message Authentication Code (MAC):
84  * The MAC is an output of authenticated encryption modes such as AES-GCM and
85  * AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
86  * data on disk and return garbage to the application. Effectively, it is a
87  * checksum that cannot be reproduced by an attacker. We store the MAC in the
88  * second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
89  * regular checksum of the ciphertext which can be used for scrubbing.
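 *
 * To summarize the on-disk placement described above, for an encrypted
 * (non-objset, non-ZIL) level 0 block the parameters live in these
 * blkptr_t fields (see zio_crypt_encode_params_bp() and
 * zio_crypt_encode_mac_bp()):
 *
 *	blk_dva[2].dva_word[0]		64 bit salt
 *	blk_dva[2].dva_word[1]		first 64 bits of the 96 bit IV
 *	blk_fill (upper 32 bits)	last 32 bits of the IV
 *	blk_cksum.zc_word[0-1]		truncated checksum of the ciphertext
 *	blk_cksum.zc_word[2-3]		128 bit MAC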
90  *
91  * OBJECT AUTHENTICATION:
92  * Some object types, such as DMU_OT_MASTER_NODE, cannot be encrypted because
93  * they contain some info that always needs to be readable. To prevent this
94  * data from being altered, we authenticate this data using SHA512-HMAC. This
95  * will produce a MAC (similar to the one produced via encryption) which can
96  * be used to verify the object was not modified. HMACs do not require key
97  * rotation or IVs, so we can keep up to the full 3 copies of authenticated
98  * data.
99  *
100  * ZIL ENCRYPTION:
101  * ZIL blocks have their bp written to disk ahead of the associated data, so we
102  * cannot store the MAC there as we normally do. For these blocks the MAC is
103  * stored in the embedded checksum within the zil_chain_t header. The salt and
104  * IV are generated for the block on bp allocation instead of at encryption
105  * time. In addition, ZIL blocks have some pieces that must be left in plaintext
106  * for claiming even though all of the sensitive user data still needs to be
107  * encrypted. The function zio_crypt_init_uios_zil() handles parsing which
108  * pieces of the block need to be encrypted. All data that is not encrypted is
109  * authenticated using the AAD mechanisms that the supported encryption modes
110  * provide for. In order to preserve the semantics of the ZIL for encrypted
111  * datasets, the ZIL is not protected at the objset level as described below.
112  *
113  * DNODE ENCRYPTION:
114  * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
115  * in plaintext for scrubbing and claiming, but the bonus buffers might contain
116  * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
117  * which pieces of the block need to be encrypted. For more details about
118  * dnode authentication and encryption, see zio_crypt_init_uios_dnode().
119  *
120  * OBJECT SET AUTHENTICATION:
121  * Up to this point, everything we have encrypted and authenticated has been
122  * at level 0 (or -2 for the ZIL). If we did not do any further work the
123  * on-disk format would be susceptible to attacks that deleted or rearranged
124  * the order of level 0 blocks. Ideally, the cleanest solution would be to
125  * maintain a tree of authentication MACs going up the bp tree. However, this
126  * presents a problem for raw sends. Send files do not send information about
127  * indirect blocks so there would be no convenient way to transfer the MACs and
128  * they cannot be recalculated on the receive side without the master key which
129  * would defeat one of the purposes of raw sends in the first place. Instead,
130  * for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
131  * from the level below. We also include some portable fields from blk_prop such
132  * as the lsize and compression algorithm to prevent the data from being
133  * misinterpreted.
134  *
135  * At the objset level, we maintain 2 separate 256 bit MACs in the
136  * objset_phys_t. The first one is "portable" and is the logical root of the
137  * MAC tree maintained in the metadnode's bps. The second is "local" and is
138  * used as the root MAC for the user accounting objects, which are also not
139  * transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
140  * of the send file. The user accounting code ensures that the user accounting
141  * info is not present upon a receive, so the local MAC can simply be cleared
142  * out at that time. For more info about objset_phys_t authentication, see
143  * zio_crypt_do_objset_hmacs().
144  *
145  * CONSIDERATIONS FOR DEDUP:
146  * In order for dedup to work, blocks that we want to dedup with one another
147  * need to use the same IV and encryption key, so that they will have the same
148  * ciphertext. Normally, one should never reuse an IV with the same encryption
149  * key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
150  * blocks. In this case, however, since we are using the same plaintext as
151  * well, all we end up with is a duplicate of the original ciphertext we
152  * already had. As a result, an attacker with read access to the raw disk will
153  * be able to tell which blocks are the same but this information is given away
154  * by dedup anyway. In order to get the same IVs and encryption keys for
155  * equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
156  * here so that a reproducible checksum of the plaintext is never available to
157  * the attacker. The HMAC key is kept alongside the master key, encrypted on
158  * disk. The first 64 bits of the HMAC are used in place of the random salt, and
159  * the next 96 bits are used as the IV. As a result of this mechanism, dedup
160  * will only work within a clone family since encrypted dedup requires use of
161  * the same master and HMAC keys.
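 *
 * In other words (see zio_crypt_generate_iv_salt_dedup() below):
 *
 *	digest = HMAC-SHA512(hmac_key, plaintext)
 *	salt   = digest[0..7]	(ZIO_DATA_SALT_LEN bytes)
 *	iv     = digest[8..19]	(ZIO_DATA_IV_LEN bytes)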
162  */
163 
164 /*
165  * After encrypting many blocks with the same key we may start to run up
166  * against the theoretical limits of how much data can securely be encrypted
167  * with a single key using the supported encryption modes. The most obvious
168  * limitation is that our risk of generating 2 equivalent 96 bit IVs increases
169  * the more IVs we generate (which both GCM and CCM modes strictly forbid).
170  * This risk actually grows surprisingly quickly over time according to the
171  * Birthday Problem. With a total IV space of 2^96, and assuming we have
172  * generated n IVs with a cryptographically secure RNG, the approximate
173  * probability p(n) of a collision is given as:
174  *
175  * p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
176  *
177  * [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
178  *
179  * Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
180  * we must not write more than 398,065,730 blocks with the same encryption key.
181  * Therefore, we rotate our keys after 400,000,000 blocks have been written by
182  * generating a new random 64 bit salt for our HKDF encryption key generation
183  * function.
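 *
 * As a rough check of that number: for small x, 1 - e^(-x) ~= x, so the
 * bound p(n) <= 10^-12 becomes n*(n-1)/(2*(2^96)) <= 10^-12, i.e.
 * n <= sqrt(2^97 * 10^-12) ~= 3.98 * 10^8, which is where the
 * 398,065,730 figure above comes from.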
184  */
185 #define	ZFS_KEY_MAX_SALT_USES_DEFAULT	400000000
186 #define	ZFS_CURRENT_MAX_SALT_USES	\
187 	(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
188 unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
189 
190 /*
191  * Set to a nonzero value to cause zio_do_crypt_uio() to fail 1/this many
192  * calls, to test decryption error handling code paths.
193  */
194 uint64_t zio_decrypt_fail_fraction = 0;
195 
196 typedef struct blkptr_auth_buf {
197 	uint64_t bab_prop;			/* blk_prop - portable mask */
198 	uint8_t bab_mac[ZIO_DATA_MAC_LEN];	/* MAC from blk_cksum */
199 	uint64_t bab_pad;			/* reserved for future use */
200 } blkptr_auth_buf_t;
201 
202 zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
203 	{"",			ZC_TYPE_NONE,	0,	"inherit"},
204 	{"",			ZC_TYPE_NONE,	0,	"on"},
205 	{"",			ZC_TYPE_NONE,	0,	"off"},
206 	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	16,	"aes-128-ccm"},
207 	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	24,	"aes-192-ccm"},
208 	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	32,	"aes-256-ccm"},
209 	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	16,	"aes-128-gcm"},
210 	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	24,	"aes-192-gcm"},
211 	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	32,	"aes-256-gcm"}
212 };
213 
214 static void
215 zio_crypt_key_destroy_early(zio_crypt_key_t *key)
216 {
217 	rw_destroy(&key->zk_salt_lock);
218 
219 	/* zero out the crypto session */
220 	bzero(&key->zk_session, sizeof (key->zk_session));
221 
222 	/* zero out sensitive data */
223 	bzero(key, sizeof (zio_crypt_key_t));
224 }
225 
226 void
227 zio_crypt_key_destroy(zio_crypt_key_t *key)
228 {
229 
230 	freebsd_crypt_freesession(&key->zk_session);
231 	zio_crypt_key_destroy_early(key);
232 }
233 
234 int
235 zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
236 {
237 	int ret;
238 	crypto_mechanism_t mech __unused;
239 	uint_t keydata_len;
240 	zio_crypt_info_t *ci = NULL;
241 
242 	ASSERT(key != NULL);
243 	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
244 
245 	ci = &zio_crypt_table[crypt];
246 	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
247 	    ci->ci_crypt_type != ZC_TYPE_CCM)
248 		return (ENOTSUP);
249 
250 	keydata_len = zio_crypt_table[crypt].ci_keylen;
251 	bzero(key, sizeof (zio_crypt_key_t));
252 	rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
253 
254 	/* fill keydata buffers and salt with random data */
255 	ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
256 	if (ret != 0)
257 		goto error;
258 
259 	ret = random_get_bytes(key->zk_master_keydata, keydata_len);
260 	if (ret != 0)
261 		goto error;
262 
263 	ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
264 	if (ret != 0)
265 		goto error;
266 
267 	ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
268 	if (ret != 0)
269 		goto error;
270 
271 	/* derive the current key from the master key */
272 	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
273 	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
274 	    keydata_len);
275 	if (ret != 0)
276 		goto error;
277 
278 	/* initialize keys for the ICP */
279 	key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
280 	key->zk_current_key.ck_data = key->zk_current_keydata;
281 	key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
282 
283 	key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
284 	key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
285 	key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
286 
292 	ret = freebsd_crypt_newsession(&key->zk_session, ci,
293 	    &key->zk_current_key);
294 	if (ret)
295 		goto error;
296 
297 	key->zk_crypt = crypt;
298 	key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
299 	key->zk_salt_count = 0;
300 
301 	return (0);
302 
303 error:
304 	zio_crypt_key_destroy_early(key);
305 	return (ret);
306 }
307 
308 static int
309 zio_crypt_key_change_salt(zio_crypt_key_t *key)
310 {
311 	int ret = 0;
312 	uint8_t salt[ZIO_DATA_SALT_LEN];
313 	crypto_mechanism_t mech __unused;
314 
315 	uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;
316 
317 	/* generate a new salt */
318 	ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
319 	if (ret != 0)
320 		goto error;
321 
322 	rw_enter(&key->zk_salt_lock, RW_WRITER);
323 
324 	/* someone beat us to the salt rotation, just unlock and return */
325 	if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
326 		goto out_unlock;
327 
328 	/* derive the current key from the master key and the new salt */
329 	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
330 	    salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
331 	if (ret != 0)
332 		goto out_unlock;
333 
334 	/* assign the salt and reset the usage count */
335 	bcopy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
336 	key->zk_salt_count = 0;
337 
338 	freebsd_crypt_freesession(&key->zk_session);
339 	ret = freebsd_crypt_newsession(&key->zk_session,
340 	    &zio_crypt_table[key->zk_crypt], &key->zk_current_key);
341 	if (ret != 0)
342 		goto out_unlock;
343 
344 	rw_exit(&key->zk_salt_lock);
345 
346 	return (0);
347 
348 out_unlock:
349 	rw_exit(&key->zk_salt_lock);
350 error:
351 	return (ret);
352 }
353 
354 /* See comment above zfs_key_max_salt_uses definition for details */
355 int
356 zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
357 {
358 	int ret;
359 	boolean_t salt_change;
360 
361 	rw_enter(&key->zk_salt_lock, RW_READER);
362 
363 	bcopy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
364 	salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
365 	    ZFS_CURRENT_MAX_SALT_USES);
366 
367 	rw_exit(&key->zk_salt_lock);
368 
369 	if (salt_change) {
370 		ret = zio_crypt_key_change_salt(key);
371 		if (ret != 0)
372 			goto error;
373 	}
374 
375 	return (0);
376 
377 error:
378 	return (ret);
379 }
380 
381 void *failed_decrypt_buf;
382 int failed_decrypt_size;
383 
384 /*
385  * This function handles all encryption and decryption in zfs. Unlike the
386  * ICP version, it operates on a single uio, which must reference the
387  * output buffer (with the input data already copied into it) along with
388  * the AAD and room for the MAC. datalen should be the length of the
389  * plaintext / ciphertext alone.
390  */
391 /*
392  * The implementation for FreeBSD's OpenCrypto.
393  *
394  * The big difference between ICP and FOC is that FOC uses a single
395  * buffer for input and output.  This means that (for AES-GCM, the
396  * only one supported right now) the source must be copied into the
397  * destination, and the destination must have the AAD, and the tag/MAC,
398  * already associated with it.  (Both implementations can use a uio.)
399  *
400  * Since the auth data is part of the iovec array, all we need to know
401  * is the length:  0 means there's no AAD.
402  *
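 * As an illustration (a sketch of what the callers in this file set up,
 * not an additional contract), a minimal uio for a single data buffer
 * looks like:
 *
 *	iovec_t iov[3];
 *	uio_t uio;
 *
 *	iov[0].iov_base = aad;		authenticated only, aad_len bytes
 *	iov[0].iov_len = aad_len;
 *	iov[1].iov_base = data;		encrypted/decrypted in place
 *	iov[1].iov_len = datalen;
 *	iov[2].iov_base = mac;		GCM/CCM tag
 *	iov[2].iov_len = mac_len;
 *	uio.uio_iov = iov;
 *	uio.uio_iovcnt = 3;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *
 *	ret = zio_do_crypt_uio_opencrypto(B_TRUE, sess, crypt, ckey, iv,
 *	    datalen, &uio, aad_len);
 *
 * Here "aad", "data", "mac", "sess", "ckey" and the length variables are
 * hypothetical caller-owned buffers/values. When there is no AAD
 * (auth_len == 0) the leading iovec is simply omitted, as in
 * zio_crypt_init_uios_normal(); see zio_crypt_key_wrap() and
 * zio_crypt_init_uios() for the real layouts.
 *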
403  */
404 static int
405 zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
406     uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
407     uio_t *uio, uint_t auth_len)
408 {
409 	zio_crypt_info_t *ci;
410 	int ret;
411 
412 	ci = &zio_crypt_table[crypt];
413 	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
414 	    ci->ci_crypt_type != ZC_TYPE_CCM)
415 		return (ENOTSUP);
416 
418 	ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
419 	    datalen, auth_len);
420 	if (ret != 0) {
421 #ifdef FCRYPTO_DEBUG
422 		printf("%s(%d):  Returning error %s\n",
423 		    __FUNCTION__, __LINE__, encrypt ? "EIO" : "ECKSUM");
424 #endif
425 		ret = SET_ERROR(encrypt ? EIO : ECKSUM);
426 	}
427 
428 	return (ret);
429 }
430 
431 int
432 zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
433     uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
434 {
435 	int ret;
436 	uint64_t aad[3];
437 	/*
438 	 * With OpenCrypto in FreeBSD, the same buffer is used for
439 	 * input and output.  Also, the AAD (for AES-GCM at least)
440 	 * needs to logically go in front.
441 	 */
442 	uio_t cuio;
443 	iovec_t iovecs[4];
444 	uint64_t crypt = key->zk_crypt;
445 	uint_t enc_len, keydata_len, aad_len;
446 
447 	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
448 	ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
449 
450 	keydata_len = zio_crypt_table[crypt].ci_keylen;
451 
452 	/* generate iv for wrapping the master and hmac key */
453 	ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
454 	if (ret != 0)
455 		goto error;
456 
457 	/*
458 	 * Since we only support one buffer, we need to copy
459 	 * the plain text (source) to the cipher buffer (dest).
460 	 * We set iovecs[0] -- the authentication data -- below.
461 	 */
462 	bcopy((void*)key->zk_master_keydata, keydata_out, keydata_len);
463 	bcopy((void*)key->zk_hmac_keydata, hmac_keydata_out,
464 	    SHA512_HMAC_KEYLEN);
465 	iovecs[1].iov_base = keydata_out;
466 	iovecs[1].iov_len = keydata_len;
467 	iovecs[2].iov_base = hmac_keydata_out;
468 	iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
469 	iovecs[3].iov_base = mac;
470 	iovecs[3].iov_len = WRAPPING_MAC_LEN;
471 
472 	/*
473 	 * Although we don't support writing to the old format, we do
474 	 * support rewrapping the key so that the user can move and
475 	 * quarantine datasets on the old format.
476 	 */
477 	if (key->zk_version == 0) {
478 		aad_len = sizeof (uint64_t);
479 		aad[0] = LE_64(key->zk_guid);
480 	} else {
481 		ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
482 		aad_len = sizeof (uint64_t) * 3;
483 		aad[0] = LE_64(key->zk_guid);
484 		aad[1] = LE_64(crypt);
485 		aad[2] = LE_64(key->zk_version);
486 	}
487 
488 	iovecs[0].iov_base = aad;
489 	iovecs[0].iov_len = aad_len;
490 	enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
491 
492 	cuio.uio_iov = iovecs;
493 	cuio.uio_iovcnt = 4;
494 	cuio.uio_segflg = UIO_SYSSPACE;
495 
496 	/* encrypt the keys and store the resulting ciphertext and mac */
497 	ret = zio_do_crypt_uio_opencrypto(B_TRUE, NULL, crypt, cwkey,
498 	    iv, enc_len, &cuio, aad_len);
499 	if (ret != 0)
500 		goto error;
501 
502 	return (0);
503 
504 error:
505 	return (ret);
506 }
507 
508 int
509 zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
510     uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
511     uint8_t *mac, zio_crypt_key_t *key)
512 {
513 	int ret;
514 	uint64_t aad[3];
515 	/*
516 	 * With OpenCrypto in FreeBSD, the same buffer is used for
517 	 * input and output.  Also, the AAD (for AES-GCM at least)
518 	 * needs to logically go in front.
519 	 */
520 	uio_t cuio;
521 	iovec_t iovecs[4];
522 	void *src, *dst;
523 	uint_t enc_len, keydata_len, aad_len;
524 
525 	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
526 	ASSERT3U(cwkey->ck_format, ==, CRYPTO_KEY_RAW);
527 
528 	keydata_len = zio_crypt_table[crypt].ci_keylen;
529 	rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);
530 
531 	/*
532 	 * Since we only support one buffer, we need to copy
533 	 * the encrypted buffer (source) to the plain buffer
534 	 * (dest).  We set iovecs[0] -- the authentication data --
535 	 * below.
536 	 */
537 	dst = key->zk_master_keydata;
538 	src = keydata;
539 
540 	bcopy(src, dst, keydata_len);
541 
542 	dst = key->zk_hmac_keydata;
543 	src = hmac_keydata;
544 	bcopy(src, dst, SHA512_HMAC_KEYLEN);
545 
546 	iovecs[1].iov_base = key->zk_master_keydata;
547 	iovecs[1].iov_len = keydata_len;
548 	iovecs[2].iov_base = key->zk_hmac_keydata;
549 	iovecs[2].iov_len = SHA512_HMAC_KEYLEN;
550 	iovecs[3].iov_base = mac;
551 	iovecs[3].iov_len = WRAPPING_MAC_LEN;
552 
553 	if (version == 0) {
554 		aad_len = sizeof (uint64_t);
555 		aad[0] = LE_64(guid);
556 	} else {
557 		ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
558 		aad_len = sizeof (uint64_t) * 3;
559 		aad[0] = LE_64(guid);
560 		aad[1] = LE_64(crypt);
561 		aad[2] = LE_64(version);
562 	}
563 
564 	enc_len = keydata_len + SHA512_HMAC_KEYLEN;
565 	iovecs[0].iov_base = aad;
566 	iovecs[0].iov_len = aad_len;
567 
568 	cuio.uio_iov = iovecs;
569 	cuio.uio_iovcnt = 4;
570 	cuio.uio_segflg = UIO_SYSSPACE;
571 
572 	/* decrypt the keys and store the result in the output buffers */
573 	ret = zio_do_crypt_uio_opencrypto(B_FALSE, NULL, crypt, cwkey,
574 	    iv, enc_len, &cuio, aad_len);
575 
576 	if (ret != 0)
577 		goto error;
578 
579 	/* generate a fresh salt */
580 	ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
581 	if (ret != 0)
582 		goto error;
583 
584 	/* derive the current key from the master key */
585 	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
586 	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
587 	    keydata_len);
588 	if (ret != 0)
589 		goto error;
590 
591 	/* initialize keys for ICP */
592 	key->zk_current_key.ck_format = CRYPTO_KEY_RAW;
593 	key->zk_current_key.ck_data = key->zk_current_keydata;
594 	key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);
595 
596 	key->zk_hmac_key.ck_format = CRYPTO_KEY_RAW;
597 	key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
598 	key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);
599 
600 	ret = freebsd_crypt_newsession(&key->zk_session,
601 	    &zio_crypt_table[crypt], &key->zk_current_key);
602 	if (ret != 0)
603 		goto error;
604 
605 	key->zk_crypt = crypt;
606 	key->zk_version = version;
607 	key->zk_guid = guid;
608 	key->zk_salt_count = 0;
609 
610 	return (0);
611 
612 error:
613 	zio_crypt_key_destroy_early(key);
614 	return (ret);
615 }
616 
617 int
618 zio_crypt_generate_iv(uint8_t *ivbuf)
619 {
620 	int ret;
621 
622 	/* randomly generate the IV */
623 	ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
624 	if (ret != 0)
625 		goto error;
626 
627 	return (0);
628 
629 error:
630 	bzero(ivbuf, ZIO_DATA_IV_LEN);
631 	return (ret);
632 }
633 
634 int
635 zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
636     uint8_t *digestbuf, uint_t digestlen)
637 {
638 	uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];
639 
640 	ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);
641 
642 	crypto_mac(&key->zk_hmac_key, data, datalen,
643 	    raw_digestbuf, SHA512_DIGEST_LENGTH);
644 
645 	bcopy(raw_digestbuf, digestbuf, digestlen);
646 
647 	return (0);
648 }
649 
650 int
651 zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
652     uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
653 {
654 	int ret;
655 	uint8_t digestbuf[SHA512_DIGEST_LENGTH];
656 
657 	ret = zio_crypt_do_hmac(key, data, datalen,
658 	    digestbuf, SHA512_DIGEST_LENGTH);
659 	if (ret != 0)
660 		return (ret);
661 
662 	bcopy(digestbuf, salt, ZIO_DATA_SALT_LEN);
663 	bcopy(digestbuf + ZIO_DATA_SALT_LEN, ivbuf, ZIO_DATA_IV_LEN);
664 
665 	return (0);
666 }
667 
668 /*
669  * The following functions are used to encode and decode encryption parameters
670  * into blkptr_t and zil_header_t. The ICP wants to use these parameters as
671  * byte strings, which normally means that these strings would not need to deal
672  * with byteswapping at all. However, both blkptr_t and zil_header_t may be
673  * byteswapped by lower layers and so we must "undo" that byteswap here upon
674  * decoding and encoding in a non-native byteorder. These functions require
675  * that the byteorder bit is correct before being called.
676  */
677 void
678 zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
679 {
680 	uint64_t val64;
681 	uint32_t val32;
682 
683 	ASSERT(BP_IS_ENCRYPTED(bp));
684 
685 	if (!BP_SHOULD_BYTESWAP(bp)) {
686 		bcopy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
687 		bcopy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));
688 		bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
689 		BP_SET_IV2(bp, val32);
690 	} else {
691 		bcopy(salt, &val64, sizeof (uint64_t));
692 		bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);
693 
694 		bcopy(iv, &val64, sizeof (uint64_t));
695 		bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);
696 
697 		bcopy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
698 		BP_SET_IV2(bp, BSWAP_32(val32));
699 	}
700 }
701 
702 void
703 zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
704 {
705 	uint64_t val64;
706 	uint32_t val32;
707 
708 	ASSERT(BP_IS_PROTECTED(bp));
709 
710 	/* for convenience, so callers don't need to check */
711 	if (BP_IS_AUTHENTICATED(bp)) {
712 		bzero(salt, ZIO_DATA_SALT_LEN);
713 		bzero(iv, ZIO_DATA_IV_LEN);
714 		return;
715 	}
716 
717 	if (!BP_SHOULD_BYTESWAP(bp)) {
718 		bcopy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
719 		bcopy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
720 
721 		val32 = (uint32_t)BP_GET_IV2(bp);
722 		bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
723 	} else {
724 		val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
725 		bcopy(&val64, salt, sizeof (uint64_t));
726 
727 		val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
728 		bcopy(&val64, iv, sizeof (uint64_t));
729 
730 		val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
731 		bcopy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
732 	}
733 }
734 
735 void
736 zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
737 {
738 	uint64_t val64;
739 
740 	ASSERT(BP_USES_CRYPT(bp));
741 	ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);
742 
743 	if (!BP_SHOULD_BYTESWAP(bp)) {
744 		bcopy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
745 		bcopy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
746 		    sizeof (uint64_t));
747 	} else {
748 		bcopy(mac, &val64, sizeof (uint64_t));
749 		bp->blk_cksum.zc_word[2] = BSWAP_64(val64);
750 
751 		bcopy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
752 		bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
753 	}
754 }
755 
756 void
757 zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
758 {
759 	uint64_t val64;
760 
761 	ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));
762 
763 	/* for convenience, so callers don't need to check */
764 	if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
765 		bzero(mac, ZIO_DATA_MAC_LEN);
766 		return;
767 	}
768 
769 	if (!BP_SHOULD_BYTESWAP(bp)) {
770 		bcopy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
771 		bcopy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
772 		    sizeof (uint64_t));
773 	} else {
774 		val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
775 		bcopy(&val64, mac, sizeof (uint64_t));
776 
777 		val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
778 		bcopy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
779 	}
780 }
781 
782 void
783 zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
784 {
785 	zil_chain_t *zilc = data;
786 
787 	bcopy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
788 	bcopy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
789 	    sizeof (uint64_t));
790 }
791 
792 void
793 zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
794 {
795 	/*
796 	 * The ZIL MAC is embedded in the block it protects, which will
797 	 * not have been byteswapped by the time this function has been called.
798 	 * As a result, we don't need to worry about byteswapping the MAC.
799 	 */
800 	const zil_chain_t *zilc = data;
801 
802 	bcopy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
803 	bcopy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
804 	    sizeof (uint64_t));
805 }
806 
807 /*
808  * This routine takes a block of dnodes (src_abd) and copies only the bonus
809  * buffers to the same offsets in the dst buffer. datalen should be the size
810  * of both the src_abd and the dst buffer (not just the length of the bonus
811  * buffers).
812  */
813 void
814 zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
815 {
816 	uint_t i, max_dnp = datalen >> DNODE_SHIFT;
817 	uint8_t *src;
818 	dnode_phys_t *dnp, *sdnp, *ddnp;
819 
820 	src = abd_borrow_buf_copy(src_abd, datalen);
821 
822 	sdnp = (dnode_phys_t *)src;
823 	ddnp = (dnode_phys_t *)dst;
824 
825 	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
826 		dnp = &sdnp[i];
827 		if (dnp->dn_type != DMU_OT_NONE &&
828 		    DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
829 		    dnp->dn_bonuslen != 0) {
830 			bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]),
831 			    DN_MAX_BONUS_LEN(dnp));
832 		}
833 	}
834 
835 	abd_return_buf(src_abd, src, datalen);
836 }
837 
838 /*
839  * This function decides which fields from blk_prop are included in
840  * the various on-disk MACs.
841  */
842 static void
843 zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
844 {
845 	int avoidlint = SPA_MINBLOCKSIZE;
846 	/*
847 	 * Version 0 did not properly zero out all non-portable fields
848 	 * as it should have done. We maintain this code so that we can
849 	 * do read-only imports of pools on this version.
850 	 */
851 	if (version == 0) {
852 		BP_SET_DEDUP(bp, 0);
853 		BP_SET_CHECKSUM(bp, 0);
854 		BP_SET_PSIZE(bp, avoidlint);
855 		return;
856 	}
857 
858 	ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
859 
860 	/*
861 	 * The hole_birth feature might set these fields even if this bp
862 	 * is a hole. We zero them out here to guarantee that raw sends
863 	 * will function with or without the feature.
864 	 */
865 	if (BP_IS_HOLE(bp)) {
866 		bp->blk_prop = 0ULL;
867 		return;
868 	}
869 
870 	/*
871 	 * At L0 we want to verify these fields to ensure that data blocks
872 	 * can not be reinterpreted. For instance, we do not want an attacker
873 	 * to trick us into returning raw lz4 compressed data to the user
874 	 * by modifying the compression bits. At higher levels, we cannot
875 	 * enforce this policy since raw sends do not convey any information
876 	 * about indirect blocks, so these values might be different on the
877 	 * receive side. Fortunately, this does not open any new attack
878 	 * vectors, since any alterations that can be made to a higher level
879 	 * bp must still verify the correct order of the layer below it.
880 	 */
881 	if (BP_GET_LEVEL(bp) != 0) {
882 		BP_SET_BYTEORDER(bp, 0);
883 		BP_SET_COMPRESS(bp, 0);
884 
885 		/*
886 		 * psize cannot be set to zero or it will trigger
887 		 * asserts, but the value doesn't really matter as
888 		 * long as it is constant.
889 		 */
890 		BP_SET_PSIZE(bp, avoidlint);
891 	}
892 
893 	BP_SET_DEDUP(bp, 0);
894 	BP_SET_CHECKSUM(bp, 0);
895 }
896 
897 static void
898 zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
899     blkptr_auth_buf_t *bab, uint_t *bab_len)
900 {
901 	blkptr_t tmpbp = *bp;
902 
903 	if (should_bswap)
904 		byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));
905 
906 	ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
907 	ASSERT0(BP_IS_EMBEDDED(&tmpbp));
908 
909 	zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);
910 
911 	/*
912 	 * We always MAC blk_prop in LE to ensure portability. This
913 	 * must be done after decoding the mac, since the endianness
914 	 * will get zero'd out here.
915 	 */
916 	zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
917 	bab->bab_prop = LE_64(tmpbp.blk_prop);
918 	bab->bab_pad = 0ULL;
919 
920 	/* version 0 did not include the padding */
921 	*bab_len = sizeof (blkptr_auth_buf_t);
922 	if (version == 0)
923 		*bab_len -= sizeof (uint64_t);
924 }
925 
926 static int
927 zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
928     boolean_t should_bswap, blkptr_t *bp)
929 {
930 	uint_t bab_len;
931 	blkptr_auth_buf_t bab;
932 
933 	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
934 	crypto_mac_update(ctx, &bab, bab_len);
935 
936 	return (0);
937 }
938 
939 static void
940 zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
941     boolean_t should_bswap, blkptr_t *bp)
942 {
943 	uint_t bab_len;
944 	blkptr_auth_buf_t bab;
945 
946 	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
947 	SHA2Update(ctx, &bab, bab_len);
948 }
949 
950 static void
951 zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
952     boolean_t should_bswap, blkptr_t *bp)
953 {
954 	uint_t bab_len;
955 	blkptr_auth_buf_t bab;
956 
957 	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
958 	bcopy(&bab, *aadp, bab_len);
959 	*aadp += bab_len;
960 	*aad_len += bab_len;
961 }
962 
963 static int
964 zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
965     boolean_t should_bswap, dnode_phys_t *dnp)
966 {
967 	int ret, i;
968 	dnode_phys_t *adnp;
969 	boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
970 	uint8_t tmp_dncore[offsetof(dnode_phys_t, dn_blkptr)];
971 
972 	/* authenticate the core dnode (masking out non-portable bits) */
973 	bcopy(dnp, tmp_dncore, sizeof (tmp_dncore));
974 	adnp = (dnode_phys_t *)tmp_dncore;
975 	if (le_bswap) {
976 		adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
977 		adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
978 		adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
979 		adnp->dn_used = BSWAP_64(adnp->dn_used);
980 	}
981 	adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
982 	adnp->dn_used = 0;
983 
984 	crypto_mac_update(ctx, adnp, sizeof (tmp_dncore));
985 
986 	for (i = 0; i < dnp->dn_nblkptr; i++) {
987 		ret = zio_crypt_bp_do_hmac_updates(ctx, version,
988 		    should_bswap, &dnp->dn_blkptr[i]);
989 		if (ret != 0)
990 			goto error;
991 	}
992 
993 	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
994 		ret = zio_crypt_bp_do_hmac_updates(ctx, version,
995 		    should_bswap, DN_SPILL_BLKPTR(dnp));
996 		if (ret != 0)
997 			goto error;
998 	}
999 
1000 	return (0);
1001 
1002 error:
1003 	return (ret);
1004 }
1005 
1006 /*
1007  * objset_phys_t blocks introduce a number of exceptions to the normal
1008  * authentication process. objset_phys_t's contain 2 separate HMACS for
1009  * protecting the integrity of their data. The portable_mac protects the
1010  * metadnode. This MAC can be sent with a raw send and protects against
1011  * reordering of data within the metadnode. The local_mac protects the user
1012  * accounting objects which are not sent from one system to another.
1013  *
1014  * In addition, objset blocks are the only blocks that can be modified and
1015  * written to disk without the key loaded under certain circumstances. During
1016  * zil_claim() we need to be able to update the zil_header_t to complete
1017  * claiming log blocks and during raw receives we need to write out the
1018  * portable_mac from the send file. Both of these actions are possible
1019  * because these fields are not protected by either MAC, so neither action
1020  * requires modifying the MACs without the key. However, when the modified blocks
1021  * are written out they will be byteswapped into the host machine's native
1022  * endianness which will modify fields protected by the MAC. As a result, MAC
1023  * calculation for objset blocks works slightly differently from other block
1024  * types. Where other block types MAC the data in whatever endianness is
1025  * written to disk, objset blocks always MAC the little endian version of their
1026  * values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
1027  * and le_bswap indicates whether a byteswap is needed to get this block
1028  * into little endian format.
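 *
 * Roughly, the function below computes (a summary, not a literal
 * transcript of the code):
 *
 *	portable_mac = HMAC-SHA512(zk_hmac_key,
 *	    LE(os_type) || LE(portable os_flags) || metadnode core + bps)
 *	local_mac    = HMAC-SHA512(zk_hmac_key,
 *	    LE(non-portable os_flags) || userused dnode || groupused dnode
 *	    [|| projectused dnode, if present])
 *
 * with each result truncated to ZIO_OBJSET_MAC_LEN bytes.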
1029  */
1030 /* ARGSUSED */
1031 int
1032 zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
1033     boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
1034 {
1035 	int ret;
1036 	struct hmac_ctx hash_ctx;
1037 	struct hmac_ctx *ctx = &hash_ctx;
1038 	objset_phys_t *osp = data;
1039 	uint64_t intval;
1040 	boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
1041 	uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
1042 	uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];
1043 
1045 	/* calculate the portable MAC from the portable fields and metadnode */
1046 	crypto_mac_init(ctx, &key->zk_hmac_key);
1047 
1048 	/* add in the os_type */
1049 	intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
1050 	crypto_mac_update(ctx, &intval, sizeof (uint64_t));
1051 
1052 	/* add in the portable os_flags */
1053 	intval = osp->os_flags;
1054 	if (should_bswap)
1055 		intval = BSWAP_64(intval);
1056 	intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
1057 	/* CONSTCOND */
1058 	if (!ZFS_HOST_BYTEORDER)
1059 		intval = BSWAP_64(intval);
1060 
1061 	crypto_mac_update(ctx, &intval, sizeof (uint64_t));
1062 
1063 	/* add in fields from the metadnode */
1064 	ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
1065 	    should_bswap, &osp->os_meta_dnode);
1066 	if (ret)
1067 		goto error;
1068 
1069 	crypto_mac_final(ctx, raw_portable_mac, SHA512_DIGEST_LENGTH);
1070 
1071 	bcopy(raw_portable_mac, portable_mac, ZIO_OBJSET_MAC_LEN);
1072 
1073 	/*
1074 	 * The local MAC protects the user, group and project accounting.
1075 	 * If these objects are not present, the local MAC is zeroed out.
1076 	 */
1077 	if ((datalen >= OBJSET_PHYS_SIZE_V3 &&
1078 	    osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
1079 	    osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
1080 	    osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
1081 	    (datalen >= OBJSET_PHYS_SIZE_V2 &&
1082 	    osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
1083 	    osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
1084 	    (datalen <= OBJSET_PHYS_SIZE_V1)) {
1085 		bzero(local_mac, ZIO_OBJSET_MAC_LEN);
1086 		return (0);
1087 	}
1088 
1089 	/* calculate the local MAC from the userused and groupused dnodes */
1090 	crypto_mac_init(ctx, &key->zk_hmac_key);
1091 
1092 	/* add in the non-portable os_flags */
1093 	intval = osp->os_flags;
1094 	if (should_bswap)
1095 		intval = BSWAP_64(intval);
1096 	intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
1097 	/* CONSTCOND */
1098 	if (!ZFS_HOST_BYTEORDER)
1099 		intval = BSWAP_64(intval);
1100 
1101 	crypto_mac_update(ctx, &intval, sizeof (uint64_t));
1102 
1103 	/* XXX check dnode type ... */
1104 	/* add in fields from the user accounting dnodes */
1105 	if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
1106 		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
1107 		    should_bswap, &osp->os_userused_dnode);
1108 		if (ret)
1109 			goto error;
1110 	}
1111 
1112 	if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
1113 		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
1114 		    should_bswap, &osp->os_groupused_dnode);
1115 		if (ret)
1116 			goto error;
1117 	}
1118 
1119 	if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
1120 	    datalen >= OBJSET_PHYS_SIZE_V3) {
1121 		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
1122 		    should_bswap, &osp->os_projectused_dnode);
1123 		if (ret)
1124 			goto error;
1125 	}
1126 
1127 	crypto_mac_final(ctx, raw_local_mac, SHA512_DIGEST_LENGTH);
1128 
1129 	bcopy(raw_local_mac, local_mac, ZIO_OBJSET_MAC_LEN);
1130 
1131 	return (0);
1132 
1133 error:
1134 	bzero(portable_mac, ZIO_OBJSET_MAC_LEN);
1135 	bzero(local_mac, ZIO_OBJSET_MAC_LEN);
1136 	return (ret);
1137 }
1138 
1139 static void
1140 zio_crypt_destroy_uio(uio_t *uio)
1141 {
1142 	if (uio->uio_iov)
1143 		kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t));
1144 }
1145 
1146 /*
1147  * This function parses an uncompressed indirect block and returns a checksum
1148  * of all the portable fields from all of the contained bps. The portable
1149  * fields are the MAC and all of the fields from blk_prop except for the dedup,
1150  * checksum, and psize bits. For an explanation of the purpose of this, see
1151  * the comment block on object set authentication.
1152  */
1153 static int
1154 zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
1155     uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
1156 {
1157 	blkptr_t *bp;
1158 	int i, epb = datalen >> SPA_BLKPTRSHIFT;
1159 	SHA2_CTX ctx;
1160 	uint8_t digestbuf[SHA512_DIGEST_LENGTH];
1161 
1162 	/* checksum all of the MACs from the layer below */
1163 	SHA2Init(SHA512, &ctx);
1164 	for (i = 0, bp = buf; i < epb; i++, bp++) {
1165 		zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
1166 		    byteswap, bp);
1167 	}
1168 	SHA2Final(digestbuf, &ctx);
1169 
1170 	if (generate) {
1171 		bcopy(digestbuf, cksum, ZIO_DATA_MAC_LEN);
1172 		return (0);
1173 	}
1174 
1175 	if (bcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) {
1176 #ifdef FCRYPTO_DEBUG
1177 		printf("%s(%d): Setting ECKSUM\n", __FUNCTION__, __LINE__);
1178 #endif
1179 		return (SET_ERROR(ECKSUM));
1180 	}
1181 	return (0);
1182 }
1183 
1184 int
1185 zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
1186     uint_t datalen, boolean_t byteswap, uint8_t *cksum)
1187 {
1188 	int ret;
1189 
1190 	/*
1191 	 * Unfortunately, callers of this function will not always have
1192 	 * easy access to the on-disk format version. This info is
1193 	 * normally found in the DSL Crypto Key, but the checksum-of-MACs
1194 	 * is expected to be verifiable even when the key isn't loaded.
1195 	 * Here, instead of doing a ZAP lookup for the version for each
1196 	 * zio, we simply try both existing formats.
1197 	 */
1198 	ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
1199 	    datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
1200 	if (ret == ECKSUM) {
1201 		ASSERT(!generate);
1202 		ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
1203 		    buf, datalen, 0, byteswap, cksum);
1204 	}
1205 
1206 	return (ret);
1207 }
1208 
1209 int
1210 zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
1211     uint_t datalen, boolean_t byteswap, uint8_t *cksum)
1212 {
1213 	int ret;
1214 	void *buf;
1215 
1216 	buf = abd_borrow_buf_copy(abd, datalen);
1217 	ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
1218 	    byteswap, cksum);
1219 	abd_return_buf(abd, buf, datalen);
1220 
1221 	return (ret);
1222 }
1223 
1224 /*
1225  * Special case handling routine for encrypting / decrypting ZIL blocks.
1226  * We do not check for the older ZIL chain because the encryption feature
1227  * was not available before the newer ZIL chain was introduced. The goal
1228  * here is to encrypt everything except the blkptr_t of a lr_write_t and
1229  * the zil_chain_t header. Everything that is not encrypted is authenticated.
1230  */
1231 /*
1232  * The OpenCrypto used in FreeBSD does not use separate source and
1233  * destination buffers; instead, the same buffer is used.  Further, to
1234  * accommodate some of the drivers, the authbuf needs to be logically before
1235  * the data.  This means that we need to copy the source to the destination,
1236  * and set up an extra iovec_t at the beginning to handle the authbuf.
1237  * It also means we'll only return one uio_t.
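 *
 * The iovec array built below therefore ends up looking like (a sketch):
 *
 *	iov[0]		aadbuf: the zil_chain_t header minus its embedded
 *			checksum, plus the plaintext parts of every record
 *	iov[1..n-2]	the portions of each log record to be encrypted or
 *			decrypted in place
 *	iov[n-1]	the MAC, filled in later by zio_crypt_init_uios()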
1238  */
1239 
1240 /* ARGSUSED */
1241 static int
1242 zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
1243     uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, uio_t *puio,
1244     uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
1245     boolean_t *no_crypt)
1246 {
1247 	uint8_t *aadbuf = zio_buf_alloc(datalen);
1248 	uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
1249 	iovec_t *dst_iovecs;
1250 	zil_chain_t *zilc;
1251 	lr_t *lr;
1252 	uint64_t txtype, lr_len;
1253 	uint_t crypt_len, nr_iovecs, vec;
1254 	uint_t aad_len = 0, total_len = 0;
1255 
1256 	if (encrypt) {
1257 		src = plainbuf;
1258 		dst = cipherbuf;
1259 	} else {
1260 		src = cipherbuf;
1261 		dst = plainbuf;
1262 	}
1263 	bcopy(src, dst, datalen);
1264 
1265 	/* Find the start and end record of the log block. */
1266 	zilc = (zil_chain_t *)src;
1267 	slrp = src + sizeof (zil_chain_t);
1268 	aadp = aadbuf;
1269 	blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);
1270 
1271 	/*
1272 	 * Calculate the number of encrypted iovecs we will need.
1273 	 */
1274 
1275 	/* We need at least two iovecs -- one for the AAD, one for the MAC. */
1276 	nr_iovecs = 2;
1277 
1278 	for (; slrp < blkend; slrp += lr_len) {
1279 		lr = (lr_t *)slrp;
1280 
1281 		if (byteswap) {
1282 			txtype = BSWAP_64(lr->lrc_txtype);
1283 			lr_len = BSWAP_64(lr->lrc_reclen);
1284 		} else {
1285 			txtype = lr->lrc_txtype;
1286 			lr_len = lr->lrc_reclen;
1287 		}
1288 
1289 		nr_iovecs++;
1290 		if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
1291 			nr_iovecs++;
1292 	}
1293 
1294 	dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
1295 
1296 	/*
1297 	 * Copy the plain zil header over and authenticate everything except
1298 	 * the checksum that will store our MAC. If we are writing the data
1299 	 * the embedded checksum will not have been calculated yet, so we don't
1300 	 * authenticate that.
1301 	 */
1302 	bcopy(src, aadp, sizeof (zil_chain_t) - sizeof (zio_eck_t));
1303 	aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
1304 	aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);
1305 
1306 	slrp = src + sizeof (zil_chain_t);
1307 	dlrp = dst + sizeof (zil_chain_t);
1308 
1309 	/*
1310 	 * Loop over records again, filling in iovecs.
1311 	 */
1312 
1313 	/* The first iovec will contain the authbuf. */
1314 	vec = 1;
1315 
1316 	for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
1317 		lr = (lr_t *)slrp;
1318 
1319 		if (!byteswap) {
1320 			txtype = lr->lrc_txtype;
1321 			lr_len = lr->lrc_reclen;
1322 		} else {
1323 			txtype = BSWAP_64(lr->lrc_txtype);
1324 			lr_len = BSWAP_64(lr->lrc_reclen);
1325 		}
1326 
1327 		/* copy the common lr_t */
1328 		bcopy(slrp, dlrp, sizeof (lr_t));
1329 		bcopy(slrp, aadp, sizeof (lr_t));
1330 		aadp += sizeof (lr_t);
1331 		aad_len += sizeof (lr_t);
1332 
1333 		/*
1334 		 * If this is a TX_WRITE record we want to encrypt everything
1335 		 * except the bp if exists. If the bp does exist we want to
1336 		 * authenticate it.
1337 		 */
1338 		if (txtype == TX_WRITE) {
1339 			crypt_len = sizeof (lr_write_t) -
1340 			    sizeof (lr_t) - sizeof (blkptr_t);
1341 			dst_iovecs[vec].iov_base = (char *)dlrp +
1342 			    sizeof (lr_t);
1343 			dst_iovecs[vec].iov_len = crypt_len;
1344 
1345 			/* copy the bp now since it will not be encrypted */
1346 			bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
1347 			    dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
1348 			    sizeof (blkptr_t));
1349 			bcopy(slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
1350 			    aadp, sizeof (blkptr_t));
1351 			aadp += sizeof (blkptr_t);
1352 			aad_len += sizeof (blkptr_t);
1353 			vec++;
1354 			total_len += crypt_len;
1355 
1356 			if (lr_len != sizeof (lr_write_t)) {
1357 				crypt_len = lr_len - sizeof (lr_write_t);
1358 				dst_iovecs[vec].iov_base = (char *)
1359 				    dlrp + sizeof (lr_write_t);
1360 				dst_iovecs[vec].iov_len = crypt_len;
1361 				vec++;
1362 				total_len += crypt_len;
1363 			}
1364 		} else {
1365 			crypt_len = lr_len - sizeof (lr_t);
1366 			dst_iovecs[vec].iov_base = (char *)dlrp +
1367 			    sizeof (lr_t);
1368 			dst_iovecs[vec].iov_len = crypt_len;
1369 			vec++;
1370 			total_len += crypt_len;
1371 		}
1372 	}
1373 
1374 	/* The last iovec will contain the MAC. */
1375 	ASSERT3U(vec, ==, nr_iovecs - 1);
1376 
1377 	/* AAD */
1378 	dst_iovecs[0].iov_base = aadbuf;
1379 	dst_iovecs[0].iov_len = aad_len;
1380 	/* MAC */
1381 	dst_iovecs[vec].iov_base = 0;
1382 	dst_iovecs[vec].iov_len = 0;
1383 
1384 	*no_crypt = (vec == 1);
1385 	*enc_len = total_len;
1386 	*authbuf = aadbuf;
1387 	*auth_len = aad_len;
1388 	out_uio->uio_iov = dst_iovecs;
1389 	out_uio->uio_iovcnt = nr_iovecs;
1390 
1391 	return (0);
1392 }
1393 
1394 /*
1395  * Special case handling routine for encrypting / decrypting dnode blocks.
1396  */
1397 static int
1398 zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
1399     uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
1400     uio_t *puio, uio_t *out_uio, uint_t *enc_len, uint8_t **authbuf,
1401     uint_t *auth_len, boolean_t *no_crypt)
1402 {
1403 	uint8_t *aadbuf = zio_buf_alloc(datalen);
1404 	uint8_t *src, *dst, *aadp;
1405 	dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
1406 	iovec_t *dst_iovecs;
1407 	uint_t nr_iovecs, crypt_len, vec;
1408 	uint_t aad_len = 0, total_len = 0;
1409 	uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
1410 
1411 	if (encrypt) {
1412 		src = plainbuf;
1413 		dst = cipherbuf;
1414 	} else {
1415 		src = cipherbuf;
1416 		dst = plainbuf;
1417 	}
1418 	bcopy(src, dst, datalen);
1419 
1420 	sdnp = (dnode_phys_t *)src;
1421 	ddnp = (dnode_phys_t *)dst;
1422 	aadp = aadbuf;
1423 
1424 	/*
1425 	 * Count the number of iovecs we will need to do the encryption by
1426 	 * counting the number of bonus buffers that need to be encrypted.
1427 	 */
1428 
1429 	/* We need at least two iovecs -- one for the AAD, one for the MAC. */
1430 	nr_iovecs = 2;
1431 
1432 	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
1433 		/*
1434 		 * This block may still be byteswapped. However, all of the
1435 		 * values we use are either uint8_t's (for which byteswapping
1436 		 * is a noop) or a != 0 check, which will work regardless
1437 		 * of whether or not we byteswap.
1438 		 */
1439 		if (sdnp[i].dn_type != DMU_OT_NONE &&
1440 		    DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
1441 		    sdnp[i].dn_bonuslen != 0) {
1442 			nr_iovecs++;
1443 		}
1444 	}
1445 
1446 	dst_iovecs = kmem_alloc(nr_iovecs * sizeof (iovec_t), KM_SLEEP);
1447 
1448 	/*
1449 	 * Iterate through the dnodes again, this time filling in the uios
1450 	 * we allocated earlier. We also concatenate any data we want to
1451 	 * authenticate onto aadbuf.
1452 	 */
1453 
1454 	/* The first iovec will contain the authbuf. */
1455 	vec = 1;
1456 
1457 	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
1458 		dnp = &sdnp[i];
1459 
1460 		/* copy over the core fields and blkptrs (kept as plaintext) */
1461 		bcopy(dnp, &ddnp[i], (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);
1462 
1463 		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1464 			bcopy(DN_SPILL_BLKPTR(dnp), DN_SPILL_BLKPTR(&ddnp[i]),
1465 			    sizeof (blkptr_t));
1466 		}
1467 
1468 		/*
1469 		 * Handle authenticated data. We authenticate everything in
1470 		 * the dnode that can be brought over when we do a raw send.
1471 		 * This includes all of the core fields as well as the MACs
1472 		 * stored in the bp checksums and all of the portable bits
1473 		 * from blk_prop. We include the dnode padding here in case it
1474 		 * ever gets used in the future. Some dn_flags and dn_used are
1475 		 * not portable, so we mask those values out of the
1476 		 * authenticated data.
1477 		 */
1478 		crypt_len = offsetof(dnode_phys_t, dn_blkptr);
1479 		bcopy(dnp, aadp, crypt_len);
1480 		adnp = (dnode_phys_t *)aadp;
1481 		adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
1482 		adnp->dn_used = 0;
1483 		aadp += crypt_len;
1484 		aad_len += crypt_len;
1485 
1486 		for (j = 0; j < dnp->dn_nblkptr; j++) {
1487 			zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
1488 			    version, byteswap, &dnp->dn_blkptr[j]);
1489 		}
1490 
1491 		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1492 			zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
1493 			    version, byteswap, DN_SPILL_BLKPTR(dnp));
1494 		}
1495 
1496 		/*
1497 		 * If this bonus buffer needs to be encrypted, we prepare an
1498 		 * iovec_t. The encryption / decryption functions will fill
1499 		 * this in for us with the encrypted or decrypted data.
1500 		 * Otherwise we add the bonus buffer to the authenticated
1501 		 * data buffer and copy it over to the destination. The
1502 		 * encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
1503 		 * we can guarantee alignment with the AES block size
1504 		 * (128 bits).
1505 		 */
1506 		crypt_len = DN_MAX_BONUS_LEN(dnp);
1507 		if (dnp->dn_type != DMU_OT_NONE &&
1508 		    DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
1509 		    dnp->dn_bonuslen != 0) {
1510 			dst_iovecs[vec].iov_base = DN_BONUS(&ddnp[i]);
1511 			dst_iovecs[vec].iov_len = crypt_len;
1512 
1513 			vec++;
1514 			total_len += crypt_len;
1515 		} else {
1516 			bcopy(DN_BONUS(dnp), DN_BONUS(&ddnp[i]), crypt_len);
1517 			bcopy(DN_BONUS(dnp), aadp, crypt_len);
1518 			aadp += crypt_len;
1519 			aad_len += crypt_len;
1520 		}
1521 	}
1522 
1523 	/* The last iovec will contain the MAC. */
1524 	ASSERT3U(vec, ==, nr_iovecs - 1);
1525 
1526 	/* AAD */
1527 	dst_iovecs[0].iov_base = aadbuf;
1528 	dst_iovecs[0].iov_len = aad_len;
1529 	/* MAC */
1530 	dst_iovecs[vec].iov_base = 0;
1531 	dst_iovecs[vec].iov_len = 0;
1532 
1533 	*no_crypt = (vec == 1);
1534 	*enc_len = total_len;
1535 	*authbuf = aadbuf;
1536 	*auth_len = aad_len;
1537 	out_uio->uio_iov = dst_iovecs;
1538 	out_uio->uio_iovcnt = nr_iovecs;
1539 
1540 	return (0);
1541 }
1542 
1543 /* ARGSUSED */
1544 static int
1545 zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
1546     uint8_t *cipherbuf, uint_t datalen, uio_t *puio, uio_t *out_uio,
1547     uint_t *enc_len)
1548 {
1549 	int ret;
1550 	uint_t nr_plain = 1, nr_cipher = 2;
1551 	iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;
1552 	void *src, *dst;
1553 
1554 	cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
1555 	    KM_SLEEP);
1556 	if (!cipher_iovecs) {
1557 		ret = SET_ERROR(ENOMEM);
1558 		goto error;
1559 	}
1560 	bzero(cipher_iovecs, nr_cipher * sizeof (iovec_t));
1561 
1562 	if (encrypt) {
1563 		src = plainbuf;
1564 		dst = cipherbuf;
1565 	} else {
1566 		src = cipherbuf;
1567 		dst = plainbuf;
1568 	}
1569 	bcopy(src, dst, datalen);
1570 	cipher_iovecs[0].iov_base = dst;
1571 	cipher_iovecs[0].iov_len = datalen;
1572 
1573 	*enc_len = datalen;
1574 	out_uio->uio_iov = cipher_iovecs;
1575 	out_uio->uio_iovcnt = nr_cipher;
1576 
1577 	return (0);
1578 
1579 error:
1580 	if (plain_iovecs != NULL)
1581 		kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
1582 	if (cipher_iovecs != NULL)
1583 		kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));
1584 
1585 	*enc_len = 0;
1586 	out_uio->uio_iov = NULL;
1587 	out_uio->uio_iovcnt = 0;
1588 
1589 	return (ret);
1590 }
1591 
1592 /*
1593  * This function builds up the plaintext (puio) and ciphertext (cuio) uios so
1594  * that zio_do_crypt_uio_opencrypto() can use them to encrypt and decrypt.
1595  * Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
1596  * requiring special handling to parse out pieces that are to be encrypted. The
1597  * authbuf is used by these special cases to store additional authenticated
1598  * data (AAD) for the encryption modes.
1599  */
1600 static int
1601 zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
1602     uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
1603     uint8_t *mac, uio_t *puio, uio_t *cuio, uint_t *enc_len, uint8_t **authbuf,
1604     uint_t *auth_len, boolean_t *no_crypt)
1605 {
1606 	int ret;
1607 	iovec_t *mac_iov;
1608 
1609 	ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);
1610 
1611 	/* route to handler */
1612 	switch (ot) {
1613 	case DMU_OT_INTENT_LOG:
1614 		ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
1615 		    datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
1616 		    no_crypt);
1617 		break;
1618 	case DMU_OT_DNODE:
1619 		ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
1620 		    cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
1621 		    auth_len, no_crypt);
1622 		break;
1623 	default:
1624 		ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
1625 		    datalen, puio, cuio, enc_len);
1626 		*authbuf = NULL;
1627 		*auth_len = 0;
1628 		*no_crypt = B_FALSE;
1629 		break;
1630 	}
1631 
1632 	if (ret != 0)
1633 		goto error;
1634 
1635 	/* populate the uios */
1636 	cuio->uio_segflg = UIO_SYSSPACE;
1637 
1638 	mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]);
1639 	mac_iov->iov_base = (void *)mac;
1640 	mac_iov->iov_len = ZIO_DATA_MAC_LEN;
1641 
1642 	return (0);
1643 
1644 error:
1645 	return (ret);
1646 }
1647 
1651 /*
1652  * Primary encryption / decryption entrypoint for zio data.
1653  */
1654 int
1655 zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
1656     dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
1657     uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
1658     boolean_t *no_crypt)
1659 {
1660 	int ret;
1661 	boolean_t locked = B_FALSE;
1662 	uint64_t crypt = key->zk_crypt;
1663 	uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
1664 	uint_t enc_len, auth_len;
1665 	uio_t puio, cuio;
1666 	uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
1667 	crypto_key_t tmp_ckey, *ckey = NULL;
1668 	freebsd_crypt_session_t *tmpl = NULL;
1669 	uint8_t *authbuf = NULL;
1670 
1671 	bzero(&puio, sizeof (uio_t));
1672 	bzero(&cuio, sizeof (uio_t));
1673 
1674 #ifdef FCRYPTO_DEBUG
1675 	printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
1676 	    __FUNCTION__,
1677 	    encrypt ? "encrypt" : "decrypt",
1678 	    key, salt, ot, iv, mac, datalen,
1679 	    byteswap ? "byteswap" : "native_endian", plainbuf,
1680 	    cipherbuf, no_crypt);
1681 
1682 	printf("\tkey = {");
1683 	for (int i = 0; i < key->zk_current_key.ck_length/8; i++)
1684 		printf("%02x ", ((uint8_t *)key->zk_current_key.ck_data)[i]);
1685 	printf("}\n");
1686 #endif
1687 	/* create uios for encryption */
1688 	ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
1689 	    cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
1690 	    &authbuf, &auth_len, no_crypt);
1691 	if (ret != 0)
1692 		return (ret);
1693 
1694 	/*
1695 	 * If the needed key is the current one, just use it. Otherwise we
1696 	 * need to generate a temporary one from the given salt + master key.
1697 	 * If we are encrypting, we must return a copy of the current salt
1698 	 * so that it can be stored in the blkptr_t.
1699 	 */
1700 	rw_enter(&key->zk_salt_lock, RW_READER);
1701 	locked = B_TRUE;
1702 
1703 	if (bcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
1704 		ckey = &key->zk_current_key;
1705 		tmpl = &key->zk_session;
1706 	} else {
1707 		rw_exit(&key->zk_salt_lock);
1708 		locked = B_FALSE;
1709 
1710 		ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
1711 		    salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
1712 		if (ret != 0)
1713 			goto error;
1714 		tmp_ckey.ck_format = CRYPTO_KEY_RAW;
1715 		tmp_ckey.ck_data = enc_keydata;
1716 		tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);
1717 
1718 		ckey = &tmp_ckey;
1719 		tmpl = NULL;
1720 	}
1721 
1722 	/* perform the encryption / decryption */
1723 	ret = zio_do_crypt_uio_opencrypto(encrypt, tmpl, key->zk_crypt,
1724 	    ckey, iv, enc_len, &cuio, auth_len);
1725 	if (ret != 0)
1726 		goto error;
1727 	if (locked) {
1728 		rw_exit(&key->zk_salt_lock);
1729 		locked = B_FALSE;
1730 	}
1731 
1732 	if (authbuf != NULL)
1733 		zio_buf_free(authbuf, datalen);
1734 	if (ckey == &tmp_ckey)
1735 		bzero(enc_keydata, keydata_len);
1736 	zio_crypt_destroy_uio(&puio);
1737 	zio_crypt_destroy_uio(&cuio);
1738 
1739 	return (0);
1740 
1741 error:
1742 	if (!encrypt) {
1743 		if (failed_decrypt_buf != NULL)
1744 			kmem_free(failed_decrypt_buf, failed_decrypt_size);
1745 		failed_decrypt_buf = kmem_alloc(datalen, KM_SLEEP);
1746 		failed_decrypt_size = datalen;
1747 		bcopy(cipherbuf, failed_decrypt_buf, datalen);
1748 	}
1749 	if (locked)
1750 		rw_exit(&key->zk_salt_lock);
1751 	if (authbuf != NULL)
1752 		zio_buf_free(authbuf, datalen);
1753 	if (ckey == &tmp_ckey)
1754 		bzero(enc_keydata, keydata_len);
1755 	zio_crypt_destroy_uio(&puio);
1756 	zio_crypt_destroy_uio(&cuio);
1757 	return (SET_ERROR(ret));
1758 }
1759 
1760 /*
1761  * Simple wrapper around zio_do_crypt_data() to work with abd's instead of
1762  * linear buffers.
1763  */
1764 int
1765 zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
1766     boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
1767     uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
1768 {
1769 	int ret;
1770 	void *ptmp, *ctmp;
1771 
1772 	if (encrypt) {
1773 		ptmp = abd_borrow_buf_copy(pabd, datalen);
1774 		ctmp = abd_borrow_buf(cabd, datalen);
1775 	} else {
1776 		ptmp = abd_borrow_buf(pabd, datalen);
1777 		ctmp = abd_borrow_buf_copy(cabd, datalen);
1778 	}
1779 
1780 	ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
1781 	    datalen, ptmp, ctmp, no_crypt);
1782 	if (ret != 0)
1783 		goto error;
1784 
1785 	if (encrypt) {
1786 		abd_return_buf(pabd, ptmp, datalen);
1787 		abd_return_buf_copy(cabd, ctmp, datalen);
1788 	} else {
1789 		abd_return_buf_copy(pabd, ptmp, datalen);
1790 		abd_return_buf(cabd, ctmp, datalen);
1791 	}
1792 
1793 	return (0);
1794 
1795 error:
1796 	if (encrypt) {
1797 		abd_return_buf(pabd, ptmp, datalen);
1798 		abd_return_buf_copy(cabd, ctmp, datalen);
1799 	} else {
1800 		abd_return_buf_copy(pabd, ptmp, datalen);
1801 		abd_return_buf(cabd, ctmp, datalen);
1802 	}
1803 
1804 	return (SET_ERROR(ret));
1805 }
1806 
1807 #if defined(_KERNEL) && defined(HAVE_SPL)
1808 /* BEGIN CSTYLED */
1809 module_param(zfs_key_max_salt_uses, ulong, 0644);
1810 MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
1811 	"can be used for generating encryption keys before it is rotated");
1812 /* END CSTYLED */
1813 #endif
1814