// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#include "qat_bl.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
	 ICP_ACCEL_CAPABILITIES_AES_V2)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};

struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	struct crypto_cipher *tweak;
	bool fallback;
	int mode;
};

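/*
 * Map a QAT hash algorithm to the size in bytes of its intermediate
 * (inner) hash state as laid out by the hardware, or -EFAULT if the
 * algorithm is not supported by this driver.
 */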
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

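/*
 * Precompute the HMAC inner and outer partial digests for @auth_key and
 * store them, byte-swapped to big endian, in the state1 region of the
 * hardware auth block. Keys longer than the block size are first hashed
 * down, per RFC 2104. The ipad/opad scratch buffers are wiped before
 * returning so no key material lingers in the tfm context.
 */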
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}

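/*
 * Fill in the request header fields shared by all lookaside (LA)
 * requests: SGL pointer type, 64-bit content descriptor addressing, no
 * partial packets and no state updates between requests.
 */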
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

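/*
 * Build the encrypt-direction content descriptor and firmware request
 * template for an authenc session. For encryption the CD is laid out
 * cipher-first, and the request chains the cipher slice into the auth
 * slice (encrypt-then-MAC over the associated data plus ciphertext).
 */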
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

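/*
 * Build the decrypt-direction content descriptor and request template.
 * Decryption runs the auth slice first to check the digest over the
 * ciphertext, then the cipher slice (HASH_CIPHER), so the CD is laid
 * out hash-first with the cipher config placed past the two 8-byte
 * aligned inner digest states.
 */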
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

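/*
 * Populate the parts of an skcipher request template that are common to
 * the encrypt and decrypt directions: header, key copy and cipher CD
 * control words. On AES-v2 capable devices XTS and CTR are routed to
 * the UCS slice; for XTS only the first half of the double-length key
 * is presented to the hardware as the cipher key.
 */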
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	int mode = ctx->mode;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

		/* Store both XTS keys in the CD; only the first key is
		 * sent to the HW, the second is used for the tweak
		 * calculation.
		 */
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = keylen / 2;
	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = round_up(keylen, 16);
	} else {
		memcpy(cd->aes.key, key, keylen);
	}

	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

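/*
 * Derive the reversed key for the XTS data key in software: expand the
 * forward key and copy out the last round key(s), which is where AES
 * decryption starts from. One 16-byte round key is needed for AES-128,
 * two for AES-256.
 */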
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
				    u8 *key_reverse)
{
	struct crypto_aes_ctx aes_expanded;
	int nrounds;
	u8 *key;

	aes_expandkey(&aes_expanded, key_forward, keylen);
	if (keylen == AES_KEYSIZE_128) {
		nrounds = 10;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
	} else {
		/* AES_KEYSIZE_256 */
		nrounds = 14;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
	}
}

static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		/* Key reversing is not supported; set no key convert */
		dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

		/* In-place key reversal */
		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
					dec_cd->ucs_aes.key);
	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	} else {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
	}
}

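/*
 * Validate the key length for the given cipher mode and select the
 * matching hardware algorithm ID. XTS keys are double length, and only
 * the AES-128/AES-256 variants are offloadable; AES-192-XTS is handled
 * by the software fallback configured at setkey time.
 */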
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}

static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = numa_node_id();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	aead_request_complete(areq, res);
}

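/*
 * Advance the request IV for CTR mode: treat the IV as a 128-bit
 * big-endian counter and add the number of AES blocks processed,
 * propagating the carry from the low into the high 64 bits.
 */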
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u64 iv_lo_prev;
	u64 iv_lo;
	u64 iv_hi;

	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

	iv_lo = be64_to_cpu(qat_req->iv_lo);
	iv_hi = be64_to_cpu(qat_req->iv_hi);

	iv_lo_prev = iv_lo;
	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
	if (iv_lo < iv_lo_prev)
		iv_hi++;

	qat_req->iv_lo = cpu_to_be64(iv_lo);
	qat_req->iv_hi = cpu_to_be64(iv_hi);
}

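/*
 * Advance the request IV for CBC mode: the IV for the next request is
 * the last ciphertext block, read from the destination buffer on
 * encryption and from the source buffer on decryption.
 */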
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
	struct scatterlist *sgl;

	if (qat_req->encryption)
		sgl = sreq->dst;
	else
		sgl = sreq->src;

	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}

static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	switch (ctx->mode) {
	case ICP_QAT_HW_CIPHER_CTR_MODE:
		qat_alg_update_iv_ctr_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_CBC_MODE:
		qat_alg_update_iv_cbc_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_XTS_MODE:
		break;
	default:
		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
			 ctx->mode);
	}
}

static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	if (qat_req->encryption)
		qat_alg_update_iv(qat_req);

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

	skcipher_request_complete(sreq, res);
}

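/*
 * Ring callback for symmetric crypto responses. Recover the originating
 * request from the opaque field, run its completion handler, then give
 * any backlogged requests a chance to be resubmitted.
 */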
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_req->cb(qat_resp, qat_req);

	qat_alg_send_backlog(backlog);
}

static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
				    struct qat_crypto_instance *inst,
				    struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->sym_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

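/*
 * AEAD decrypt: the ciphertext is areq->cryptlen minus the trailing
 * digest and must be a whole number of AES blocks. The digest itself is
 * verified by the auth slice (CMP_AUTH_RES in the session template), so
 * a mismatch surfaces as -EBADMSG from the completion callback.
 */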
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digest_size = crypto_aead_authsize(aead_tfm);
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	int ret;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digest_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = numa_node_id();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}

static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->mode = mode;

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}

static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
				      ICP_QAT_HW_CIPHER_XTS_MODE);
	if (ret)
		return ret;

	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
					   keylen / 2);

	return ret;
}

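/*
 * Load the request IV into the firmware message. On AES-v2 capable
 * devices in XTS mode the tweak is pre-encrypted here in software with
 * the second half of the XTS key before being handed to the UCS slice;
 * otherwise the IV is passed through unchanged.
 */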
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	u8 *iv = qat_req->skcipher_req->iv;

	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
		crypto_cipher_encrypt_one(ctx->tweak,
					  (u8 *)cipher_param->u.cipher_IV_array,
					  iv);
	else
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}

static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = true;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}

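/*
 * XTS encrypt entry point. Requests shorter than one block are
 * rejected, and sessions flagged for fallback (AES-192 keys) are
 * forwarded to the software xts(aes) implementation using the request
 * area reserved for it at tfm init time.
 */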
static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}

static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	gfp_t f = qat_algs_alloc_flags(&req->base);
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, req->src, req->dst,
				 &qat_req->buf, NULL, f);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = false;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);
	qat_alg_update_iv(qat_req);

	ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(ctx->inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tweak)) {
		crypto_free_skcipher(ctx->ftfm);
		return PTR_ERR(ctx->tweak);
	}

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	if (ctx->tweak)
		crypto_free_cipher(ctx->tweak);

	qat_alg_skcipher_exit_tfm(tfm);
}

static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };

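/*
 * Register the skcipher and aead algorithms with the crypto API when
 * the first device arrives; subsequent devices only bump the refcount.
 * qat_algs_unregister() mirrors this on last device removal.
 */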
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}