/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/completion.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_SIG		0x00000007
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set if the algorithm (or an algorithm which it uses) requires another
 * algorithm of the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm has passed automated run-time testing.  Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via instruction set or so.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY		0x00004000

/*
 * Don't trigger module loading.
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * The algorithm may allocate memory during request processing, i.e. during
 * encryption, decryption, or hashing.  Users can request an algorithm with
 * this flag unset if they can't handle memory allocation failures.
 *
 * This flag is currently only implemented for algorithms of type "skcipher",
 * "aead", "ahash", "shash", and "cipher".  Algorithms of other types might
 * not have this flag set even if they allocate memory.
 *
 * In some edge cases, algorithms can allocate memory regardless of this flag.
 * To avoid these cases, users must obey the following usage constraints:
 * skcipher:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- If the data were to be divided into chunks of size
 *	  crypto_skcipher_walksize() (with any remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 * aead:
 *	- The IV buffer and all scatterlist elements must be aligned to the
 *	  algorithm's alignmask.
 *	- The first scatterlist element must contain all the associated data,
 *	  and its pages must be !PageHighMem.
 *	- If the plaintext/ciphertext were to be divided into chunks of size
 *	  crypto_aead_walksize() (with the remainder going at the end), no
 *	  chunk can cross a page boundary or a scatterlist element boundary.
 * ahash:
 *	- The result buffer must be aligned to the algorithm's alignmask.
 *	- crypto_ahash_finup() must not be used unless the algorithm
 *	  implements ->finup() natively.
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY	0x00010000
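
/*
 * Example (illustrative sketch, not part of the original header): a user that
 * cannot tolerate allocation failures during request processing can ask for
 * an implementation with CRYPTO_ALG_ALLOCATES_MEMORY unset by passing the
 * flag in the mask argument of a type-specific allocator, here
 * crypto_alloc_skcipher() from <crypto/skcipher.h>.  The algorithm name is
 * arbitrary, and such a caller must also honour the usage constraints listed
 * above.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0,
 *				    CRYPTO_ALG_ALLOCATES_MEMORY);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */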

/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL	0x00020000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types.  On architectures that support non-cache coherent
 * DMA, such as ARM or arm64, it also takes into account the minimal alignment
 * that is required to ensure that the context struct member does not share
 * any cachelines with the rest of the struct.  This is needed to ensure that
 * cache maintenance for non-coherent DMA (cache invalidation in particular)
 * does not affect data that may be accessed by the CPU concurrently.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct crypto_tfm;
struct crypto_type;
struct module;

typedef void (*crypto_completion_t)(void *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct cipher_alg - single-block symmetric cipher definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to
 *		either program a supplied key into the hardware or store the
 *		key in the transformation context for programming it later.
 *		Note that this function does modify the transformation
 *		context. This function can be called multiple times during the
 *		existence of the transformation object, so one must make sure
 *		the key is properly reprogrammed into the hardware. This
 *		function is also responsible for checking the key length for
 *		validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not
 *		 possible to encrypt a block of smaller size. The supplied
 *		 buffers must therefore also be at least of @cra_blocksize
 *		 size. Both the input and output buffers are always aligned to
 *		 @cra_alignmask. In case either of the input or output buffer
 *		 supplied by the user of the crypto API is not aligned to
 *		 @cra_alignmask, the crypto API will re-align the buffers. The
 *		 re-alignment means that a new buffer will be allocated, the
 *		 data will be copied into the new buffer, then the processing
 *		 will happen on the new buffer, then the data will be copied
 *		 back into the original buffer and finally the new buffer will
 *		 be freed. In case a software fallback was put in place in the
 *		 @cra_init call, this function might need to use the fallback
 *		 if the algorithm doesn't support all of the key sizes. In
 *		 case the key was stored in the transformation context, the
 *		 key might need to be re-programmed into the hardware in this
 *		 function. This function shall not modify the transformation
 *		 context, as this function may be called in parallel with the
 *		 same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
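
/*
 * Example (hypothetical sketch, not part of the original header): the shape
 * of the callbacks a single-block cipher provides.  "foo", its context layout
 * and foo_crypt_block() are invented names for illustration; crypto_tfm_ctx()
 * is declared in <crypto/algapi.h> in current kernels.
 *
 *	struct foo_ctx {
 *		u8 key[16];
 *	};
 *
 *	static int foo_setkey(struct crypto_tfm *tfm, const u8 *key,
 *			      unsigned int keylen)
 *	{
 *		struct foo_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		// Reject invalid key lengths, then stash the key in the
 *		// transformation context for later use.
 *		if (keylen != sizeof(ctx->key))
 *			return -EINVAL;
 *		memcpy(ctx->key, key, keylen);
 *		return 0;
 *	}
 *
 *	static void foo_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 *	{
 *		const struct foo_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		// Transform exactly one cra_blocksize block; the context
 *		// must not be modified here.
 *		foo_crypt_block(ctx, dst, src);
 *	}
 */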

/**
 * struct compress_alg - compression/decompression algorithm
 * @coa_compress: Compress a buffer of specified length, storing the resulting
 *		  data in the specified buffer. Return the length of the
 *		  compressed data in dlen.
 * @coa_decompress: Decompress the source buffer, storing the uncompressed
 *		    data in the specified buffer. The length of the data is
 *		    returned in dlen.
 *
 * All fields are mandatory.
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};

#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of HASH transformation, it is possible for a smaller
 *		   block than @cra_blocksize to be passed to the crypto API for
 *		   transformation; in case of any other transformation type, an
 *		   error will be returned upon any attempt to transform smaller
 *		   than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *		   data buffer containing the input data for the algorithm must
 *		   be aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note
 *		   that the Crypto API will do the re-alignment in software,
 *		   but only under special conditions and there is a performance
 *		   hit. The re-alignment happens at these occasions for
 *		   different @cra_u types: cipher -- For both input data and
 *		   output data buffer; ahash -- For output hash destination
 *		   buf; shash -- For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with highest
 *		  @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options, such as
 *	      &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation
 *	   selected by @cra_type and @cra_flags above, the associated structure
 *	   must be filled with callbacks. This field might be empty. This is
 *	   the case for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time,
 *	      right after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need
 *	      to be handled by software, this function shall check for the
 *	      precise requirement of the transformation and put any software
 *	      fallbacks in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *		  definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *		    See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
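
/*
 * Example (hypothetical sketch): tying the callbacks from the previous
 * example into a struct crypto_alg and registering it.  All values are
 * illustrative; foo_decrypt (not shown) mirrors foo_encrypt, and
 * crypto_register_alg()/crypto_unregister_alg() are declared in
 * <crypto/algapi.h>.
 *
 *	static struct crypto_alg foo_alg = {
 *		.cra_name		= "foo",
 *		.cra_driver_name	= "foo-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct foo_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_cipher		= {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 16,
 *			.cia_setkey		= foo_setkey,
 *			.cia_encrypt		= foo_encrypt,
 *			.cia_decrypt		= foo_decrypt,
 *		},
 *	};
 *
 *	static int __init foo_mod_init(void)
 *	{
 *		return crypto_register_alg(&foo_alg);
 *	}
 *
 *	static void __exit foo_mod_exit(void)
 *	{
 *		crypto_unregister_alg(&foo_alg);
 *	}
 */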

/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(void *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
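
/*
 * Example (illustrative sketch): turning an asynchronous request into a
 * synchronous call with the helpers above.  The skcipher request API shown
 * here comes from <crypto/skcipher.h>, and "req" is assumed to be a fully
 * set-up request.  crypto_wait_req() sleeps until crypto_req_done() fires
 * when the operation returned -EINPROGRESS or -EBUSY.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */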

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic.  Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct crypto_tfm {
	refcount_t refcnt;

	u32 crt_flags;

	int node;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_comp {
	struct crypto_tfm base;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

int crypto_comp_compress(struct crypto_comp *tfm,
			 const u8 *src, unsigned int slen,
			 u8 *dst, unsigned int *dlen);

int crypto_comp_decompress(struct crypto_comp *tfm,
			   const u8 *src, unsigned int slen,
			   u8 *dst, unsigned int *dlen);
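
/*
 * Example (illustrative sketch): a trip through the compression interface
 * declared above.  The algorithm name and buffers are illustrative only;
 * dlen is an in/out parameter carrying the destination buffer size in and
 * the produced length out.
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = sizeof(dst);
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	pr_debug("compressing with %s\n", crypto_comp_name(tfm));
 *	err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *	crypto_free_comp(tfm);
 *	if (err)
 *		return err;
 *	// dst now holds dlen bytes of compressed data.
 */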

#endif	/* _LINUX_CRYPTO_H */