/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
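
/*
 * For example, a generic "aes" implementation would typically declare both
 * the generic and the implementation-specific alias (an illustrative
 * sketch):
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *	MODULE_ALIAS_CRYPTO("aes-generic");
 */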

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_DIGEST		0x00000008
#define CRYPTO_ALG_TYPE_HASH		0x00000008
#define CRYPTO_ALG_TYPE_SHASH		0x00000009
#define CRYPTO_ALG_TYPE_AHASH		0x0000000a
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_PCOMPRESS	0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */

#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar means.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
struct aead_givcrypt_request;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
        struct list_head list;
        crypto_completion_t complete;
        void *data;
        struct crypto_tfm *tfm;

        u32 flags;
};

struct ablkcipher_request {
        struct crypto_async_request base;

        unsigned int nbytes;

        void *info;

        struct scatterlist *src;
        struct scatterlist *dst;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct aead_request - AEAD request
 * @base: Common attributes for async crypto requests
 * @assoclen: Length in bytes of associated data for authentication
 * @cryptlen: Length of data to be encrypted or decrypted
 * @iv: Initialisation vector
 * @assoc: Associated data
 * @src: Source data
 * @dst: Destination data
 * @__ctx: Start of private context data
 */
struct aead_request {
        struct crypto_async_request base;

        unsigned int assoclen;
        unsigned int cryptlen;

        u8 *iv;

        struct scatterlist *assoc;
        struct scatterlist *src;
        struct scatterlist *dst;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
        struct crypto_blkcipher *tfm;
        void *info;
        u32 flags;
};

struct cipher_desc {
        struct crypto_tfm *tfm;
        void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
                             const u8 *src, unsigned int nbytes);
        void *info;
};

struct hash_desc {
        struct crypto_hash *tfm;
        u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 * smallest key length supported by this transformation algorithm.
 * This must be set to one of the pre-defined values as this is
 * not hardware specific. Possible values for this field can be
 * found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 * largest key length supported by this transformation algorithm.
 * This must be set to one of the pre-defined values as this is
 * not hardware specific. Possible values for this field can be
 * found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 * program a supplied key into the hardware or store the key in the
 * transformation context for programming it later. Note that this
 * function does modify the transformation context. This function can
 * be called multiple times during the existence of the transformation
 * object, so one must make sure the key is properly reprogrammed into
 * the hardware. This function is also responsible for checking the key
 * length for validity. In case a software fallback was put in place in
 * the @cra_init call, this function might need to use the fallback if
 * the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 * the supplied scatterlist containing the blocks of data. The crypto
 * API consumer is responsible for aligning the entries of the
 * scatterlist properly and making sure the chunks are correctly
 * sized. In case a software fallback was put in place in the
 * @cra_init call, this function might need to use the fallback if
 * the algorithm doesn't support all of the key sizes. In case the
 * key was stored in transformation context, the key might need to be
 * re-programmed into the hardware in this function. This function
 * shall not modify the transformation context, as this function may
 * be called in parallel with the same transformation object.
 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
 * and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 * implementation may provide its own routine for updating the IV
 * for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 * @givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 * by the kernel crypto API. Several use cases have a predefined
 * approach how IVs are to be updated. For such use cases, the kernel
 * crypto API provides ready-to-use implementations that can be
 * referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 * IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);
        int (*givencrypt)(struct skcipher_givcrypt_request *req);
        int (*givdecrypt)(struct skcipher_givcrypt_request *req);

        const char *geniv;

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};

/**
 * struct aead_alg - AEAD cipher definition
 * @maxauthsize: Set the maximum authentication tag size supported by the
 * transformation. A transformation may support smaller tag sizes.
 * As the authentication tag is a message digest to ensure the
 * integrity of the encrypted data, a consumer typically wants the
 * largest authentication tag possible as defined by this
 * variable.
 * @setauthsize: Set authentication size for the AEAD transformation. This
 * function is used to specify the consumer requested size of the
 * authentication tag to be either generated by the transformation
 * during encryption or the size of the authentication tag to be
 * supplied during the decryption operation. This function is also
 * responsible for checking the authentication tag size for
 * validity.
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @givencrypt: see struct ablkcipher_alg
 * @givdecrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct aead_alg {
        int (*setkey)(struct crypto_aead *tfm, const u8 *key,
                      unsigned int keylen);
        int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
        int (*encrypt)(struct aead_request *req);
        int (*decrypt)(struct aead_request *req);
        int (*givencrypt)(struct aead_givcrypt_request *req);
        int (*givdecrypt)(struct aead_givcrypt_request *req);

        const char *geniv;

        unsigned int ivsize;
        unsigned int maxauthsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);

        const char *geniv;

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric cipher definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 * the smallest key length supported by this transformation
 * algorithm. This must be set to one of the pre-defined
 * values as this is not hardware specific. Possible values
 * for this field can be found via git grep "_MIN_KEY_SIZE"
 * include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 * the largest key length supported by this transformation
 * algorithm. This must be set to one of the pre-defined values
 * as this is not hardware specific. Possible values for this
 * field can be found via git grep "_MAX_KEY_SIZE"
 * include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 * program a supplied key into the hardware or store the key in the
 * transformation context for programming it later. Note that this
 * function does modify the transformation context. This function
 * can be called multiple times during the existence of the
 * transformation object, so one must make sure the key is properly
 * reprogrammed into the hardware. This function is also
 * responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 * single block of data, which must be @cra_blocksize big. This
 * always operates on a full @cra_blocksize and it is not possible
 * to encrypt a block of smaller size. The supplied buffers must
 * therefore also be at least of @cra_blocksize size. Both the
 * input and output buffers are always aligned to @cra_alignmask.
 * In case either of the input or output buffer supplied by user
 * of the crypto API is not aligned to @cra_alignmask, the crypto
 * API will re-align the buffers. The re-alignment means that a
 * new buffer will be allocated, the data will be copied into the
 * new buffer, then the processing will happen on the new buffer,
 * then the data will be copied back into the original buffer and
 * finally the new buffer will be freed. In case a software
 * fallback was put in place in the @cra_init call, this function
 * might need to use the fallback if the algorithm doesn't support
 * all of the key sizes. In case the key was stored in
 * transformation context, the key might need to be re-programmed
 * into the hardware in this function. This function shall not
 * modify the transformation context, as this function may be
 * called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 * @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
        unsigned int cia_min_keysize;
        unsigned int cia_max_keysize;
        int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
                          unsigned int keylen);
        void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
        int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen);
        int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
                              unsigned int slen, u8 *dst, unsigned int *dlen);
};

/**
 * struct rng_alg - random number generator definition
 * @rng_make_random: The function defined by this variable obtains a random
 * number. The random number generator transform must generate
 * the random number out of the context provided with this
 * call.
 * @rng_reset: Reset of the random number generator by clearing the entire state.
 * With the invocation of this function call, the random number
 * generator shall completely reinitialize its state. If the random
 * number generator requires a seed for setting up a new state,
 * the seed must be provided by the consumer while invoking this
 * function. The required size of the seed is defined with
 * @seedsize.
 * @seedsize: The seed size required for a random number generator
 * initialization defined with this variable. Some random number
 * generators like the SP800-90A DRBG do not require a seed as the
 * seeding is implemented internally without the need of support by
 * the consumer. In this case, the seed size is set to zero.
 */
struct rng_alg {
        int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
                               unsigned int dlen);
        int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);

        unsigned int seedsize;
};
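
/*
 * A sketch of how an RNG implementation might fill this structure inside
 * its struct crypto_alg (with cra_flags including CRYPTO_ALG_TYPE_RNG);
 * the callbacks and the seed size are hypothetical:
 *
 *	.cra_u = { .rng = {
 *		.rng_make_random = my_make_random,
 *		.rng_reset	 = my_reset,
 *		.seedsize	 = 48 } },
 */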


#define cra_ablkcipher cra_u.ablkcipher
#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 * CRYPTO_ALG_* flags for the flags which go in here. Those are
 * used for fine-tuning the description of the transformation
 * algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 * of the smallest possible unit which can be transformed with
 * this algorithm. The users must respect this value.
 * In case of HASH transformation, it is possible for a smaller
 * block than @cra_blocksize to be passed to the crypto API for
 * transformation; in case of any other transformation type, an
 * error will be returned upon any attempt to transform smaller
 * than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 * value informs the kernel crypto API about the memory size
 * needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 * buffer containing the input data for the algorithm must be
 * aligned to this alignment mask. The data buffer for the
 * output data must be aligned to this alignment mask. Note that
 * the Crypto API will do the re-alignment in software, but
 * only under special conditions and there is a performance hit.
 * The re-alignment happens at these occasions for different
 * @cra_u types: cipher -- For both input data and output data
 * buffer; ahash -- For output hash destination buf; shash --
 * For output hash destination buf.
 * This is needed on hardware which is flawed by design and
 * cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 * multiple transformations with the same @cra_name are available to
 * the Crypto API, the kernel will use the one with highest
 * @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 * transformation algorithm. This is the name of the transformation
 * itself. This field is used by the kernel when looking up the
 * providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 * name of the provider of the transformation. This can be any
 * arbitrary value, but in the usual case, this contains the
 * name of the chip or provider and the name of the
 * transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 * struct crypto_type, which implements callbacks common for all
 * transformation types. There are multiple options:
 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
 * This field might be empty. In that case, there are no common
 * callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 * multiple structures. Depending on the type of transformation selected
 * by @cra_type and @cra_flags above, the associated structure must be
 * filled with callbacks. This field might be empty. This is the case
 * for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 * is used to initialize the cryptographic transformation object.
 * This function is called only once at the instantiation time, right
 * after the transformation context was allocated. In case the
 * cryptographic hardware has some special requirements which need to
 * be handled by software, this function shall check for the precise
 * requirement of the transformation and put any software fallbacks
 * in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 * counterpart to @cra_init, used to remove various changes set in
 * @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
        struct list_head cra_list;
        struct list_head cra_users;

        u32 cra_flags;
        unsigned int cra_blocksize;
        unsigned int cra_ctxsize;
        unsigned int cra_alignmask;

        int cra_priority;
        atomic_t cra_refcnt;

        char cra_name[CRYPTO_MAX_ALG_NAME];
        char cra_driver_name[CRYPTO_MAX_ALG_NAME];

        const struct crypto_type *cra_type;

        union {
                struct ablkcipher_alg ablkcipher;
                struct aead_alg aead;
                struct blkcipher_alg blkcipher;
                struct cipher_alg cipher;
                struct compress_alg compress;
                struct rng_alg rng;
        } cra_u;

        int (*cra_init)(struct crypto_tfm *tfm);
        void (*cra_exit)(struct crypto_tfm *tfm);
        void (*cra_destroy)(struct crypto_alg *alg);

        struct module *cra_module;
};

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
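
/*
 * A minimal registration sketch for a single-block cipher; every name,
 * size and callback here is hypothetical:
 *
 *	static struct crypto_alg my_alg = {
 *		.cra_name	 = "mycipher",
 *		.cra_driver_name = "mycipher-generic",
 *		.cra_priority	 = 100,
 *		.cra_flags	 = CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize	 = 16,
 *		.cra_ctxsize	 = sizeof(struct my_ctx),
 *		.cra_module	 = THIS_MODULE,
 *		.cra_u		 = { .cipher = {
 *			.cia_min_keysize = 16,
 *			.cia_max_keysize = 32,
 *			.cia_setkey	 = my_setkey,
 *			.cia_encrypt	 = my_encrypt,
 *			.cia_decrypt	 = my_decrypt } }
 *	};
 *
 * The module would then call crypto_register_alg(&my_alg) from its init
 * function and crypto_unregister_alg(&my_alg) from its exit function.
 */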

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);
        int (*givencrypt)(struct skcipher_givcrypt_request *req);
        int (*givdecrypt)(struct skcipher_givcrypt_request *req);

        struct crypto_ablkcipher *base;

        unsigned int ivsize;
        unsigned int reqsize;
};

struct aead_tfm {
        int (*setkey)(struct crypto_aead *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct aead_request *req);
        int (*decrypt)(struct aead_request *req);
        int (*givencrypt)(struct aead_givcrypt_request *req);
        int (*givdecrypt)(struct aead_givcrypt_request *req);

        struct crypto_aead *base;

        unsigned int ivsize;
        unsigned int authsize;
        unsigned int reqsize;
};

struct blkcipher_tfm {
        void *iv;
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
        int (*cit_setkey)(struct crypto_tfm *tfm,
                          const u8 *key, unsigned int keylen);
        void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct hash_tfm {
        int (*init)(struct hash_desc *desc);
        int (*update)(struct hash_desc *desc,
                      struct scatterlist *sg, unsigned int nsg);
        int (*final)(struct hash_desc *desc, u8 *out);
        int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
                      unsigned int nsg, u8 *out);
        int (*setkey)(struct crypto_hash *tfm, const u8 *key,
                      unsigned int keylen);
        unsigned int digestsize;
};

struct compress_tfm {
        int (*cot_compress)(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen);
        int (*cot_decompress)(struct crypto_tfm *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int *dlen);
};

struct rng_tfm {
        int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
                              unsigned int dlen);
        int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
};

#define crt_ablkcipher crt_u.ablkcipher
#define crt_aead crt_u.aead
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
#define crt_rng crt_u.rng

struct crypto_tfm {

        u32 crt_flags;

        union {
                struct ablkcipher_tfm ablkcipher;
                struct aead_tfm aead;
                struct blkcipher_tfm blkcipher;
                struct cipher_tfm cipher;
                struct hash_tfm hash;
                struct compress_tfm compress;
                struct rng_tfm rng;
        } crt_u;

        void (*exit)(struct crypto_tfm *tfm);

        struct crypto_alg *__crt_alg;

        void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
        struct crypto_tfm base;
};

struct crypto_aead {
        struct crypto_tfm base;
};

struct crypto_blkcipher {
        struct crypto_tfm base;
};

struct crypto_cipher {
        struct crypto_tfm base;
};

struct crypto_comp {
        struct crypto_tfm base;
};

struct crypto_hash {
        struct crypto_tfm base;
};

struct crypto_rng {
        struct crypto_tfm base;
};

enum {
        CRYPTOA_UNSPEC,
        CRYPTOA_ALG,
        CRYPTOA_TYPE,
        CRYPTOA_U32,
        __CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
        u32 type;
        u32 mask;
};

struct crypto_attr_u32 {
        u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
        return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
        return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
        return tfm->__crt_ctx;
}
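
/*
 * A cipher implementation typically fetches its private context, whose
 * size it declared via @cra_ctxsize, at the start of each callback. A
 * minimal sketch (struct my_ctx and the missing key-length check are
 * hypothetical):
 *
 *	static int my_setkey(struct crypto_tfm *tfm, const u8 *key,
 *			     unsigned int keylen)
 *	{
 *		struct my_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		memcpy(ctx->key, key, keylen);
 *		return 0;
 *	}
 */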

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
        struct crypto_tfm *tfm;
        return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
        struct crypto_tfm *tfm)
{
        return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
        mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
        return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished, if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */
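
/*
 * A minimal usage sketch, assuming a caller-provided completion callback
 * my_complete() and pre-initialized scatterlists sg_src and sg_dst (all
 * such names are hypothetical); error handling is abbreviated:
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	int rc;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	rc = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_complete, &my_result);
 *	ablkcipher_request_set_crypt(req, &sg_src, &sg_dst, nbytes, iv);
 *
 *	rc = crypto_ablkcipher_encrypt(req);
 *	if (rc == -EINPROGRESS || rc == -EBUSY)
 *		rc = my_wait_for_completion(&my_result);
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */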

/**
 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 * ablkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ablkcipher. The returned struct
 * crypto_ablkcipher is the cipher handle that is required for any subsequent
 * API invocation for that ablkcipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 * of an error, PTR_ERR() returns the error code.
 */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask);

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
        struct crypto_ablkcipher *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
        crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 * ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 * otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_has_alg(alg_name, crypto_skcipher_type(type),
                              crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
        struct crypto_ablkcipher *tfm)
{
        return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
                                               u32 flags)
{
        crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
                                                 u32 flags)
{
        crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

        return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
        struct ablkcipher_request *req)
{
        return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 * needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 * needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as the
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
        struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
        req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
        struct crypto_async_request *req)
{
        return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success; IS_ERR() is true in case
 * of an error, PTR_ERR() returns the error code.
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
        struct crypto_ablkcipher *tfm, gfp_t gfp)
{
        struct ablkcipher_request *req;

        req = kmalloc(sizeof(struct ablkcipher_request) +
                      crypto_ablkcipher_reqsize(tfm), gfp);

        if (likely(req))
                ablkcipher_request_set_tfm(req, tfm);

        return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
        kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 * increase the wait queue beyond the initial maximum size;
 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 * crypto API, but provided to the callback function for it to use. Here,
 * the caller can provide a reference to memory the callback function can
 * operate on. As the callback function is invoked asynchronously to the
 * related functionality, it may need to access data structures of the
 * related functionality which can be referenced using this pointer. The
 * callback function can access the memory via the "data" field in the
 * crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
        struct ablkcipher_request *req,
        u32 flags, crypto_completion_t compl, void *data)
{
        req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 * by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
        struct ablkcipher_request *req,
        struct scatterlist *src, struct scatterlist *dst,
        unsigned int nbytes, void *iv)
{
        req->src = src;
        req->dst = dst;
        req->nbytes = nbytes;
        req->info = iv;
}

/**
 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
 *
 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
 * (listed as type "aead" in /proc/crypto)
 *
 * The most prominent examples for this type of encryption are GCM and CCM.
 * However, the kernel supports other types of AEAD ciphers which are defined
 * with the following cipher string:
 *
 *	authenc(keyed message digest, block cipher)
 *
 * For example: authenc(hmac(sha256), cbc(aes))
 *
 * The example code provided for the asynchronous block cipher operation
 * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
 * for the *aead* counterparts discussed in the following. In addition, for the
 * AEAD operation, the aead_request_set_assoc function must be used to set the
 * pointer to the associated data memory location before performing the
 * encryption or decryption operation. In case of an encryption, the associated
 * data memory is filled during the encryption operation. For decryption, the
 * associated data memory must contain data that is used to verify the integrity
 * of the decrypted data. Another deviation from the asynchronous block cipher
 * operation is that the caller should explicitly check for -EBADMSG returned
 * by crypto_aead_decrypt. That error indicates an authentication error, i.e.
 * a breach in the integrity of the message. In essence, that -EBADMSG error
 * code is the key bonus an AEAD cipher has over "standard" block chaining
 * modes.
 */
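
/*
 * A minimal encryption sketch under the same conventions as the ablkcipher
 * example above (my_complete, my_result and the scatterlists are
 * hypothetical); note the explicit -EBADMSG check on decryption:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	int rc;
 *
 *	rc = crypto_aead_setkey(tfm, key, keylen);
 *	rc = crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  my_complete, &my_result);
 *	aead_request_set_assoc(req, &sg_assoc, assoclen);
 *	aead_request_set_crypt(req, &sg_src, &sg_dst, ptlen, iv);
 *	rc = crypto_aead_encrypt(req);
 *
 *	...
 *
 *	rc = crypto_aead_decrypt(req);
 *	if (rc == -EBADMSG)
 *		pr_err("authentication failed\n");
 */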

static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
        return (struct crypto_aead *)tfm;
}

/**
 * crypto_alloc_aead() - allocate AEAD cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 * AEAD cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an AEAD. The returned struct
 * crypto_aead is the cipher handle that is required for any subsequent
 * API invocation for that AEAD.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 * of an error, PTR_ERR() returns the error code.
 */
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);

static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_aead() - zeroize and free aead handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_aead(struct crypto_aead *tfm)
{
        crypto_free_tfm(crypto_aead_tfm(tfm));
}

static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
{
        return &crypto_aead_tfm(tfm)->crt_aead;
}

/**
 * crypto_aead_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the aead referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
{
        return crypto_aead_crt(tfm)->ivsize;
}

/**
 * crypto_aead_authsize() - obtain maximum authentication data size
 * @tfm: cipher handle
 *
 * The maximum size of the authentication data for the AEAD cipher referenced
 * by the AEAD cipher handle is returned. The authentication data size may be
 * zero if the cipher implements a hard-coded maximum.
 *
 * The authentication data may also be known as "tag value".
 *
 * Return: authentication data size / tag size in bytes
 */
static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
{
        return crypto_aead_crt(tfm)->authsize;
}

/**
 * crypto_aead_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the AEAD referenced with the cipher handle is returned.
 * The caller may use that information to allocate appropriate memory for the
 * data returned by the encryption or decryption operation
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}

static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
}

static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
{
        return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
}

static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
{
        crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
}

static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
{
        crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}

/**
 * crypto_aead_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the AEAD referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct aead_tfm *crt = crypto_aead_crt(tfm);

        return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_aead_setauthsize() - set authentication data size
 * @tfm: cipher handle
 * @authsize: size of the authentication data / tag in bytes
 *
 * Set the authentication data size / tag size. AEAD requires an authentication
 * tag (or MAC) in addition to the associated data.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);

static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
        return __crypto_aead_cast(req->base.tfm);
}

/**
 * crypto_aead_encrypt() - encrypt plaintext
 * @req: reference to the aead_request handle that holds all information
 * needed to perform the cipher operation
 *
 * Encrypt plaintext data using the aead_request handle. That data structure
 * and how it is filled with data is discussed with the aead_request_*
 * functions.
 *
 * IMPORTANT NOTE The encryption operation creates the authentication data /
 * tag. That data is concatenated with the created ciphertext.
 * The ciphertext memory size is therefore the given number of
 * block cipher blocks + the size defined by the
 * crypto_aead_setauthsize invocation. The caller must ensure
 * that sufficient memory is available for the ciphertext and
 * the authentication tag.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_aead_encrypt(struct aead_request *req)
{
        return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
}

/**
 * crypto_aead_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 * needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the aead_request handle. That data structure
 * and how it is filled with data is discussed with the aead_request_*
 * functions.
 *
 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
 * authentication data / tag. That authentication data / tag
 * must have the size defined by the crypto_aead_setauthsize
 * invocation.
 *
 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
 * cipher operation performs the authentication of the data during the
 * decryption operation. Therefore, the function returns this error if
 * the authentication of the ciphertext was unsuccessful (i.e. the
 * integrity of the ciphertext or the associated data was violated);
 * < 0 if an error occurred.
 */
static inline int crypto_aead_decrypt(struct aead_request *req)
{
        if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
                return -EINVAL;

        return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
}

/**
 * DOC: Asynchronous AEAD Request Handle
 *
 * The aead_request data structure contains all pointers to data required for
 * the AEAD cipher operation. This includes the cipher handle (which can be
 * used by multiple aead_request instances), pointer to plaintext and
 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
 * aead_request_* API calls in a similar way as the AEAD handle to the
 * crypto_aead_* API calls.
 */

/**
 * crypto_aead_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
{
        return crypto_aead_crt(tfm)->reqsize;
}

/**
 * aead_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing aead handle in the request
 * data structure with a different one.
 */
static inline void aead_request_set_tfm(struct aead_request *req,
                                        struct crypto_aead *tfm)
{
        req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
}

/**
 * aead_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the AEAD
 * encrypt and decrypt API calls. During the allocation, the provided aead
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success; IS_ERR() is true in case
 * of an error, PTR_ERR() returns the error code.
 */
static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
                                                      gfp_t gfp)
{
        struct aead_request *req;

        req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);

        if (likely(req))
                aead_request_set_tfm(req, tfm);

        return req;
}

/**
 * aead_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void aead_request_free(struct aead_request *req)
{
        kzfree(req);
}
1490
1491 /**
1492 * aead_request_set_callback() - set asynchronous callback function
1493 * @req: request handle
1494 * @flags: specify zero or an ORing of the flags
1495 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1496 * increase the wait queue beyond the initial maximum size;
1497 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1498 * @compl: callback function pointer to be registered with the request handle
1499 * @data: The data pointer refers to memory that is not used by the kernel
1500 * crypto API, but provided to the callback function for it to use. Here,
1501 * the caller can provide a reference to memory the callback function can
1502 * operate on. As the callback function is invoked asynchronously to the
1503 * related functionality, it may need to access data structures of the
1504 * related functionality which can be referenced using this pointer. The
1505 * callback function can access the memory via the "data" field in the
1506 * crypto_async_request data structure provided to the callback function.
1507 *
1508 * This call sets the callback function that is triggered once the cipher
1509 * operation completes.
1510 *
1511 * The callback function is registered with the aead_request handle and
1512 * must comply with the following template
1513 *
1514 * void callback_function(struct crypto_async_request *req, int error)
1515 */
1516 static inline void aead_request_set_callback(struct aead_request *req,
1517 u32 flags,
1518 crypto_completion_t compl,
1519 void *data)
1520 {
1521 req->base.complete = compl;
1522 req->base.data = data;
1523 req->base.flags = flags;
1524 }
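
/*
 * Illustrative sketch only (not part of this header): a callback matching
 * the template above that lets a synchronous caller wait for an
 * asynchronous request via a struct completion (<linux/completion.h>).
 * A backlogged request's transition to active processing is signalled by
 * invoking the callback with -EINPROGRESS; that is not a final result.
 * The example_* names are made up for this sketch.
 */
struct example_result {
	struct completion completion;
	int err;
};

static void example_aead_done(struct crypto_async_request *req, int error)
{
	struct example_result *res = req->data;

	if (error == -EINPROGRESS)
		return;	/* backlogged request is now being processed */

	res->err = error;
	complete(&res->completion);
}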
1525
1526 /**
1527 * aead_request_set_crypt - set data buffers
1528 * @req: request handle
1529 * @src: source scatter / gather list
1530 * @dst: destination scatter / gather list
1531 * @cryptlen: number of bytes to process from @src
1532 * @iv: IV for the cipher operation which must comply with the IV size defined
1533 * by crypto_aead_ivsize()
1534 *
1535 * This call sets the source data and destination data scatter / gather lists.
1536 *
1537 * For encryption, the source is treated as the plaintext and the
1538 * destination is the ciphertext. For a decryption operation, the use is
1539 * reversed - the source is the ciphertext and the destination is the plaintext.
1540 *
1541 * IMPORTANT NOTE: AEAD requires an authentication tag (MAC). For decryption,
1542 * the caller must concatenate the ciphertext followed by the
1543 * authentication tag and provide the entire data stream to the
1544 * decryption operation (i.e. the data length used for the
1545 * initialization of the scatterlist and the data length for the
1546 * decryption operation is identical). For encryption, however,
1547 * the authentication tag is created while encrypting the data.
1548 * The destination buffer must hold sufficient space for the
1549 * ciphertext and the authentication tag while the encryption
1550 * invocation must only point to the plaintext data size. The
1551 * following code snippet illustrates the memory usage:
1552 * buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
1553 * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
1554 * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
1555 */
1556 static inline void aead_request_set_crypt(struct aead_request *req,
1557 struct scatterlist *src,
1558 struct scatterlist *dst,
1559 unsigned int cryptlen, u8 *iv)
1560 {
1561 req->src = src;
1562 req->dst = dst;
1563 req->cryptlen = cryptlen;
1564 req->iv = iv;
1565 }
1566
1567 /**
1568 * aead_request_set_assoc() - set the associated data scatter / gather list
1569 * @req: request handle
1570 * @assoc: associated data scatter / gather list
1571 * @assoclen: number of bytes to process from @assoc
1572 *
1573 * For both encryption and decryption, the memory must point to the associated
1574 * data that is authenticated along with, but not covered by, the encryption.
1575 */
1576 static inline void aead_request_set_assoc(struct aead_request *req,
1577 struct scatterlist *assoc,
1578 unsigned int assoclen)
1579 {
1580 req->assoc = assoc;
1581 req->assoclen = assoclen;
1582 }
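
/*
 * Illustrative sketch only, combining the aead_request_* calls above with
 * the crypto_aead_* calls documented earlier in this file. Assumptions: a
 * "gcm(aes)" implementation is available, @sg provides ptlen plus 16 bytes
 * of room for the authentication tag, and @iv matches
 * crypto_aead_ivsize(). It reuses the example_result/example_aead_done
 * pair from the callback sketch above; all example_* names are made up.
 */
static int example_aead_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
				struct scatterlist *assoc,
				unsigned int assoclen,
				struct scatterlist *sg, unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct example_result res;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	init_completion(&res.completion);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  example_aead_done, &res);
	aead_request_set_assoc(req, assoc, assoclen);
	/* in-place: dst == src; dst must also hold the 16 byte tag */
	aead_request_set_crypt(req, sg, sg, ptlen, iv);

	err = crypto_aead_encrypt(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}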
1583
1584 /**
1585 * DOC: Synchronous Block Cipher API
1586 *
1587 * The synchronous block cipher API is used with the ciphers of type
1588 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1589 *
1590 * Synchronous calls have a context in the tfm. But since a single tfm can be
1591 * used in multiple calls and in parallel, this info should not be changeable
1592 * (unless a lock is used). This applies, for example, to the symmetric key.
1593 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
1594 * structure for the synchronous blkcipher API. So it is the only state info
1595 * that can be kept for synchronous calls without using a big lock across a tfm.
1596 *
1597 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1598 * consisting of a template (a block chaining mode) and a single block cipher
1599 * primitive (e.g. AES).
1600 *
1601 * The plaintext data buffer and the ciphertext data buffer are pointed to
1602 * by using scatter/gather lists. The cipher operation is performed
1603 * on all segments of the provided scatter/gather lists.
1604 *
1605 * The kernel crypto API supports a cipher operation "in-place" which means that
1606 * the caller may provide the same scatter/gather list for the plaintext and
1607 * ciphertext. After the completion of the cipher operation, the plaintext
1608 * data is replaced with the ciphertext data in case of an encryption and vice
1609 * versa for a decryption. The caller must ensure that the scatter/gather lists
1610 * for the output data point to sufficiently large buffers, i.e. multiples of
1611 * the block size of the cipher.
1612 */
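
/*
 * Illustrative sketch only, following the conventions above. Assumptions:
 * a "cbc(aes)" implementation is present, @sg covers a multiple of the
 * block size and @iv is crypto_blkcipher_ivsize() bytes long. Passing
 * CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation.
 * The example_* name is made up for this sketch.
 */
static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
			       const u8 *iv, struct scatterlist *sg,
			       unsigned int nbytes)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_blkcipher_encrypt(&desc, sg, sg, nbytes); /* in-place */
out:
	crypto_free_blkcipher(tfm);
	return err;
}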
1613
1614 static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1615 struct crypto_tfm *tfm)
1616 {
1617 return (struct crypto_blkcipher *)tfm;
1618 }
1619
1620 static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1621 struct crypto_tfm *tfm)
1622 {
1623 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1624 return __crypto_blkcipher_cast(tfm);
1625 }
1626
1627 /**
1628 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1629 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1630 * blkcipher cipher
1631 * @type: specifies the type of the cipher
1632 * @mask: specifies the mask for the cipher
1633 *
1634 * Allocate a cipher handle for a block cipher. The returned struct
1635 * crypto_blkcipher is the cipher handle that is required for any subsequent
1636 * API invocation for that block cipher.
1637 *
1638 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1639 * of an error, PTR_ERR() returns the error code.
1640 */
1641 static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1642 const char *alg_name, u32 type, u32 mask)
1643 {
1644 type &= ~CRYPTO_ALG_TYPE_MASK;
1645 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1646 mask |= CRYPTO_ALG_TYPE_MASK;
1647
1648 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1649 }
1650
1651 static inline struct crypto_tfm *crypto_blkcipher_tfm(
1652 struct crypto_blkcipher *tfm)
1653 {
1654 return &tfm->base;
1655 }
1656
1657 /**
1658 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1659 * @tfm: cipher handle to be freed
1660 */
1661 static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1662 {
1663 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1664 }
1665
1666 /**
1667 * crypto_has_blkcipher() - Search for the availability of a block cipher
1668 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1669 * block cipher
1670 * @type: specifies the type of the cipher
1671 * @mask: specifies the mask for the cipher
1672 *
1673 * Return: true when the block cipher is known to the kernel crypto API; false
1674 * otherwise
1675 */
1676 static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1677 {
1678 type &= ~CRYPTO_ALG_TYPE_MASK;
1679 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1680 mask |= CRYPTO_ALG_TYPE_MASK;
1681
1682 return crypto_has_alg(alg_name, type, mask);
1683 }
1684
1685 /**
1686 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1687 * @tfm: cipher handle
1688 *
1689 * Return: The character string holding the name of the cipher
1690 */
1691 static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1692 {
1693 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1694 }
1695
1696 static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1697 struct crypto_blkcipher *tfm)
1698 {
1699 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1700 }
1701
1702 static inline struct blkcipher_alg *crypto_blkcipher_alg(
1703 struct crypto_blkcipher *tfm)
1704 {
1705 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1706 }
1707
1708 /**
1709 * crypto_blkcipher_ivsize() - obtain IV size
1710 * @tfm: cipher handle
1711 *
1712 * The size of the IV for the block cipher referenced by the cipher handle is
1713 * returned. This IV size may be zero if the cipher does not need an IV.
1714 *
1715 * Return: IV size in bytes
1716 */
1717 static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1718 {
1719 return crypto_blkcipher_alg(tfm)->ivsize;
1720 }
1721
1722 /**
1723 * crypto_blkcipher_blocksize() - obtain block size of cipher
1724 * @tfm: cipher handle
1725 *
1726 * The block size for the block cipher referenced with the cipher handle is
1727 * returned. The caller may use that information to allocate appropriate
1728 * memory for the data returned by the encryption or decryption operation.
1729 *
1730 * Return: block size of cipher
1731 */
1732 static inline unsigned int crypto_blkcipher_blocksize(
1733 struct crypto_blkcipher *tfm)
1734 {
1735 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1736 }
1737
1738 static inline unsigned int crypto_blkcipher_alignmask(
1739 struct crypto_blkcipher *tfm)
1740 {
1741 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
1742 }
1743
1744 static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
1745 {
1746 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
1747 }
1748
1749 static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
1750 u32 flags)
1751 {
1752 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
1753 }
1754
1755 static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
1756 u32 flags)
1757 {
1758 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
1759 }
1760
1761 /**
1762 * crypto_blkcipher_setkey() - set key for cipher
1763 * @tfm: cipher handle
1764 * @key: buffer holding the key
1765 * @keylen: length of the key in bytes
1766 *
1767 * The caller provided key is set for the block cipher referenced by the cipher
1768 * handle.
1769 *
1770 * Note, the key length determines the cipher variant. Many block ciphers
1771 * support different key sizes, such as AES-128 vs. AES-192 vs. AES-256.
1772 * When providing a 16 byte key for an AES cipher handle, AES-128
1773 * is selected.
1774 *
1775 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1776 */
1777 static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
1778 const u8 *key, unsigned int keylen)
1779 {
1780 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
1781 key, keylen);
1782 }
1783
1784 /**
1785 * crypto_blkcipher_encrypt() - encrypt plaintext
1786 * @desc: reference to the block cipher handle with meta data
1787 * @dst: scatter/gather list that is filled by the cipher operation with the
1788 * ciphertext
1789 * @src: scatter/gather list that holds the plaintext
1790 * @nbytes: number of bytes of the plaintext to encrypt.
1791 *
1792 * Encrypt plaintext data using the IV set by the caller with a preceding
1793 * call of crypto_blkcipher_set_iv.
1794 *
1795 * The blkcipher_desc data structure must be filled by the caller and can
1796 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1797 * with the block cipher handle; desc.flags is filled with either
1798 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1799 *
1800 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1801 */
1802 static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
1803 struct scatterlist *dst,
1804 struct scatterlist *src,
1805 unsigned int nbytes)
1806 {
1807 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1808 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1809 }
1810
1811 /**
1812 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1813 * @desc: reference to the block cipher handle with meta data
1814 * @dst: scatter/gather list that is filled by the cipher operation with the
1815 * ciphertext
1816 * @src: scatter/gather list that holds the plaintext
1817 * @nbytes: number of bytes of the plaintext to encrypt.
1818 *
1819 * Encrypt plaintext data with the use of an IV that is solely used for this
1820 * cipher operation. Any previously set IV is not used.
1821 *
1822 * The blkcipher_desc data structure must be filled by the caller and can
1823 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1824 * with the block cipher handle; desc.info is filled with the IV to be used for
1825 * the current operation; desc.flags is filled with either
1826 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1827 *
1828 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1829 */
1830 static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
1831 struct scatterlist *dst,
1832 struct scatterlist *src,
1833 unsigned int nbytes)
1834 {
1835 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1836 }
1837
1838 /**
1839 * crypto_blkcipher_decrypt() - decrypt ciphertext
1840 * @desc: reference to the block cipher handle with meta data
1841 * @dst: scatter/gather list that is filled by the cipher operation with the
1842 * plaintext
1843 * @src: scatter/gather list that holds the ciphertext
1844 * @nbytes: number of bytes of the ciphertext to decrypt.
1845 *
1846 * Decrypt ciphertext data using the IV set by the caller with a preceding
1847 * call of crypto_blkcipher_set_iv.
1848 *
1849 * The blkcipher_desc data structure must be filled by the caller as documented
1850 * for the crypto_blkcipher_encrypt call above.
1851 *
1852 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1853 *
1854 */
1855 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
1856 struct scatterlist *dst,
1857 struct scatterlist *src,
1858 unsigned int nbytes)
1859 {
1860 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1861 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1862 }
1863
1864 /**
1865 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1866 * @desc: reference to the block cipher handle with meta data
1867 * @dst: scatter/gather list that is filled by the cipher operation with the
1868 * plaintext
1869 * @src: scatter/gather list that holds the ciphertext
1870 * @nbytes: number of bytes of the ciphertext to decrypt.
1871 *
1872 * Decrypt ciphertext data with the use of an IV that is solely used for this
1873 * cipher operation. Any previously set IV is not used.
1874 *
1875 * The blkcipher_desc data structure must be filled by the caller as documented
1876 * for the crypto_blkcipher_encrypt_iv call above.
1877 *
1878 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1879 */
1880 static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1881 struct scatterlist *dst,
1882 struct scatterlist *src,
1883 unsigned int nbytes)
1884 {
1885 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1886 }
1887
1888 /**
1889 * crypto_blkcipher_set_iv() - set IV for cipher
1890 * @tfm: cipher handle
1891 * @src: buffer holding the IV
1892 * @len: length of the IV in bytes
1893 *
1894 * The caller provided IV is set for the block cipher referenced by the cipher
1895 * handle.
1896 */
1897 static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1898 const u8 *src, unsigned int len)
1899 {
1900 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1901 }
1902
1903 /**
1904 * crypto_blkcipher_get_iv() - obtain IV from cipher
1905 * @tfm: cipher handle
1906 * @dst: buffer filled with the IV
1907 * @len: length of the buffer dst
1908 *
1909 * The caller can obtain the IV set for the block cipher referenced by the
1910 * cipher handle and store it into the user-provided buffer. If the buffer
1911 * has insufficient space, the IV is truncated to fit the buffer.
1912 */
1913 static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1914 u8 *dst, unsigned int len)
1915 {
1916 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1917 }
1918
1919 /**
1920 * DOC: Single Block Cipher API
1921 *
1922 * The single block cipher API is used with the ciphers of type
1923 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1924 *
1925 * Using the single block cipher API calls, operations with the basic cipher
1926 * primitive can be implemented. These cipher primitives exclude any block
1927 * chaining operations including IV handling.
1928 *
1929 * The purpose of this single block cipher API is to support the implementation
1930 * of templates or other concepts that only need to perform the cipher operation
1931 * on one block at a time. Templates invoke the underlying cipher primitive
1932 * block-wise and process either the input or the output data of these cipher
1933 * operations.
1934 */
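
/*
 * Illustrative sketch only: transform a single 16 byte block with the bare
 * "aes" primitive -- no chaining mode, no IV -- as a template would do it.
 * Assumes the "aes" primitive is available; example_* is a made-up name.
 */
static int example_aes_encrypt_one(const u8 *key, unsigned int keylen,
				   u8 *dst, const u8 *src)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, dst, src);

	crypto_free_cipher(tfm);
	return err;
}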
1935
1936 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1937 {
1938 return (struct crypto_cipher *)tfm;
1939 }
1940
1941 static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1942 {
1943 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1944 return __crypto_cipher_cast(tfm);
1945 }
1946
1947 /**
1948 * crypto_alloc_cipher() - allocate single block cipher handle
1949 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1950 * single block cipher
1951 * @type: specifies the type of the cipher
1952 * @mask: specifies the mask for the cipher
1953 *
1954 * Allocate a cipher handle for a single block cipher. The returned struct
1955 * crypto_cipher is the cipher handle that is required for any subsequent API
1956 * invocation for that single block cipher.
1957 *
1958 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1959 * of an error, PTR_ERR() returns the error code.
1960 */
1961 static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1962 u32 type, u32 mask)
1963 {
1964 type &= ~CRYPTO_ALG_TYPE_MASK;
1965 type |= CRYPTO_ALG_TYPE_CIPHER;
1966 mask |= CRYPTO_ALG_TYPE_MASK;
1967
1968 return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1969 }
1970
1971 static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1972 {
1973 return &tfm->base;
1974 }
1975
1976 /**
1977 * crypto_free_cipher() - zeroize and free the single block cipher handle
1978 * @tfm: cipher handle to be freed
1979 */
1980 static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1981 {
1982 crypto_free_tfm(crypto_cipher_tfm(tfm));
1983 }
1984
1985 /**
1986 * crypto_has_cipher() - Search for the availability of a single block cipher
1987 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1988 * single block cipher
1989 * @type: specifies the type of the cipher
1990 * @mask: specifies the mask for the cipher
1991 *
1992 * Return: true when the single block cipher is known to the kernel crypto API;
1993 * false otherwise
1994 */
1995 static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1996 {
1997 type &= ~CRYPTO_ALG_TYPE_MASK;
1998 type |= CRYPTO_ALG_TYPE_CIPHER;
1999 mask |= CRYPTO_ALG_TYPE_MASK;
2000
2001 return crypto_has_alg(alg_name, type, mask);
2002 }
2003
2004 static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
2005 {
2006 return &crypto_cipher_tfm(tfm)->crt_cipher;
2007 }
2008
2009 /**
2010 * crypto_cipher_blocksize() - obtain block size for cipher
2011 * @tfm: cipher handle
2012 *
2013 * The block size for the single block cipher referenced with the cipher handle
2014 * tfm is returned. The caller may use that information to allocate appropriate
2015 * memory for the data returned by the encryption or decryption operation.
2016 *
2017 * Return: block size of cipher
2018 */
2019 static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
2020 {
2021 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
2022 }
2023
2024 static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
2025 {
2026 return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
2027 }
2028
2029 static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
2030 {
2031 return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
2032 }
2033
2034 static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
2035 u32 flags)
2036 {
2037 crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
2038 }
2039
2040 static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
2041 u32 flags)
2042 {
2043 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
2044 }
2045
2046 /**
2047 * crypto_cipher_setkey() - set key for cipher
2048 * @tfm: cipher handle
2049 * @key: buffer holding the key
2050 * @keylen: length of the key in bytes
2051 *
2052 * The caller provided key is set for the single block cipher referenced by the
2053 * cipher handle.
2054 *
2055 * Note, the key length determines the cipher variant. Many block ciphers
2056 * support different key sizes, such as AES-128 vs. AES-192 vs. AES-256.
2057 * When providing a 16 byte key for an AES cipher handle, AES-128
2058 * is selected.
2059 *
2060 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2061 */
2062 static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
2063 const u8 *key, unsigned int keylen)
2064 {
2065 return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
2066 key, keylen);
2067 }
2068
2069 /**
2070 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
2071 * @tfm: cipher handle
2072 * @dst: points to the buffer that will be filled with the ciphertext
2073 * @src: buffer holding the plaintext to be encrypted
2074 *
2075 * Invoke the encryption operation of one block. The caller must ensure that
2076 * the plaintext and ciphertext buffers are at least one block in size.
2077 */
2078 static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
2079 u8 *dst, const u8 *src)
2080 {
2081 crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
2082 dst, src);
2083 }
2084
2085 /**
2086 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
2087 * @tfm: cipher handle
2088 * @dst: points to the buffer that will be filled with the plaintext
2089 * @src: buffer holding the ciphertext to be decrypted
2090 *
2091 * Invoke the decryption operation of one block. The caller must ensure that
2092 * the plaintext and ciphertext buffers are at least one block in size.
2093 */
2094 static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
2095 u8 *dst, const u8 *src)
2096 {
2097 crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
2098 dst, src);
2099 }
2100
2101 /**
2102 * DOC: Synchronous Message Digest API
2103 *
2104 * The synchronous message digest API is used with the ciphers of type
2105 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
2106 */
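
/*
 * Illustrative sketch only: the init/update/final sequence documented
 * below. Assumptions: a "sha256" implementation is available and @out
 * holds crypto_hash_digestsize() bytes; CRYPTO_ALG_ASYNC in the mask
 * requests a synchronous implementation. example_* is a made-up name.
 */
static int example_sha256(struct scatterlist *sg, unsigned int nbytes,
			  u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int err;

	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (!err)
		err = crypto_hash_update(&desc, sg, nbytes);
	if (!err)
		err = crypto_hash_final(&desc, out);

	crypto_free_hash(tfm);
	return err;
}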
2107
2108 static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
2109 {
2110 return (struct crypto_hash *)tfm;
2111 }
2112
2113 static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
2114 {
2115 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
2116 CRYPTO_ALG_TYPE_HASH_MASK);
2117 return __crypto_hash_cast(tfm);
2118 }
2119
2120 /**
2121 * crypto_alloc_hash() - allocate synchronous message digest handle
2122 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2123 * message digest cipher
2124 * @type: specifies the type of the cipher
2125 * @mask: specifies the mask for the cipher
2126 *
2127 * Allocate a cipher handle for a message digest. The returned struct
2128 * crypto_hash is the cipher handle that is required for any subsequent
2129 * API invocation for that message digest.
2130 *
2131 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
2132 * of an error, PTR_ERR() returns the error code.
2133 */
2134 static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
2135 u32 type, u32 mask)
2136 {
2137 type &= ~CRYPTO_ALG_TYPE_MASK;
2138 mask &= ~CRYPTO_ALG_TYPE_MASK;
2139 type |= CRYPTO_ALG_TYPE_HASH;
2140 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
2141
2142 return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
2143 }
2144
2145 static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
2146 {
2147 return &tfm->base;
2148 }
2149
2150 /**
2151 * crypto_free_hash() - zeroize and free message digest handle
2152 * @tfm: cipher handle to be freed
2153 */
2154 static inline void crypto_free_hash(struct crypto_hash *tfm)
2155 {
2156 crypto_free_tfm(crypto_hash_tfm(tfm));
2157 }
2158
2159 /**
2160 * crypto_has_hash() - Search for the availability of a message digest
2161 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2162 * message digest cipher
2163 * @type: specifies the type of the cipher
2164 * @mask: specifies the mask for the cipher
2165 *
2166 * Return: true when the message digest cipher is known to the kernel crypto
2167 * API; false otherwise
2168 */
2169 static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
2170 {
2171 type &= ~CRYPTO_ALG_TYPE_MASK;
2172 mask &= ~CRYPTO_ALG_TYPE_MASK;
2173 type |= CRYPTO_ALG_TYPE_HASH;
2174 mask |= CRYPTO_ALG_TYPE_HASH_MASK;
2175
2176 return crypto_has_alg(alg_name, type, mask);
2177 }
2178
2179 static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
2180 {
2181 return &crypto_hash_tfm(tfm)->crt_hash;
2182 }
2183
2184 /**
2185 * crypto_hash_blocksize() - obtain block size for message digest
2186 * @tfm: cipher handle
2187 *
2188 * The block size for the message digest cipher referenced with the cipher
2189 * handle is returned.
2190 *
2191 * Return: block size of cipher
2192 */
2193 static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
2194 {
2195 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
2196 }
2197
2198 static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
2199 {
2200 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
2201 }
2202
2203 /**
2204 * crypto_hash_digestsize() - obtain message digest size
2205 * @tfm: cipher handle
2206 *
2207 * The size for the message digest created by the message digest cipher
2208 * referenced with the cipher handle is returned.
2209 *
2210 * Return: message digest size
2211 */
2212 static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
2213 {
2214 return crypto_hash_crt(tfm)->digestsize;
2215 }
2216
2217 static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
2218 {
2219 return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
2220 }
2221
2222 static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
2223 {
2224 crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
2225 }
2226
2227 static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
2228 {
2229 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
2230 }
2231
2232 /**
2233 * crypto_hash_init() - (re)initialize message digest handle
2234 * @desc: cipher request handle to be filled by the caller --
2235 * desc.tfm is filled with the hash cipher handle;
2236 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
2237 *
2238 * The call (re-)initializes the message digest referenced by the hash cipher
2239 * request handle. Any potentially existing state created by previous
2240 * operations is discarded.
2241 *
2242 * Return: 0 if the message digest initialization was successful; < 0 if an
2243 * error occurred
2244 */
2245 static inline int crypto_hash_init(struct hash_desc *desc)
2246 {
2247 return crypto_hash_crt(desc->tfm)->init(desc);
2248 }
2249
2250 /**
2251 * crypto_hash_update() - add data to message digest for processing
2252 * @desc: cipher request handle
2253 * @sg: scatter / gather list pointing to the data to be added to the message
2254 * digest
2255 * @nbytes: number of bytes to be processed from @sg
2256 *
2257 * Updates the message digest state of the cipher handle pointed to by the
2258 * hash cipher request handle with the input data pointed to by the
2259 * scatter/gather list.
2260 *
2261 * Return: 0 if the message digest update was successful; < 0 if an error
2262 * occurred
2263 */
2264 static inline int crypto_hash_update(struct hash_desc *desc,
2265 struct scatterlist *sg,
2266 unsigned int nbytes)
2267 {
2268 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
2269 }
2270
2271 /**
2272 * crypto_hash_final() - calculate message digest
2273 * @desc: cipher request handle
2274 * @out: message digest output buffer -- The caller must ensure that the out
2275 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
2276 * function).
2277 *
2278 * Finalize the message digest operation and create the message digest
2279 * based on all data added to the cipher handle. The message digest is placed
2280 * into the output buffer.
2281 *
2282 * Return: 0 if the message digest creation was successful; < 0 if an error
2283 * occurred
2284 */
2285 static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
2286 {
2287 return crypto_hash_crt(desc->tfm)->final(desc, out);
2288 }
2289
2290 /**
2291 * crypto_hash_digest() - calculate message digest for a buffer
2292 * @desc: see crypto_hash_final()
2293 * @sg: see crypto_hash_update()
2294 * @nbytes: see crypto_hash_update()
2295 * @out: see crypto_hash_final()
2296 *
2297 * This function is a shorthand for the sequence of crypto_hash_init,
2298 * crypto_hash_update and crypto_hash_final calls. The parameters have the same
2299 * meaning as discussed for those separate three functions.
2300 *
2301 * Return: 0 if the message digest creation was successful; < 0 if an error
2302 * occurred
2303 */
2304 static inline int crypto_hash_digest(struct hash_desc *desc,
2305 struct scatterlist *sg,
2306 unsigned int nbytes, u8 *out)
2307 {
2308 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
2309 }
2310
2311 /**
2312 * crypto_hash_setkey() - set key for message digest
2313 * @hash: cipher handle
2314 * @key: buffer holding the key
2315 * @keylen: length of the key in bytes
2316 *
2317 * The caller provided key is set for the message digest cipher. The cipher
2318 * handle must point to a keyed hash in order for this function to succeed.
2319 *
2320 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2321 */
2322 static inline int crypto_hash_setkey(struct crypto_hash *hash,
2323 const u8 *key, unsigned int keylen)
2324 {
2325 return crypto_hash_crt(hash)->setkey(hash, key, keylen);
2326 }
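
/*
 * Illustrative sketch only: a keyed digest built on crypto_hash_setkey()
 * and the crypto_hash_digest() shorthand above. Assumes "hmac(sha256)" is
 * available and @out holds crypto_hash_digestsize() bytes; example_* is a
 * made-up name.
 */
static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       struct scatterlist *sg, unsigned int nbytes,
			       u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int err;

	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_hash_setkey(tfm, key, keylen);
	if (!err) {
		desc.tfm = tfm;
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		err = crypto_hash_digest(&desc, sg, nbytes, out);
	}

	crypto_free_hash(tfm);
	return err;
}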
2327
2328 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
2329 {
2330 return (struct crypto_comp *)tfm;
2331 }
2332
2333 static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
2334 {
2335 BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
2336 CRYPTO_ALG_TYPE_MASK);
2337 return __crypto_comp_cast(tfm);
2338 }
2339
2340 static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
2341 u32 type, u32 mask)
2342 {
2343 type &= ~CRYPTO_ALG_TYPE_MASK;
2344 type |= CRYPTO_ALG_TYPE_COMPRESS;
2345 mask |= CRYPTO_ALG_TYPE_MASK;
2346
2347 return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
2348 }
2349
2350 static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
2351 {
2352 return &tfm->base;
2353 }
2354
2355 static inline void crypto_free_comp(struct crypto_comp *tfm)
2356 {
2357 crypto_free_tfm(crypto_comp_tfm(tfm));
2358 }
2359
2360 static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
2361 {
2362 type &= ~CRYPTO_ALG_TYPE_MASK;
2363 type |= CRYPTO_ALG_TYPE_COMPRESS;
2364 mask |= CRYPTO_ALG_TYPE_MASK;
2365
2366 return crypto_has_alg(alg_name, type, mask);
2367 }
2368
2369 static inline const char *crypto_comp_name(struct crypto_comp *tfm)
2370 {
2371 return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
2372 }
2373
2374 static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
2375 {
2376 return &crypto_comp_tfm(tfm)->crt_compress;
2377 }
2378
2379 static inline int crypto_comp_compress(struct crypto_comp *tfm,
2380 const u8 *src, unsigned int slen,
2381 u8 *dst, unsigned int *dlen)
2382 {
2383 return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
2384 src, slen, dst, dlen);
2385 }
2386
2387 static inline int crypto_comp_decompress(struct crypto_comp *tfm,
2388 const u8 *src, unsigned int slen,
2389 u8 *dst, unsigned int *dlen)
2390 {
2391 return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
2392 src, slen, dst, dlen);
2393 }
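
/*
 * Illustrative sketch only: a compress/decompress round trip. The length
 * arguments passed by reference are in/out -- capacity on entry, produced
 * length on return. Assumes the "deflate" algorithm is available;
 * example_* is a made-up name.
 */
static int example_deflate_roundtrip(const u8 *src, unsigned int slen,
				     u8 *zbuf, unsigned int zbuf_size,
				     u8 *out, unsigned int out_size)
{
	struct crypto_comp *tfm;
	unsigned int zlen = zbuf_size;
	unsigned int plen = out_size;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, src, slen, zbuf, &zlen);
	if (!err)
		err = crypto_comp_decompress(tfm, zbuf, zlen, out, &plen);

	crypto_free_comp(tfm);
	return err;
}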
2394
2395 #endif /* _LINUX_CRYPTO_H */
2396