/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

#define AES_KEYLEN_128  1
#define AES_KEYLEN_192  2
#define AES_KEYLEN_256  4
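/*
 * keylen_flag is a bitmask of the AES_KEYLEN_* flags above; aes_s390_init()
 * sets one bit per key length the CPACF hardware supports. A machine with
 * full AES support thus ends up with
 * AES_KEYLEN_128 | AES_KEYLEN_192 | AES_KEYLEN_256 == 7, while a z9 sets
 * only AES_KEYLEN_128.
 */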
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        long enc;
        long dec;
        int key_len;
        union {
                struct crypto_blkcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};
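/*
 * Parameter block used when pre-computing the XTS tweak: key, tweak value,
 * block sequence number, bit index and the resulting XTS parameter. The
 * layout follows what this driver passes to cpacf_pcc() (Perform
 * Cryptographic Computation).
 */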
struct pcc_param {
        u8 key[32];
        u8 tweak[16];
        u8 block[16];
        u8 bit[16];
        u8 xts[16];
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        long enc;
        long dec;
        int key_len;
        struct crypto_blkcipher *fallback;
};
/*
 * Check if the key_len is supported by the HW.
 * Returns 0 if it is, a positive number if it is not and a software
 * fallback is required, or a negative number if the key size is not valid.
 */
static int need_fallback(unsigned int key_len)
{
        switch (key_len) {
        case 16:
                if (!(keylen_flag & AES_KEYLEN_128))
                        return 1;
                break;
        case 24:
                if (!(keylen_flag & AES_KEYLEN_192))
                        return 1;
                break;
        case 32:
                if (!(keylen_flag & AES_KEYLEN_256))
                        return 1;
                break;
        default:
                return -1;
        }
        return 0;
}
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int ret;

        ret = need_fallback(key_len);
        if (ret < 0) {
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        sctx->key_len = key_len;
        if (!ret) {
                memcpy(sctx->key, in_key, key_len);
                return 0;
        }

        return setkey_fallback_cip(tfm, in_key, key_len);
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(need_fallback(sctx->key_len))) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }

        switch (sctx->key_len) {
        case 16:
                cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 24:
                cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        case 32:
                cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
                         AES_BLOCK_SIZE);
                break;
        }
}
static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
        .cra_name = "aes",
        .cra_driver_name = "aes-s390",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_cip,
        .cra_exit = fallback_exit_cip,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt,
                }
        }
};
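/*
 * Illustrative sketch only, not part of the driver: a kernel-side user
 * reaches this implementation through the generic crypto API, which picks
 * "aes-s390" over the C fallback because of its higher cra_priority. The
 * key/dst/src buffers below are assumed to be caller-provided:
 *
 *      struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *      if (!IS_ERR(tfm)) {
 *              crypto_cipher_setkey(tfm, key, AES_MIN_KEY_SIZE);
 *              crypto_cipher_encrypt_one(tfm, dst, src);
 *              crypto_free_cipher(tfm);
 *      }
 */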
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}
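/*
 * The two helpers below run a request on the software fallback tfm by
 * temporarily swapping desc->tfm: the fallback blkcipher acts as the
 * transform for the duration of the call and the original tfm is restored
 * afterwards.
 */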
static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        int ret;
        struct crypto_blkcipher *tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = sctx->fallback.blk;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        int ret;
        struct crypto_blkcipher *tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = sctx->fallback.blk;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KM_AES_128_ENC;
                sctx->dec = CPACF_KM_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KM_AES_192_ENC;
                sctx->dec = CPACF_KM_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KM_AES_256_ENC;
                sctx->dec = CPACF_KM_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}
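/*
 * ecb_aes_crypt() walks the scatterlists and hands each run of complete
 * AES blocks to the KM instruction in one call; cpacf_km() returns the
 * number of bytes processed, so anything other than the requested length
 * is treated as a hardware error.
 */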
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
                         struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes;

        while ((nbytes = walk->nbytes)) {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_km(func, param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }

        return ret;
}
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(sctx->fallback.blk);
        sctx->fallback.blk = NULL;
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-s390",
        .cra_priority = 400,    /* combo: aes + ecb */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = ecb_aes_set_key,
                        .encrypt = ecb_aes_encrypt,
                        .decrypt = ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        ret = need_fallback(key_len);
        if (ret > 0) {
                sctx->key_len = key_len;
                return setkey_fallback_blk(tfm, in_key, key_len);
        }

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMC_AES_128_ENC;
                sctx->dec = CPACF_KMC_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMC_AES_192_ENC;
                sctx->dec = CPACF_KMC_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMC_AES_256_ENC;
                sctx->dec = CPACF_KMC_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}
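/*
 * For CBC the KMC instruction takes a single parameter block carrying both
 * the chaining value (IV) and the key; cbc_aes_crypt() builds it on the
 * stack, lets KMC update the chaining value across the walk and copies the
 * final chaining value back into walk->iv once the request is complete.
 */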
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        if (!nbytes)
                goto out;

        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        do {
                /* only use complete blocks */
                unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
                u8 *out = walk->dst.virt.addr;
                u8 *in = walk->src.virt.addr;

                ret = cpacf_kmc(func, &param, out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);

out:
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->enc, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(need_fallback(sctx->key_len)))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, sctx->dec, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-s390",
        .cra_priority = 400,    /* combo: aes + cbc */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = cbc_aes_set_key,
                        .encrypt = cbc_aes_encrypt,
                        .decrypt = cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        int ret;

        xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst, struct scatterlist *src,
                                unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct crypto_blkcipher *tfm;
        int ret;

        tfm = desc->tfm;
        desc->tfm = xts_ctx->fallback;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst, struct scatterlist *src,
                                unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct crypto_blkcipher *tfm;
        int ret;

        tfm = desc->tfm;
        desc->tfm = xts_ctx->fallback;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        u32 *flags = &tfm->crt_flags;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        switch (key_len) {
        case 32:
                xts_ctx->enc = CPACF_KM_XTS_128_ENC;
                xts_ctx->dec = CPACF_KM_XTS_128_DEC;
                memcpy(xts_ctx->key + 16, in_key, 16);
                memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
                break;
        case 48:
                /* 192-bit XTS keys are not supported by KM; use fallback */
                xts_ctx->enc = 0;
                xts_ctx->dec = 0;
                err = xts_fallback_setkey(tfm, in_key, key_len);
                if (err)
                        return err;
                break;
        case 64:
                xts_ctx->enc = CPACF_KM_XTS_256_ENC;
                xts_ctx->dec = CPACF_KM_XTS_256_DEC;
                memcpy(xts_ctx->key, in_key, 32);
                memcpy(xts_ctx->pcc_key, in_key + 32, 32);
                break;
        default:
                *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }
        xts_ctx->key_len = key_len;
        return 0;
}
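/*
 * xts_aes_crypt() first derives the XTS tweak: PCC is invoked with the KM
 * function code minus the decipher modifier bit (0x80), since the tweak
 * computation is identical for both directions. The 'offset' arithmetic
 * selects where the key sits inside the 32-byte key fields: a 128-bit key
 * occupies the upper 16 bytes (offset 16, see xts_aes_set_key()), while a
 * 256-bit key fills all 32 bytes (offset 0).
 */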
static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_xts_ctx *xts_ctx,
                         struct blkcipher_walk *walk)
{
        unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        unsigned int n;
        u8 *in, *out;
        struct pcc_param pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        if (!nbytes)
                goto out;

        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
        /* remove decipher modifier bit from 'func' and call PCC */
        ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
        if (ret < 0)
                return -EIO;

        memcpy(xts_param.key, xts_ctx->key, 32);
        memcpy(xts_param.init, pcc_param.xts, 16);
        do {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;

                ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
                if (ret < 0 || ret != n)
                        return -EIO;

                nbytes &= AES_BLOCK_SIZE - 1;
                ret = blkcipher_walk_done(desc, walk, nbytes);
        } while ((nbytes = walk->nbytes));
out:
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(xts_ctx->key_len == 48))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(xts_ctx->fallback);
        xts_ctx->fallback = NULL;
}

static struct crypto_alg xts_aes_alg = {
        .cra_name = "xts(aes)",
        .cra_driver_name = "xts-aes-s390",
        .cra_priority = 400,    /* combo: aes + xts */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_xts_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = xts_fallback_init,
        .cra_exit = xts_fallback_exit,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = xts_aes_set_key,
                        .encrypt = xts_aes_encrypt,
                        .decrypt = xts_aes_decrypt,
                }
        }
};

static int xts_aes_alg_reg;

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        switch (key_len) {
        case 16:
                sctx->enc = CPACF_KMCTR_AES_128_ENC;
                sctx->dec = CPACF_KMCTR_AES_128_DEC;
                break;
        case 24:
                sctx->enc = CPACF_KMCTR_AES_192_ENC;
                sctx->dec = CPACF_KMCTR_AES_192_DEC;
                break;
        case 32:
                sctx->enc = CPACF_KMCTR_AES_256_ENC;
                sctx->dec = CPACF_KMCTR_AES_256_DEC;
                break;
        }

        return aes_set_key(tfm, in_key, key_len);
}
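/*
 * CTR mode feeds KMCTR one pre-computed counter block per AES block.
 * __ctrblk_init() fills up to one page with consecutive counter values so
 * a single KMCTR invocation can process a large chunk. The shared ctrblk
 * page is guarded by ctrblk_lock; if the lock is contended,
 * ctr_aes_crypt() falls back to an on-stack buffer and processes one
 * block per invocation instead of blocking.
 */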
static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
                memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
        }
        return n;
}

static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
        int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        unsigned int n, nbytes;
        u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
        u8 *out, *in, *ctrptr = ctrbuf;

        if (!walk->nbytes)
                return ret;

        if (spin_trylock(&ctrblk_lock))
                ctrptr = ctrblk;

        memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                while (nbytes >= AES_BLOCK_SIZE) {
                        if (ctrptr == ctrblk)
                                n = __ctrblk_init(ctrptr, nbytes);
                        else
                                n = AES_BLOCK_SIZE;
                        ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
                        if (ret < 0 || ret != n) {
                                if (ctrptr == ctrblk)
                                        spin_unlock(&ctrblk_lock);
                                return -EIO;
                        }
                        if (n > AES_BLOCK_SIZE)
                                memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
                                       AES_BLOCK_SIZE);
                        crypto_inc(ctrptr, AES_BLOCK_SIZE);
                        out += n;
                        in += n;
                        nbytes -= n;
                }
                ret = blkcipher_walk_done(desc, walk, nbytes);
        }
        if (ctrptr == ctrblk) {
                if (nbytes)
                        memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
                else
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
                spin_unlock(&ctrblk_lock);
        } else {
                if (!nbytes)
                        memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
        }
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                out = walk->dst.virt.addr;
                in = walk->src.virt.addr;
                ret = cpacf_kmctr(func, sctx->key, buf, in,
                                  AES_BLOCK_SIZE, ctrbuf);
                if (ret < 0 || ret != AES_BLOCK_SIZE)
                        return -EIO;
                memcpy(out, buf, nbytes);
                crypto_inc(ctrbuf, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
                memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "ctr-aes-s390",
        .cra_priority = 400,    /* combo: aes + ctr */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ctr_aes_set_key,
                        .encrypt = ctr_aes_encrypt,
                        .decrypt = ctr_aes_decrypt,
                }
        }
};

static int ctr_aes_alg_reg;
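/*
 * Module init: probe the CPACF facilities once and register only the
 * algorithms the machine can actually run. The base KM query decides which
 * key lengths avoid the software fallback; XTS and CTR are registered
 * conditionally since they need additional facility bits.
 */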
static int __init aes_s390_init(void)
{
        int ret;

        if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
                keylen_flag |= AES_KEYLEN_128;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
                keylen_flag |= AES_KEYLEN_192;
        if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
                keylen_flag |= AES_KEYLEN_256;

        if (!keylen_flag)
                return -EOPNOTSUPP;

        /* z9 109 and z9 BC/EC only support 128 bit key length */
        if (keylen_flag == AES_KEYLEN_128)
                pr_info("AES hardware acceleration is only available for 128-bit keys\n");

        ret = crypto_register_alg(&aes_alg);
        if (ret)
                goto aes_err;

        ret = crypto_register_alg(&ecb_aes_alg);
        if (ret)
                goto ecb_aes_err;

        ret = crypto_register_alg(&cbc_aes_alg);
        if (ret)
                goto cbc_aes_err;

        if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
            cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
                ret = crypto_register_alg(&xts_aes_alg);
                if (ret)
                        goto xts_aes_err;
                xts_aes_alg_reg = 1;
        }

        if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
            cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto ctr_aes_err;
                }
                ret = crypto_register_alg(&ctr_aes_alg);
                if (ret) {
                        free_page((unsigned long) ctrblk);
                        goto ctr_aes_err;
                }
                ctr_aes_alg_reg = 1;
        }

out:
        return ret;

ctr_aes_err:
        /* XTS is only registered when the facility bits allow it */
        if (xts_aes_alg_reg)
                crypto_unregister_alg(&xts_aes_alg);
xts_aes_err:
        crypto_unregister_alg(&cbc_aes_alg);
cbc_aes_err:
        crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
        crypto_unregister_alg(&aes_alg);
aes_err:
        goto out;
}

static void __exit aes_s390_fini(void)
{
        if (ctr_aes_alg_reg) {
                crypto_unregister_alg(&ctr_aes_alg);
                free_page((unsigned long) ctrblk);
        }
        if (xts_aes_alg_reg)
                crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
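/*
 * Illustrative sketch only, not part of the driver: once this module is
 * loaded, a kernel user asking the crypto API for "cbc(aes)" is routed to
 * "cbc-aes-s390" transparently. The key, iv, dst_sg/src_sg scatterlists
 * and len below are assumed to be caller-provided:
 *
 *      struct crypto_blkcipher *tfm;
 *      struct blkcipher_desc desc;
 *
 *      tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *      if (!IS_ERR(tfm)) {
 *              desc.tfm = tfm;
 *              desc.flags = 0;
 *              crypto_blkcipher_setkey(tfm, key, 32);
 *              crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *              crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, len);
 *              crypto_free_blkcipher(tfm);
 *      }
 */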