/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"


static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else
		rctx->buf_count = 0;

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int need_pad, sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;

	len = (u64)rctx->buf_count + (u64)nbytes;

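	/*
	 * If this is not the final operation and the total available data
	 * does not exceed one block, just accumulate it in the context
	 * buffer; the CCP is only handed data once more than a block's
	 * worth (or the final chunk) is available.
	 */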
	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	if (final && (rctx->null_msg || (len & (block_size - 1))))
		need_pad = 1;
	else
		need_pad = 0;

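	/*
	 * The IV buffer carries the chaining value for the CMAC operation
	 * and receives the result; the final MAC is copied out of it in
	 * ccp_aes_cmac_complete().
	 */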
	sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

	/* Build the data scatterlist table - allocate enough entries for all
	 * possible data pieces (buffer, input data, padding)
	 */
	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
	if (ret)
		return ret;

	sg = NULL;
	if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
	}

	if (nbytes)
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

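	/*
	 * A final block that is not a full block (or an empty message) gets
	 * the CMAC 10* padding of NIST SP 800-38B: a single 0x80 byte
	 * followed by zeroes.  The padded case is MACed with subkey K2, the
	 * unpadded case with K1 (see the cmac_key_sg selection below).
	 */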
	if (need_pad) {
		int pad_length = block_size - (len & (block_size - 1));

		rctx->hash_cnt += pad_length;

		memset(rctx->pad, 0, sizeof(rctx->pad));
		rctx->pad[0] = 0x80;
		sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
	}
	if (sg) {
		sg_mark_end(sg);
		sg = rctx->data_sg.sgl;
	}

	/* Initialize the K1/K2 scatterlist */
	if (final)
		cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
					 : &ctx->u.aes.k1_sg;

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = &rctx->iv_sg;
	rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.aes.src = sg;
	rctx->cmd.u.aes.src_len = rctx->hash_cnt;
	rctx->cmd.u.aes.dst = NULL;
	rctx->cmd.u.aes.cmac_key = cmac_key_sg;
	rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
	rctx->cmd.u.aes.cmac_final = final;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_cmac_init(struct ahash_request *req)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

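	/*
	 * Treat the message as empty until data actually arrives; an empty
	 * message must still be padded and MACed with K2 on the final
	 * operation.
	 */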
	rctx->null_msg = 1;

	return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_aes_cmac_init(req);
	if (ret)
		return ret;

	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
	u64 rb_hi = 0x00, rb_lo = 0x87;
	__be64 *gk;
	int ret;

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;

	/* Set to zero until complete */
	ctx->u.aes.key_len = 0;

	/* Set the key for the AES cipher used to generate the keys */
	ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
	if (ret)
		return ret;

	/* Encrypt a block of zeroes - use key area in context */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
				  ctx->u.aes.key);

	/* Generate K1 and K2 */
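	/*
	 * Per NIST SP 800-38B / RFC 4493: with L the encryption of the zero
	 * block computed above, K1 = L << 1, XORed with Rb (0x87 in the low
	 * byte) when the MSB of L is set; K2 is derived from K1 the same way.
	 */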
	k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
	k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

	k1_hi = (k0_hi << 1) | (k0_lo >> 63);
	k1_lo = k0_lo << 1;
	if (ctx->u.aes.key[0] & 0x80) {
		k1_hi ^= rb_hi;
		k1_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k1;
	*gk = cpu_to_be64(k1_hi);
	gk++;
	*gk = cpu_to_be64(k1_lo);

	k2_hi = (k1_hi << 1) | (k1_lo >> 63);
	k2_lo = k1_lo << 1;
	if (ctx->u.aes.k1[0] & 0x80) {
		k2_hi ^= rb_hi;
		k2_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k2;
	*gk = cpu_to_be64(k2_hi);
	gk++;
	*gk = cpu_to_be64(k2_lo);

	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
	sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

	/* Save the supplied key */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	memcpy(ctx->u.aes.key, key, key_len);
	ctx->u.aes.key_len = key_len;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return ret;
}

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_cipher *cipher_tfm;

	ctx->complete = ccp_aes_cmac_complete;
	ctx->u.aes.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

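	/*
	 * A synchronous software AES cipher is allocated here; it is used
	 * only in the setkey path, to encrypt the zero block from which the
	 * CMAC subkeys are derived.
	 */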
	cipher_tfm = crypto_alloc_cipher("aes", 0,
					 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(cipher_tfm)) {
		pr_warn("could not load aes cipher driver\n");
		return PTR_ERR(cipher_tfm);
	}
	ctx->u.aes.tfm_cipher = cipher_tfm;

	return 0;
}

static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_cipher)
		crypto_free_cipher(ctx->u.aes.tfm_cipher);
	ctx->u.aes.tfm_cipher = NULL;
}

int ccp_register_aes_cmac_algs(struct list_head *head)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);
	ccp_alg->mode = CCP_AES_MODE_CMAC;

	alg = &ccp_alg->alg;
	alg->init = ccp_aes_cmac_init;
	alg->update = ccp_aes_cmac_update;
	alg->final = ccp_aes_cmac_final;
	alg->finup = ccp_aes_cmac_finup;
	alg->digest = ccp_aes_cmac_digest;
	alg->setkey = ccp_aes_cmac_setkey;

	halg = &alg->halg;
	halg->digestsize = AES_BLOCK_SIZE;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = AES_BLOCK_SIZE;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_aes_cmac_cra_init;
	base->cra_exit = ccp_aes_cmac_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}