crypto: ccp - Updates for checkpatch warnings/errors
drivers/crypto/ccp/ccp-crypto-aes-cmac.c

/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		unsigned int offset = rctx->nbytes - rctx->hash_rem;

		scatterwalk_map_and_copy(rctx->buf, rctx->src,
					 offset, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else {
		rctx->buf_count = 0;
	}

	/* Update result area if supplied */
	if (req->result)
		memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

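/*
 * Common worker for the update, final and finup entry points.  Data is
 * handed to the CCP only in multiples of the AES block size; a partial
 * trailing block is left in the request source and copied into rctx->buf
 * by the completion callback above.
 *
 * A worked illustration (not from the source): with 40 bytes already
 * buffered and a 30 byte non-final update, len = 70 and, for a 16 byte
 * block size, hash_rem = 70 & 15 = 6, so 64 bytes are hashed now and
 * 6 bytes are carried over.  If len were an exact multiple of the block
 * size on a non-final call, one full block would be held back so that
 * the final request is never empty.
 */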
static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int need_pad, sg_count;
	gfp_t gfp;
	u64 len;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;

	len = (u64)rctx->buf_count + (u64)nbytes;

	if (!final && (len <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	rctx->src = req->src;
	rctx->nbytes = nbytes;

	rctx->final = final;
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	rctx->hash_cnt = len - rctx->hash_rem;
	if (!final && !rctx->hash_rem) {
		/* The CCP can't do a zero-length final, so keep some data
		 * around
		 */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

	/* An empty message or a partial final block must be padded */
	if (final && (rctx->null_msg || (len & (block_size - 1))))
		need_pad = 1;
	else
		need_pad = 0;

	sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

	/* Build the data scatterlist table - allocate enough entries for all
	 * possible data pieces (buffer, input data, padding)
	 */
	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;
	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
	if (ret)
		return ret;

	sg = NULL;
	if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
	}

	if (nbytes)
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

	if (need_pad) {
		int pad_length = block_size - (len & (block_size - 1));

		rctx->hash_cnt += pad_length;

		memset(rctx->pad, 0, sizeof(rctx->pad));
		rctx->pad[0] = 0x80;
		sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
	}
	if (sg) {
		sg_mark_end(sg);
		sg = rctx->data_sg.sgl;
	}

	/* Initialize the K1/K2 scatterlist */
	if (final)
		cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
					 : &ctx->u.aes.k1_sg;

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = &rctx->iv_sg;
	rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.aes.src = sg;
	rctx->cmd.u.aes.src_len = rctx->hash_cnt;
	rctx->cmd.u.aes.dst = NULL;
	rctx->cmd.u.aes.cmac_key = cmac_key_sg;
	rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
	rctx->cmd.u.aes.cmac_final = final;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

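/*
 * Standard ahash lifecycle hooks: init resets the request context and
 * flags the message as empty (null_msg), update streams intermediate
 * data, and final/finup let ccp_do_cmac_update() apply padding and
 * select between the K1 and K2 subkeys.
 */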
static int ccp_aes_cmac_init(struct ahash_request *req)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	rctx->null_msg = 1;

	return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_aes_cmac_init(req);
	if (ret)
		return ret;

	return ccp_aes_cmac_finup(req);
}

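/*
 * Subkey generation per NIST SP 800-38B / RFC 4493: L is the AES
 * encryption of the all-zero block, K1 = double(L) and K2 = double(K1),
 * where double() is a one-bit left shift of the 128-bit value followed
 * by an XOR with Rb = 0x87 in the low byte whenever the bit shifted out
 * was set.  K1 authenticates messages ending on a block boundary; K2 is
 * used when the final block had to be padded.
 *
 * A minimal sketch of the doubling step on a plain byte array, for
 * illustration only (the driver instead works on u64 halves below):
 *
 *	static void gf128_double(u8 d[16], const u8 s[16])
 *	{
 *		int i, carry = s[0] & 0x80;
 *
 *		for (i = 0; i < 15; i++)
 *			d[i] = (s[i] << 1) | (s[i + 1] >> 7);
 *		d[15] = s[15] << 1;
 *		if (carry)
 *			d[15] ^= 0x87;
 *	}
 */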
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
	u64 rb_hi = 0x00, rb_lo = 0x87;
	__be64 *gk;
	int ret;

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;

	/* Set to zero until complete */
	ctx->u.aes.key_len = 0;

	/* Set the key for the AES cipher used to generate the keys */
	ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
	if (ret)
		return ret;

	/* Encrypt a block of zeroes - use key area in context */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
				  ctx->u.aes.key);

	/* Generate K1 and K2 */
	k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
	k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

	k1_hi = (k0_hi << 1) | (k0_lo >> 63);
	k1_lo = k0_lo << 1;
	if (ctx->u.aes.key[0] & 0x80) {
		k1_hi ^= rb_hi;
		k1_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k1;
	*gk = cpu_to_be64(k1_hi);
	gk++;
	*gk = cpu_to_be64(k1_lo);

	k2_hi = (k1_hi << 1) | (k1_lo >> 63);
	k2_lo = k1_lo << 1;
	if (ctx->u.aes.k1[0] & 0x80) {
		k2_hi ^= rb_hi;
		k2_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k2;
	*gk = cpu_to_be64(k2_hi);
	gk++;
	*gk = cpu_to_be64(k2_lo);

	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
	sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

	/* Save the supplied key */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	memcpy(ctx->u.aes.key, key, key_len);
	ctx->u.aes.key_len = key_len;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return ret;
}

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_cipher *cipher_tfm;

	ctx->complete = ccp_aes_cmac_complete;
	ctx->u.aes.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

	/* The synchronous AES cipher is only used for K1/K2 generation */
	cipher_tfm = crypto_alloc_cipher("aes", 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(cipher_tfm)) {
		pr_warn("could not load aes cipher driver\n");
		return PTR_ERR(cipher_tfm);
	}
	ctx->u.aes.tfm_cipher = cipher_tfm;

	return 0;
}

static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_cipher)
		crypto_free_cipher(ctx->u.aes.tfm_cipher);
	ctx->u.aes.tfm_cipher = NULL;
}

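/*
 * Registration publishes the algorithm as "cmac(aes)" with driver name
 * "cmac-aes-ccp"; CCP_CRA_PRIORITY lets the crypto API prefer this
 * hardware implementation over lower-priority software providers when
 * a caller requests the generic "cmac(aes)" name.
 */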
int ccp_register_aes_cmac_algs(struct list_head *head)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);
	ccp_alg->mode = CCP_AES_MODE_CMAC;

	alg = &ccp_alg->alg;
	alg->init = ccp_aes_cmac_init;
	alg->update = ccp_aes_cmac_update;
	alg->final = ccp_aes_cmac_final;
	alg->finup = ccp_aes_cmac_finup;
	alg->digest = ccp_aes_cmac_digest;
	alg->setkey = ccp_aes_cmac_setkey;

	halg = &alg->halg;
	halg->digestsize = AES_BLOCK_SIZE;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = AES_BLOCK_SIZE;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_aes_cmac_cra_init;
	base->cra_exit = ccp_aes_cmac_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}
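
/*
 * A hedged usage sketch (not part of this driver): once registered, the
 * implementation is reachable through the generic ahash API.  The key,
 * msg and msg_len identifiers are assumed caller-supplied, and the
 * DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers shown are the
 * convenience wrappers from later kernels; contemporary callers
 * open-coded the completion handling.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 mac[AES_BLOCK_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int ret;
 *
 *	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (!req) {
 *		crypto_free_ahash(tfm);
 *		return -ENOMEM;
 *	}
 *
 *	ret = crypto_ahash_setkey(tfm, key, AES_KEYSIZE_128);
 *	if (!ret) {
 *		sg_init_one(&sg, msg, msg_len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, mac, msg_len);
 *		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	}
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *	return ret;
 */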