drivers/crypto/ccp/ccp-crypto-sha.c ([deliverable/linux.git])
/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

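/*
 * Overview (editor's note, inferred from the code below): the ahash
 * entry points (init/update/final/finup/digest) collect data into
 * block-sized chunks, build a CCP_ENGINE_SHA command describing the
 * source scatterlist and the intermediate context, and enqueue it for
 * the CCP hardware.  The completion callback below stashes any bytes
 * that were held back so the next update can prepend them.
 */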
static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                unsigned int offset = rctx->nbytes - rctx->hash_rem;

                scatterwalk_map_and_copy(rctx->buf, rctx->src,
                                         offset, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else {
                rctx->buf_count = 0;
        }

        /* Update result area if supplied */
        if (req->result)
                memcpy(req->result, rctx->ctx, digest_size);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

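/*
 * ccp_do_sha_update() is the common worker for update/final/finup.
 * Data smaller than a block (and not final) is simply appended to
 * rctx->buf.  Otherwise the total length is split into hash_cnt bytes
 * that are sent to the hardware now and hash_rem bytes that are
 * carried over; a non-final request always keeps at least one block
 * back because the CCP cannot process a zero-length final operation.
 */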
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
                             unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int sg_count;
        gfp_t gfp;
        u64 len;
        int ret;

        len = (u64)rctx->buf_count + (u64)nbytes;

        if (!final && (len <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

        rctx->src = req->src;
        rctx->nbytes = nbytes;

        rctx->final = final;
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        rctx->hash_cnt = len - rctx->hash_rem;
        if (!final && !rctx->hash_rem) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

        /* Initialize the context scatterlist */
        sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

        sg = NULL;
        if (rctx->buf_count && nbytes) {
                /* Build the data scatterlist table - allocate enough entries
                 * for both data pieces (buffer and input data)
                 */
                gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
                sg_count = sg_nents(req->src) + 1;
                ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
                if (ret)
                        return ret;

                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
                sg_mark_end(sg);

                sg = rctx->data_sg.sgl;
        } else if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

                sg = &rctx->buf_sg;
        } else if (nbytes) {
                sg = req->src;
        }

        rctx->msg_bits += (rctx->hash_cnt << 3);        /* Total in bits */

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_SHA;
        rctx->cmd.u.sha.type = rctx->type;
        rctx->cmd.u.sha.ctx = &rctx->ctx_sg;
        rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx);
        rctx->cmd.u.sha.src = sg;
        rctx->cmd.u.sha.src_len = rctx->hash_cnt;
        rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
                &ctx->u.sha.opad_sg : NULL;
        rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
                ctx->u.sha.opad_count : 0;
        rctx->cmd.u.sha.first = rctx->first;
        rctx->cmd.u.sha.final = rctx->final;
        rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

        rctx->first = 0;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;
}

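/*
 * Per-request initialization: reset the request context, record which
 * SHA variant to run, and, when an HMAC key has been set, preload the
 * ipad block so the first hardware pass starts with (key ^ ipad).
 */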
static int ccp_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        memset(rctx, 0, sizeof(*rctx));

        rctx->type = alg->type;
        rctx->first = 1;

        if (ctx->u.sha.key_len) {
                /* Buffer the HMAC key for first update */
                memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
                rctx->buf_count = block_size;
        }

        return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
        return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
        int ret;

        ret = ccp_sha_init(req);
        if (ret)
                return ret;

        return ccp_sha_finup(req);
}

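/*
 * HMAC key preprocessing (RFC 2104): keys longer than the block size
 * are first reduced with the software shash fallback, shorter keys are
 * zero padded, and the ipad/opad blocks are derived by XORing the
 * padded key with 0x36 and 0x5c.  The opad block is passed to the CCP
 * with each command so the hardware can perform the outer hash.
 */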
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct crypto_shash *shash = ctx->u.sha.hmac_tfm;

        SHASH_DESC_ON_STACK(sdesc, shash);

        unsigned int block_size = crypto_shash_blocksize(shash);
        unsigned int digest_size = crypto_shash_digestsize(shash);
        int i, ret;

        /* Set to zero until complete */
        ctx->u.sha.key_len = 0;

        /* Clear key area to provide zero padding for keys smaller
         * than the block size
         */
        memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

        if (key_len > block_size) {
                /* Must hash the input key */
                sdesc->tfm = shash;
                sdesc->flags = crypto_ahash_get_flags(tfm) &
                        CRYPTO_TFM_REQ_MAY_SLEEP;

                ret = crypto_shash_digest(sdesc, key, key_len,
                                          ctx->u.sha.key);
                if (ret) {
                        crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                        return -EINVAL;
                }

                key_len = digest_size;
        } else {
                memcpy(ctx->u.sha.key, key, key_len);
        }

        for (i = 0; i < block_size; i++) {
                ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36;
                ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c;
        }

        sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
        ctx->u.sha.opad_count = block_size;

        ctx->u.sha.key_len = key_len;

        return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

        ctx->complete = ccp_sha_complete;
        ctx->u.sha.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

        return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

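/*
 * The hmac(shaX) transforms additionally allocate a synchronous shash
 * of the underlying digest.  It is used only by ccp_sha_setkey() to
 * reduce keys that are longer than one block.
 */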
static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
        struct crypto_shash *hmac_tfm;

        hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
        if (IS_ERR(hmac_tfm)) {
                pr_warn("could not load driver %s needed for HMAC support\n",
                        alg->child_alg);
                return PTR_ERR(hmac_tfm);
        }

        ctx->u.sha.hmac_tfm = hmac_tfm;

        return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->u.sha.hmac_tfm)
                crypto_free_shash(ctx->u.sha.hmac_tfm);

        ccp_sha_cra_exit(tfm);
}

struct ccp_sha_def {
        const char *name;
        const char *drv_name;
        enum ccp_sha_type type;
        u32 digest_size;
        u32 block_size;
};

static struct ccp_sha_def sha_algs[] = {
        {
                .name = "sha1",
                .drv_name = "sha1-ccp",
                .type = CCP_SHA_TYPE_1,
                .digest_size = SHA1_DIGEST_SIZE,
                .block_size = SHA1_BLOCK_SIZE,
        },
        {
                .name = "sha224",
                .drv_name = "sha224-ccp",
                .type = CCP_SHA_TYPE_224,
                .digest_size = SHA224_DIGEST_SIZE,
                .block_size = SHA224_BLOCK_SIZE,
        },
        {
                .name = "sha256",
                .drv_name = "sha256-ccp",
                .type = CCP_SHA_TYPE_256,
                .digest_size = SHA256_DIGEST_SIZE,
                .block_size = SHA256_BLOCK_SIZE,
        },
};

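/*
 * Registration is table driven: each entry above yields a plain
 * "shaX" ahash plus, via ccp_register_hmac_alg(), a matching
 * "hmac(shaX)" ahash that reuses the same request handlers.
 */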
static int ccp_register_hmac_alg(struct list_head *head,
                                 const struct ccp_sha_def *def,
                                 const struct ccp_crypto_ahash_alg *base_alg)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        /* Copy the base algorithm and only change what's necessary */
        *ccp_alg = *base_alg;
        INIT_LIST_HEAD(&ccp_alg->entry);

        strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

        alg = &ccp_alg->alg;
        alg->setkey = ccp_sha_setkey;

        halg = &alg->halg;

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
                 def->drv_name);
        base->cra_init = ccp_hmac_sha_cra_init;
        base->cra_exit = ccp_hmac_sha_cra_exit;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return ret;
}

static int ccp_register_sha_alg(struct list_head *head,
                                const struct ccp_sha_def *def)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        ccp_alg->type = def->type;

        alg = &ccp_alg->alg;
        alg->init = ccp_sha_init;
        alg->update = ccp_sha_update;
        alg->final = ccp_sha_final;
        alg->finup = ccp_sha_finup;
        alg->digest = ccp_sha_digest;

        halg = &alg->halg;
        halg->digestsize = def->digest_size;

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);
        base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = def->block_size;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_type = &crypto_ahash_type;
        base->cra_init = ccp_sha_cra_init;
        base->cra_exit = ccp_sha_cra_exit;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        ret = ccp_register_hmac_alg(head, def, ccp_alg);

        return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
                ret = ccp_register_sha_alg(head, &sha_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
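
/*
 * Editor's sketch (not part of the original file): how a kernel caller
 * might exercise one of the hashes registered above through the generic
 * ahash API.  The "sha256" name resolves to this driver only when its
 * priority wins; crypto_req_done()/crypto_wait_req() are assumed to be
 * available (they were added in kernels newer than this code), and
 * data/data_len are placeholders for the caller's buffer.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, data_len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, data_len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */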