drivers/crypto/ccp/ccp-crypto-main.c

/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

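/*
 * Usage note (added for illustration): both parameters are created with
 * permissions 0444, so they can only be set at module load time, e.g.
 *
 *	modprobe ccp-crypto aes_disable=1
 *
 * (assuming the module is built as ccp-crypto), which would register only
 * the SHA algorithms below.
 */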

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received. With multiple queues available, the CCP can process more
 * than one cmd at a time. Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	100
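
/*
 * Note (added for clarity): req_queue.cmds holds every in-flight and held
 * cmd in arrival order. req_queue.backlog points at the first entry that
 * was accepted in the -EBUSY (backlogged) state, or back at the list head
 * itself when nothing is backlogged; entries ahead of it have either been
 * submitted to the CCP or are held behind an earlier cmd with the same tfm.
 */
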
static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};

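/*
 * Note (added for clarity): following the async crypto API convention,
 * -EINPROGRESS means the cmd was accepted for processing and -EBUSY means
 * it was accepted onto the backlog, so both count as success here; any
 * other non-zero value is a genuine error.
 */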
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list,
	 * special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

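/*
 * Worked example (added for illustration): take a queue A -> B -> C where
 * B is the cmd being completed. If req_queue.backlog already points at B,
 * the first check above moves it to C before *backlog is taken. If it
 * points at A instead, advancing past A lands on B, and the second check
 * moves it on to C. Either way the backlog pointer never references B's
 * entry, which is about to be unlinked and freed.
 */
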
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;
	}

	/* Look for an entry with the same tfm. If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
		if ((ret == -EBUSY) &&
		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;	/* Not backlogging, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

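/*
 * Summary (added for clarity): a cmd leaves this function either rejected
 * (the queue is full and the caller cannot backlog, or ccp_enqueue_cmd()
 * failed outright), submitted to the CCP (-EINPROGRESS), held behind an
 * earlier cmd with the same tfm (-EINPROGRESS, submitted later from
 * ccp_crypto_complete()), or backlogged (-EBUSY). crypto_cmd->ret records
 * the returned state so ccp_crypto_complete() knows whether a
 * backlog-to-in-progress transition still has to be reported to the caller.
 */
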
/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request has been invoked and
	 * the req pointer might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

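/*
 * Usage sketch (added for illustration, not part of the original file):
 * an algorithm implementation fills in the ccp_cmd embedded in its
 * request context and hands it off together with the base async request.
 * This is modeled loosely on the CCP AES code but simplified (key, IV and
 * mode setup are omitted); treat the exact fields used here as an
 * assumption, not a reference.
 */
static int ccp_example_aes_crypt(struct ablkcipher_request *req, bool encrypt)
{
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.action = encrypt ? CCP_AES_ACTION_ENCRYPT
					 : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->nbytes;
	rctx->cmd.u.aes.dst = req->dst;

	/* Returns -EINPROGRESS (submitted or held) or -EBUSY (backlogged)
	 * on success; the result is delivered through req->base.complete().
	 */
	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}
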
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	BUG_ON(!sg);

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	BUG_ON(sg_add);

	return sg_last;
}

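/*
 * Usage sketch (added for illustration, not part of the original file):
 * a caller allocates an sg_table with enough entries for everything it
 * will splice in, then appends each source scatterlist in turn - the way
 * the CCP hash/CMAC code chains a saved partial block ahead of new
 * request data. The function and parameter names here are hypothetical.
 */
static int ccp_example_build_sg(struct sg_table *table,
				struct scatterlist *buf_sg,
				struct scatterlist *req_sg,
				unsigned int total_nents)
{
	struct scatterlist *sg_last;
	int ret;

	/* The table must be able to hold every entry copied in below;
	 * ccp_crypto_sg_table_add() hits the BUG_ON() above if it runs
	 * out of free slots.
	 */
	ret = sg_alloc_table(table, total_nents, GFP_KERNEL);
	if (ret)
		return ret;

	ccp_crypto_sg_table_add(table, buf_sg);
	sg_last = ccp_crypto_sg_table_add(table, req_sg);
	if (!sg_last) {
		sg_free_table(table);
		return -EINVAL;
	}

	return 0;
}
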
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&cipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}

static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;

	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);