n2_crypto: Log algorithm success/failure in kernel log.
[deliverable/linux.git] drivers/crypto/n2_core.c
1/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2 *
3 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/of.h>
11#include <linux/of_device.h>
12#include <linux/cpumask.h>
13#include <linux/slab.h>
14#include <linux/interrupt.h>
15#include <linux/crypto.h>
16#include <crypto/md5.h>
17#include <crypto/sha.h>
18#include <crypto/aes.h>
19#include <crypto/des.h>
20#include <linux/mutex.h>
21#include <linux/delay.h>
22#include <linux/sched.h>
23
24#include <crypto/internal/hash.h>
25#include <crypto/scatterwalk.h>
26#include <crypto/algapi.h>
27
28#include <asm/hypervisor.h>
29#include <asm/mdesc.h>
30
31#include "n2_core.h"
32
33#define DRV_MODULE_NAME "n2_crypto"
34#define DRV_MODULE_VERSION "0.1"
35#define DRV_MODULE_RELDATE "April 29, 2010"
36
37static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41MODULE_DESCRIPTION("Niagara2 Crypto driver");
42MODULE_LICENSE("GPL");
43MODULE_VERSION(DRV_MODULE_VERSION);
44
45#define N2_CRA_PRIORITY 300
46
47static DEFINE_MUTEX(spu_lock);
48
49struct spu_queue {
50 cpumask_t sharing;
51 unsigned long qhandle;
52
53 spinlock_t lock;
54 u8 q_type;
55 void *q;
56 unsigned long head;
57 unsigned long tail;
58 struct list_head jobs;
59
60 unsigned long devino;
61
62 char irq_name[32];
63 unsigned int irq;
64
65 struct list_head list;
66};
67
68static struct spu_queue **cpu_to_cwq;
69static struct spu_queue **cpu_to_mau;
70
71static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
72{
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
81 }
82 return off;
83}
84
85struct n2_request_common {
86 struct list_head entry;
87 unsigned int offset;
88};
89#define OFFSET_NOT_RUNNING (~(unsigned int)0)
90
91/* An async job request records the final tail value it used in
92 * n2_request_common->offset; test whether that offset falls in the
93 * range (old_head, new_head], i.e. after old_head and at or before new_head.
94 */
95static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96 unsigned long old_head, unsigned long new_head)
97{
98 if (old_head <= new_head) {
99 if (offset > old_head && offset <= new_head)
100 return true;
101 } else {
102 if (offset > old_head || offset <= new_head)
103 return true;
104 }
105 return false;
106}
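/* Worked example (added for clarity, not in the original source): if the
 * completion window wraps, say old_head near the end of the ring and
 * new_head just past the start, the second branch above applies and an
 * offset counts as finished when it lies either after old_head or at or
 * before new_head.
 */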
107
108/* When the HEAD marker is unequal to the actual HEAD, we get
109 * a virtual device INO interrupt. We should process the
110 * completed CWQ entries and adjust the HEAD marker to clear
111 * the IRQ.
112 */
113static irqreturn_t cwq_intr(int irq, void *dev_id)
114{
115 unsigned long off, new_head, hv_ret;
116 struct spu_queue *q = dev_id;
117
118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119 smp_processor_id(), q->qhandle);
120
121 spin_lock(&q->lock);
122
123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
124
125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126 smp_processor_id(), new_head, hv_ret);
127
128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129 /* XXX ... XXX */
130 }
131
132 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133 if (hv_ret == HV_EOK)
134 q->head = new_head;
135
136 spin_unlock(&q->lock);
137
138 return IRQ_HANDLED;
139}
140
141static irqreturn_t mau_intr(int irq, void *dev_id)
142{
143 struct spu_queue *q = dev_id;
144 unsigned long head, hv_ret;
145
146 spin_lock(&q->lock);
147
148 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149 smp_processor_id(), q->qhandle);
150
151 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
152
153 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154 smp_processor_id(), head, hv_ret);
155
156 sun4v_ncs_sethead_marker(q->qhandle, head);
157
158 spin_unlock(&q->lock);
159
160 return IRQ_HANDLED;
161}
162
163static void *spu_queue_next(struct spu_queue *q, void *cur)
164{
165 return q->q + spu_next_offset(q, cur - q->q);
166}
167
168static int spu_queue_num_free(struct spu_queue *q)
169{
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
174
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
179
180 return (diff / CWQ_ENTRY_SIZE) - 1;
181}
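/* Note added for clarity (not in the original source): when head == tail
 * the else branch above yields diff == end, so an empty queue reports
 * CWQ_NUM_ENTRIES - 1 free slots.  Keeping one slot permanently unused is
 * what distinguishes an empty ring (head == tail) from a full one.
 */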
182
183static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
184{
185 int avail = spu_queue_num_free(q);
186
187 if (avail >= num_entries)
188 return q->q + q->tail;
189
190 return NULL;
191}
192
193static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
194{
195 unsigned long hv_ret, new_tail;
196
197 new_tail = spu_next_offset(q, last - q->q);
198
199 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200 if (hv_ret == HV_EOK)
201 q->tail = new_tail;
202 return hv_ret;
203}
204
205static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
210{
211 u64 word = (len - 1) & CONTROL_LEN;
212
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
228
229 return word;
230}
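/* Illustrative sketch (not part of the driver): the first descriptor of a
 * SHA-256 hash job built by n2_hash_async_digest() below corresponds
 * roughly to
 *
 *	u64 word = control_word_base(nbytes, 0, 0, AUTH_TYPE_SHA256,
 *				     SHA256_DIGEST_SIZE,
 *				     false, true, false, false,
 *				     OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. length - 1 in CONTROL_LEN, start-of-block set, and the auth type,
 * hash length and opcode packed into their shifted fields.
 */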
231
232#if 0
233static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
234{
235 if (this_len >= 64 ||
236 qp->head != qp->tail)
237 return true;
238 return false;
239}
240#endif
241
242struct n2_hash_ctx {
243 struct crypto_ahash *fallback_tfm;
244};
245
246struct n2_hash_req_ctx {
247 union {
248 struct md5_state md5;
249 struct sha1_state sha1;
250 struct sha256_state sha256;
251 } u;
252
253 unsigned char hash_key[64];
254 unsigned char keyed_zero_hash[32];
255
256 struct ahash_request fallback_req;
257};
258
259static int n2_hash_async_init(struct ahash_request *req)
260{
261 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
262 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
263 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
264
265 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
266 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
267
268 return crypto_ahash_init(&rctx->fallback_req);
269}
270
271static int n2_hash_async_update(struct ahash_request *req)
272{
273 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
274 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
275 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
276
277 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
278 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
279 rctx->fallback_req.nbytes = req->nbytes;
280 rctx->fallback_req.src = req->src;
281
282 return crypto_ahash_update(&rctx->fallback_req);
283}
284
285static int n2_hash_async_final(struct ahash_request *req)
286{
287 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
288 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
289 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
290
291 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
292 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
293 rctx->fallback_req.result = req->result;
294
295 return crypto_ahash_final(&rctx->fallback_req);
296}
297
298static int n2_hash_async_finup(struct ahash_request *req)
299{
300 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
301 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
302 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
303
304 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
305 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
306 rctx->fallback_req.nbytes = req->nbytes;
307 rctx->fallback_req.src = req->src;
308 rctx->fallback_req.result = req->result;
309
310 return crypto_ahash_finup(&rctx->fallback_req);
311}
312
313static int n2_hash_cra_init(struct crypto_tfm *tfm)
314{
315 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
316 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
317 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
318 struct crypto_ahash *fallback_tfm;
319 int err;
320
321 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
322 CRYPTO_ALG_NEED_FALLBACK);
323 if (IS_ERR(fallback_tfm)) {
324 pr_warning("Fallback driver '%s' could not be loaded!\n",
325 fallback_driver_name);
326 err = PTR_ERR(fallback_tfm);
327 goto out;
328 }
329
330 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
331 crypto_ahash_reqsize(fallback_tfm)));
332
333 ctx->fallback_tfm = fallback_tfm;
334 return 0;
335
336out:
337 return err;
338}
339
340static void n2_hash_cra_exit(struct crypto_tfm *tfm)
341{
342 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
343 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
344
345 crypto_free_ahash(ctx->fallback_tfm);
346}
347
348static unsigned long wait_for_tail(struct spu_queue *qp)
349{
350 unsigned long head, hv_ret;
351
352 do {
353 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
354 if (hv_ret != HV_EOK) {
355 pr_err("Hypervisor error on gethead\n");
356 break;
357 }
358 if (head == qp->tail) {
359 qp->head = head;
360 break;
361 }
362 } while (1);
363 return hv_ret;
364}
365
366static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
367 struct cwq_initial_entry *ent)
368{
369 unsigned long hv_ret = spu_queue_submit(qp, ent);
370
371 if (hv_ret == HV_EOK)
372 hv_ret = wait_for_tail(qp);
373
374 return hv_ret;
375}
376
377static int n2_hash_async_digest(struct ahash_request *req,
378 unsigned int auth_type, unsigned int digest_size,
379 unsigned int result_size, void *hash_loc)
380{
381 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
382 struct cwq_initial_entry *ent;
383 struct crypto_hash_walk walk;
384 struct spu_queue *qp;
385 unsigned long flags;
386 int err = -ENODEV;
387 int nbytes, cpu;
388
389 /* The total effective length of the operation may not
390 * exceed 2^16.
391 */
392 if (unlikely(req->nbytes > (1 << 16))) {
393 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
394 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
395
396 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
397 rctx->fallback_req.base.flags =
398 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
399 rctx->fallback_req.nbytes = req->nbytes;
400 rctx->fallback_req.src = req->src;
401 rctx->fallback_req.result = req->result;
402
403 return crypto_ahash_digest(&rctx->fallback_req);
404 }
405
406 nbytes = crypto_hash_walk_first(req, &walk);
407
408 cpu = get_cpu();
409 qp = cpu_to_cwq[cpu];
410 if (!qp)
411 goto out;
412
413 spin_lock_irqsave(&qp->lock, flags);
414
415 /* XXX can do better, improve this later by doing a by-hand scatterlist
416 * XXX walk, etc.
417 */
418 ent = qp->q + qp->tail;
419
420 ent->control = control_word_base(nbytes, 0, 0,
421 auth_type, digest_size,
422 false, true, false, false,
423 OPCODE_INPLACE_BIT |
424 OPCODE_AUTH_MAC);
425 ent->src_addr = __pa(walk.data);
426 ent->auth_key_addr = 0UL;
427 ent->auth_iv_addr = __pa(hash_loc);
428 ent->final_auth_state_addr = 0UL;
429 ent->enc_key_addr = 0UL;
430 ent->enc_iv_addr = 0UL;
431 ent->dest_addr = __pa(hash_loc);
432
433 nbytes = crypto_hash_walk_done(&walk, 0);
434 while (nbytes > 0) {
435 ent = spu_queue_next(qp, ent);
436
437 ent->control = (nbytes - 1);
438 ent->src_addr = __pa(walk.data);
439 ent->auth_key_addr = 0UL;
440 ent->auth_iv_addr = 0UL;
441 ent->final_auth_state_addr = 0UL;
442 ent->enc_key_addr = 0UL;
443 ent->enc_iv_addr = 0UL;
444 ent->dest_addr = 0UL;
445
446 nbytes = crypto_hash_walk_done(&walk, 0);
447 }
448 ent->control |= CONTROL_END_OF_BLOCK;
449
450 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
451 err = -EINVAL;
452 else
453 err = 0;
454
455 spin_unlock_irqrestore(&qp->lock, flags);
456
457 if (!err)
458 memcpy(req->result, hash_loc, result_size);
459out:
460 put_cpu();
461
462 return err;
463}
464
465static int n2_md5_async_digest(struct ahash_request *req)
466{
467 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
468 struct md5_state *m = &rctx->u.md5;
469
470 if (unlikely(req->nbytes == 0)) {
471 static const char md5_zero[MD5_DIGEST_SIZE] = {
472 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
473 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
474 };
475
476 memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
477 return 0;
478 }
479 m->hash[0] = cpu_to_le32(0x67452301);
480 m->hash[1] = cpu_to_le32(0xefcdab89);
481 m->hash[2] = cpu_to_le32(0x98badcfe);
482 m->hash[3] = cpu_to_le32(0x10325476);
483
484 return n2_hash_async_digest(req, AUTH_TYPE_MD5,
485 MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
486 m->hash);
487}
488
489static int n2_sha1_async_digest(struct ahash_request *req)
490{
491 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
492 struct sha1_state *s = &rctx->u.sha1;
493
494 if (unlikely(req->nbytes == 0)) {
495 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
496 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
497 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
498 0x07, 0x09
499 };
500
501 memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
502 return 0;
503 }
504 s->state[0] = SHA1_H0;
505 s->state[1] = SHA1_H1;
506 s->state[2] = SHA1_H2;
507 s->state[3] = SHA1_H3;
508 s->state[4] = SHA1_H4;
509
510 return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
511 SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
512 s->state);
513}
514
515static int n2_sha256_async_digest(struct ahash_request *req)
516{
517 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
518 struct sha256_state *s = &rctx->u.sha256;
519
520 if (req->nbytes == 0) {
521 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
522 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
523 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
524 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
525 0x1b, 0x78, 0x52, 0xb8, 0x55
526 };
527
528 memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
529 return 0;
530 }
531 s->state[0] = SHA256_H0;
532 s->state[1] = SHA256_H1;
533 s->state[2] = SHA256_H2;
534 s->state[3] = SHA256_H3;
535 s->state[4] = SHA256_H4;
536 s->state[5] = SHA256_H5;
537 s->state[6] = SHA256_H6;
538 s->state[7] = SHA256_H7;
539
540 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
541 SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
542 s->state);
543}
544
545static int n2_sha224_async_digest(struct ahash_request *req)
546{
547 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
548 struct sha256_state *s = &rctx->u.sha256;
549
550 if (req->nbytes == 0) {
551 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
552 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
553 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
554 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
555 0x2f
556 };
557
558 memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
559 return 0;
560 }
561 s->state[0] = SHA224_H0;
562 s->state[1] = SHA224_H1;
563 s->state[2] = SHA224_H2;
564 s->state[3] = SHA224_H3;
565 s->state[4] = SHA224_H4;
566 s->state[5] = SHA224_H5;
567 s->state[6] = SHA224_H6;
568 s->state[7] = SHA224_H7;
569
570 return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
571 SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
572 s->state);
573}
574
575struct n2_cipher_context {
576 int key_len;
577 int enc_type;
578 union {
579 u8 aes[AES_MAX_KEY_SIZE];
580 u8 des[DES_KEY_SIZE];
581 u8 des3[3 * DES_KEY_SIZE];
582 u8 arc4[258]; /* S-box, X, Y */
583 } key;
584};
585
586#define N2_CHUNK_ARR_LEN 16
587
588struct n2_crypto_chunk {
589 struct list_head entry;
590 unsigned long iv_paddr : 44;
591 unsigned long arr_len : 20;
592 unsigned long dest_paddr;
593 unsigned long dest_final;
594 struct {
595 unsigned long src_paddr : 44;
596 unsigned long src_len : 20;
597 } arr[N2_CHUNK_ARR_LEN];
598};
599
600struct n2_request_context {
601 struct ablkcipher_walk walk;
602 struct list_head chunk_list;
603 struct n2_crypto_chunk chunk;
604 u8 temp_iv[16];
605};
606
607/* The SPU allows some level of flexibility for partial cipher blocks
608 * being specified in a descriptor.
609 *
610 * It merely requires that every descriptor's length field is at least
611 * as large as the cipher block size. This means that a cipher block
612 * can span at most 2 descriptors. However, this does not allow a
613 * partial block to span into the final descriptor as that would
614 * violate the rule (since every descriptor's length must be at least
615 * the block size). So, for example, assuming an 8 byte block size:
616 *
617 * 0xe --> 0xa --> 0x8
618 *
619 * is a valid length sequence, whereas:
620 *
621 * 0xe --> 0xb --> 0x7
622 *
623 * is not a valid sequence.
624 */
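/* A minimal sketch of the rule above (illustrative only, not used by the
 * driver): a sequence of descriptor lengths is acceptable exactly when
 * every length is at least the cipher block size.
 *
 *	static bool descriptor_lengths_ok(const unsigned int *len, int n,
 *					  unsigned int block_size)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			if (len[i] < block_size)
 *				return false;
 *		return true;
 *	}
 *
 * With an 8 byte block size this accepts 0xe -> 0xa -> 0x8 and rejects
 * 0xe -> 0xb -> 0x7, matching the examples above.
 */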
625
626struct n2_cipher_alg {
627 struct list_head entry;
628 u8 enc_type;
629 struct crypto_alg alg;
630};
631
632static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
633{
634 struct crypto_alg *alg = tfm->__crt_alg;
635
636 return container_of(alg, struct n2_cipher_alg, alg);
637}
638
639struct n2_cipher_request_context {
640 struct ablkcipher_walk walk;
641};
642
643static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
644 unsigned int keylen)
645{
646 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
647 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
648 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
649
650 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
651
652 switch (keylen) {
653 case AES_KEYSIZE_128:
654 ctx->enc_type |= ENC_TYPE_ALG_AES128;
655 break;
656 case AES_KEYSIZE_192:
657 ctx->enc_type |= ENC_TYPE_ALG_AES192;
658 break;
659 case AES_KEYSIZE_256:
660 ctx->enc_type |= ENC_TYPE_ALG_AES256;
661 break;
662 default:
663 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
664 return -EINVAL;
665 }
666
667 ctx->key_len = keylen;
668 memcpy(ctx->key.aes, key, keylen);
669 return 0;
670}
671
672static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
673 unsigned int keylen)
674{
675 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
676 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
677 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
678 u32 tmp[DES_EXPKEY_WORDS];
679 int err;
680
681 ctx->enc_type = n2alg->enc_type;
682
683 if (keylen != DES_KEY_SIZE) {
684 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
685 return -EINVAL;
686 }
687
688 err = des_ekey(tmp, key);
689 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
690 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
691 return -EINVAL;
692 }
693
694 ctx->key_len = keylen;
695 memcpy(ctx->key.des, key, keylen);
696 return 0;
697}
698
699static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
700 unsigned int keylen)
701{
702 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
703 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
704 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
705
706 ctx->enc_type = n2alg->enc_type;
707
708 if (keylen != (3 * DES_KEY_SIZE)) {
709 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
710 return -EINVAL;
711 }
712 ctx->key_len = keylen;
713 memcpy(ctx->key.des3, key, keylen);
714 return 0;
715}
716
717static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
718 unsigned int keylen)
719{
720 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
721 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
722 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
723 u8 *s = ctx->key.arc4;
724 u8 *x = s + 256;
725 u8 *y = x + 1;
726 int i, j, k;
727
728 ctx->enc_type = n2alg->enc_type;
729
730 j = k = 0;
731 *x = 0;
732 *y = 0;
733 for (i = 0; i < 256; i++)
734 s[i] = i;
735 for (i = 0; i < 256; i++) {
736 u8 a = s[i];
737 j = (j + key[k] + a) & 0xff;
738 s[i] = s[j];
739 s[j] = a;
740 if (++k >= keylen)
741 k = 0;
742 }
743
744 return 0;
745}
746
747static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
748{
749 int this_len = nbytes;
750
751 this_len -= (nbytes & (block_size - 1));
752 return this_len > (1 << 16) ? (1 << 16) : this_len;
753}
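/* Example for clarity (not in the original source): with nbytes = 100 and
 * a 16 byte AES block size, cipher_descriptor_len() rounds down to 96; a
 * 128 KB request would instead be clamped to the 64 KB (1 << 16)
 * per-descriptor limit.
 */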
754
755static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
756 struct spu_queue *qp, bool encrypt)
757{
758 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
759 struct cwq_initial_entry *ent;
760 bool in_place;
761 int i;
762
763 ent = spu_queue_alloc(qp, cp->arr_len);
764 if (!ent) {
765 pr_info("queue_alloc() of %d fails\n",
766 cp->arr_len);
767 return -EBUSY;
768 }
769
770 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
771
772 ent->control = control_word_base(cp->arr[0].src_len,
773 0, ctx->enc_type, 0, 0,
774 false, true, false, encrypt,
775 OPCODE_ENCRYPT |
776 (in_place ? OPCODE_INPLACE_BIT : 0));
777 ent->src_addr = cp->arr[0].src_paddr;
778 ent->auth_key_addr = 0UL;
779 ent->auth_iv_addr = 0UL;
780 ent->final_auth_state_addr = 0UL;
781 ent->enc_key_addr = __pa(&ctx->key);
782 ent->enc_iv_addr = cp->iv_paddr;
783 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
784
785 for (i = 1; i < cp->arr_len; i++) {
786 ent = spu_queue_next(qp, ent);
787
788 ent->control = cp->arr[i].src_len - 1;
789 ent->src_addr = cp->arr[i].src_paddr;
790 ent->auth_key_addr = 0UL;
791 ent->auth_iv_addr = 0UL;
792 ent->final_auth_state_addr = 0UL;
793 ent->enc_key_addr = 0UL;
794 ent->enc_iv_addr = 0UL;
795 ent->dest_addr = 0UL;
796 }
797 ent->control |= CONTROL_END_OF_BLOCK;
798
799 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
800}
801
802static int n2_compute_chunks(struct ablkcipher_request *req)
803{
804 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
805 struct ablkcipher_walk *walk = &rctx->walk;
806 struct n2_crypto_chunk *chunk;
807 unsigned long dest_prev;
808 unsigned int tot_len;
809 bool prev_in_place;
810 int err, nbytes;
811
812 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
813 err = ablkcipher_walk_phys(req, walk);
814 if (err)
815 return err;
816
817 INIT_LIST_HEAD(&rctx->chunk_list);
818
819 chunk = &rctx->chunk;
820 INIT_LIST_HEAD(&chunk->entry);
821
822 chunk->iv_paddr = 0UL;
823 chunk->arr_len = 0;
824 chunk->dest_paddr = 0UL;
825
826 prev_in_place = false;
827 dest_prev = ~0UL;
828 tot_len = 0;
829
830 while ((nbytes = walk->nbytes) != 0) {
831 unsigned long dest_paddr, src_paddr;
832 bool in_place;
833 int this_len;
834
835 src_paddr = (page_to_phys(walk->src.page) +
836 walk->src.offset);
837 dest_paddr = (page_to_phys(walk->dst.page) +
838 walk->dst.offset);
839 in_place = (src_paddr == dest_paddr);
840 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
841
842 if (chunk->arr_len != 0) {
843 if (in_place != prev_in_place ||
844 (!prev_in_place &&
845 dest_paddr != dest_prev) ||
846 chunk->arr_len == N2_CHUNK_ARR_LEN ||
847 tot_len + this_len > (1 << 16)) {
848 chunk->dest_final = dest_prev;
849 list_add_tail(&chunk->entry,
850 &rctx->chunk_list);
851 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
852 if (!chunk) {
853 err = -ENOMEM;
854 break;
855 }
856 INIT_LIST_HEAD(&chunk->entry);
857 }
858 }
859 if (chunk->arr_len == 0) {
860 chunk->dest_paddr = dest_paddr;
861 tot_len = 0;
862 }
863 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
864 chunk->arr[chunk->arr_len].src_len = this_len;
865 chunk->arr_len++;
866
867 dest_prev = dest_paddr + this_len;
868 prev_in_place = in_place;
869 tot_len += this_len;
870
871 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
872 if (err)
873 break;
874 }
875 if (!err && chunk->arr_len != 0) {
876 chunk->dest_final = dest_prev;
877 list_add_tail(&chunk->entry, &rctx->chunk_list);
878 }
879
880 return err;
881}
882
883static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
884{
885 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
886 struct n2_crypto_chunk *c, *tmp;
887
888 if (final_iv)
889 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
890
891 ablkcipher_walk_complete(&rctx->walk);
892 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
893 list_del(&c->entry);
894 if (unlikely(c != &rctx->chunk))
895 kfree(c);
896 }
897
898}
899
900static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
901{
902 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
903 struct crypto_tfm *tfm = req->base.tfm;
904 int err = n2_compute_chunks(req);
905 struct n2_crypto_chunk *c, *tmp;
906 unsigned long flags, hv_ret;
907 struct spu_queue *qp;
908
909 if (err)
910 return err;
911
912 qp = cpu_to_cwq[get_cpu()];
913 err = -ENODEV;
914 if (!qp)
915 goto out;
916
917 spin_lock_irqsave(&qp->lock, flags);
918
919 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
920 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
921 if (err)
922 break;
923 list_del(&c->entry);
924 if (unlikely(c != &rctx->chunk))
925 kfree(c);
926 }
927 if (!err) {
928 hv_ret = wait_for_tail(qp);
929 if (hv_ret != HV_EOK)
930 err = -EINVAL;
931 }
932
933 spin_unlock_irqrestore(&qp->lock, flags);
934
935 put_cpu();
936
937out:
938 n2_chunk_complete(req, NULL);
939 return err;
940}
941
942static int n2_encrypt_ecb(struct ablkcipher_request *req)
943{
944 return n2_do_ecb(req, true);
945}
946
947static int n2_decrypt_ecb(struct ablkcipher_request *req)
948{
949 return n2_do_ecb(req, false);
950}
951
952static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
953{
954 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
955 struct crypto_tfm *tfm = req->base.tfm;
956 unsigned long flags, hv_ret, iv_paddr;
957 int err = n2_compute_chunks(req);
958 struct n2_crypto_chunk *c, *tmp;
959 struct spu_queue *qp;
960 void *final_iv_addr;
961
962 final_iv_addr = NULL;
963
964 if (err)
965 return err;
966
967 qp = cpu_to_cwq[get_cpu()];
968 err = -ENODEV;
969 if (!qp)
970 goto out;
971
972 spin_lock_irqsave(&qp->lock, flags);
973
974 if (encrypt) {
975 iv_paddr = __pa(rctx->walk.iv);
976 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
977 entry) {
978 c->iv_paddr = iv_paddr;
979 err = __n2_crypt_chunk(tfm, c, qp, true);
980 if (err)
981 break;
982 iv_paddr = c->dest_final - rctx->walk.blocksize;
983 list_del(&c->entry);
984 if (unlikely(c != &rctx->chunk))
985 kfree(c);
986 }
987 final_iv_addr = __va(iv_paddr);
988 } else {
989 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
990 entry) {
991 if (c == &rctx->chunk) {
992 iv_paddr = __pa(rctx->walk.iv);
993 } else {
994 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
995 tmp->arr[tmp->arr_len-1].src_len -
996 rctx->walk.blocksize);
997 }
998 if (!final_iv_addr) {
999 unsigned long pa;
1000
1001 pa = (c->arr[c->arr_len-1].src_paddr +
1002 c->arr[c->arr_len-1].src_len -
1003 rctx->walk.blocksize);
1004 final_iv_addr = rctx->temp_iv;
1005 memcpy(rctx->temp_iv, __va(pa),
1006 rctx->walk.blocksize);
1007 }
1008 c->iv_paddr = iv_paddr;
1009 err = __n2_crypt_chunk(tfm, c, qp, false);
1010 if (err)
1011 break;
1012 list_del(&c->entry);
1013 if (unlikely(c != &rctx->chunk))
1014 kfree(c);
1015 }
1016 }
1017 if (!err) {
1018 hv_ret = wait_for_tail(qp);
1019 if (hv_ret != HV_EOK)
1020 err = -EINVAL;
1021 }
1022
1023 spin_unlock_irqrestore(&qp->lock, flags);
1024
1025 put_cpu();
1026
1027out:
1028 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1029 return err;
1030}
1031
1032static int n2_encrypt_chaining(struct ablkcipher_request *req)
1033{
1034 return n2_do_chaining(req, true);
1035}
1036
1037static int n2_decrypt_chaining(struct ablkcipher_request *req)
1038{
1039 return n2_do_chaining(req, false);
1040}
1041
1042struct n2_cipher_tmpl {
1043 const char *name;
1044 const char *drv_name;
1045 u8 block_size;
1046 u8 enc_type;
1047 struct ablkcipher_alg ablkcipher;
1048};
1049
1050static const struct n2_cipher_tmpl cipher_tmpls[] = {
1051 /* ARC4: only ECB is supported (chaining bits ignored) */
1052 { .name = "ecb(arc4)",
1053 .drv_name = "ecb-arc4",
1054 .block_size = 1,
1055 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1056 ENC_TYPE_CHAINING_ECB),
1057 .ablkcipher = {
1058 .min_keysize = 1,
1059 .max_keysize = 256,
1060 .setkey = n2_arc4_setkey,
1061 .encrypt = n2_encrypt_ecb,
1062 .decrypt = n2_decrypt_ecb,
1063 },
1064 },
1065
1066 /* DES: ECB CBC and CFB are supported */
1067 { .name = "ecb(des)",
1068 .drv_name = "ecb-des",
1069 .block_size = DES_BLOCK_SIZE,
1070 .enc_type = (ENC_TYPE_ALG_DES |
1071 ENC_TYPE_CHAINING_ECB),
1072 .ablkcipher = {
1073 .min_keysize = DES_KEY_SIZE,
1074 .max_keysize = DES_KEY_SIZE,
1075 .setkey = n2_des_setkey,
1076 .encrypt = n2_encrypt_ecb,
1077 .decrypt = n2_decrypt_ecb,
1078 },
1079 },
1080 { .name = "cbc(des)",
1081 .drv_name = "cbc-des",
1082 .block_size = DES_BLOCK_SIZE,
1083 .enc_type = (ENC_TYPE_ALG_DES |
1084 ENC_TYPE_CHAINING_CBC),
1085 .ablkcipher = {
1086 .ivsize = DES_BLOCK_SIZE,
1087 .min_keysize = DES_KEY_SIZE,
1088 .max_keysize = DES_KEY_SIZE,
1089 .setkey = n2_des_setkey,
1090 .encrypt = n2_encrypt_chaining,
1091 .decrypt = n2_decrypt_chaining,
1092 },
1093 },
1094 { .name = "cfb(des)",
1095 .drv_name = "cfb-des",
1096 .block_size = DES_BLOCK_SIZE,
1097 .enc_type = (ENC_TYPE_ALG_DES |
1098 ENC_TYPE_CHAINING_CFB),
1099 .ablkcipher = {
1100 .min_keysize = DES_KEY_SIZE,
1101 .max_keysize = DES_KEY_SIZE,
1102 .setkey = n2_des_setkey,
1103 .encrypt = n2_encrypt_chaining,
1104 .decrypt = n2_decrypt_chaining,
1105 },
1106 },
1107
1108 /* 3DES: ECB CBC and CFB are supported */
1109 { .name = "ecb(des3_ede)",
1110 .drv_name = "ecb-3des",
1111 .block_size = DES_BLOCK_SIZE,
1112 .enc_type = (ENC_TYPE_ALG_3DES |
1113 ENC_TYPE_CHAINING_ECB),
1114 .ablkcipher = {
1115 .min_keysize = 3 * DES_KEY_SIZE,
1116 .max_keysize = 3 * DES_KEY_SIZE,
1117 .setkey = n2_3des_setkey,
1118 .encrypt = n2_encrypt_ecb,
1119 .decrypt = n2_decrypt_ecb,
1120 },
1121 },
1122 { .name = "cbc(des3_ede)",
1123 .drv_name = "cbc-3des",
1124 .block_size = DES_BLOCK_SIZE,
1125 .enc_type = (ENC_TYPE_ALG_3DES |
1126 ENC_TYPE_CHAINING_CBC),
1127 .ablkcipher = {
1128 .ivsize = DES_BLOCK_SIZE,
1129 .min_keysize = 3 * DES_KEY_SIZE,
1130 .max_keysize = 3 * DES_KEY_SIZE,
1131 .setkey = n2_3des_setkey,
1132 .encrypt = n2_encrypt_chaining,
1133 .decrypt = n2_decrypt_chaining,
1134 },
1135 },
1136 { .name = "cfb(des3_ede)",
1137 .drv_name = "cfb-3des",
1138 .block_size = DES_BLOCK_SIZE,
1139 .enc_type = (ENC_TYPE_ALG_3DES |
1140 ENC_TYPE_CHAINING_CFB),
1141 .ablkcipher = {
1142 .min_keysize = 3 * DES_KEY_SIZE,
1143 .max_keysize = 3 * DES_KEY_SIZE,
1144 .setkey = n2_3des_setkey,
1145 .encrypt = n2_encrypt_chaining,
1146 .decrypt = n2_decrypt_chaining,
1147 },
1148 },
1149 /* AES: ECB CBC and CTR are supported */
1150 { .name = "ecb(aes)",
1151 .drv_name = "ecb-aes",
1152 .block_size = AES_BLOCK_SIZE,
1153 .enc_type = (ENC_TYPE_ALG_AES128 |
1154 ENC_TYPE_CHAINING_ECB),
1155 .ablkcipher = {
1156 .min_keysize = AES_MIN_KEY_SIZE,
1157 .max_keysize = AES_MAX_KEY_SIZE,
1158 .setkey = n2_aes_setkey,
1159 .encrypt = n2_encrypt_ecb,
1160 .decrypt = n2_decrypt_ecb,
1161 },
1162 },
1163 { .name = "cbc(aes)",
1164 .drv_name = "cbc-aes",
1165 .block_size = AES_BLOCK_SIZE,
1166 .enc_type = (ENC_TYPE_ALG_AES128 |
1167 ENC_TYPE_CHAINING_CBC),
1168 .ablkcipher = {
1169 .ivsize = AES_BLOCK_SIZE,
1170 .min_keysize = AES_MIN_KEY_SIZE,
1171 .max_keysize = AES_MAX_KEY_SIZE,
1172 .setkey = n2_aes_setkey,
1173 .encrypt = n2_encrypt_chaining,
1174 .decrypt = n2_decrypt_chaining,
1175 },
1176 },
1177 { .name = "ctr(aes)",
1178 .drv_name = "ctr-aes",
1179 .block_size = AES_BLOCK_SIZE,
1180 .enc_type = (ENC_TYPE_ALG_AES128 |
1181 ENC_TYPE_CHAINING_COUNTER),
1182 .ablkcipher = {
1183 .ivsize = AES_BLOCK_SIZE,
1184 .min_keysize = AES_MIN_KEY_SIZE,
1185 .max_keysize = AES_MAX_KEY_SIZE,
1186 .setkey = n2_aes_setkey,
1187 .encrypt = n2_encrypt_chaining,
1188 .decrypt = n2_encrypt_chaining,
1189 },
1190 },
1191
1192};
1193#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1194
1195static LIST_HEAD(cipher_algs);
1196
1197struct n2_hash_tmpl {
1198 const char *name;
1199 int (*digest)(struct ahash_request *req);
1200 u8 digest_size;
1201 u8 block_size;
1202};
1203static const struct n2_hash_tmpl hash_tmpls[] = {
1204 { .name = "md5",
1205 .digest = n2_md5_async_digest,
1206 .digest_size = MD5_DIGEST_SIZE,
1207 .block_size = MD5_HMAC_BLOCK_SIZE },
1208 { .name = "sha1",
1209 .digest = n2_sha1_async_digest,
1210 .digest_size = SHA1_DIGEST_SIZE,
1211 .block_size = SHA1_BLOCK_SIZE },
1212 { .name = "sha256",
1213 .digest = n2_sha256_async_digest,
1214 .digest_size = SHA256_DIGEST_SIZE,
1215 .block_size = SHA256_BLOCK_SIZE },
1216 { .name = "sha224",
1217 .digest = n2_sha224_async_digest,
1218 .digest_size = SHA224_DIGEST_SIZE,
1219 .block_size = SHA224_BLOCK_SIZE },
1220};
1221#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1222
1223struct n2_ahash_alg {
1224 struct list_head entry;
1225 struct ahash_alg alg;
1226};
1227static LIST_HEAD(ahash_algs);
1228
1229static int algs_registered;
1230
1231static void __n2_unregister_algs(void)
1232{
1233 struct n2_cipher_alg *cipher, *cipher_tmp;
1234 struct n2_ahash_alg *alg, *alg_tmp;
1235
1236 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1237 crypto_unregister_alg(&cipher->alg);
1238 list_del(&cipher->entry);
1239 kfree(cipher);
1240 }
1241 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1242 crypto_unregister_ahash(&alg->alg);
1243 list_del(&alg->entry);
1244 kfree(alg);
1245 }
1246}
1247
1248static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1249{
1250 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1251 return 0;
1252}
1253
1254static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1255{
1256 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1257 struct crypto_alg *alg;
1258 int err;
1259
1260 if (!p)
1261 return -ENOMEM;
1262
1263 alg = &p->alg;
1264
1265 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1266 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1267 alg->cra_priority = N2_CRA_PRIORITY;
1268 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1269 alg->cra_blocksize = tmpl->block_size;
1270 p->enc_type = tmpl->enc_type;
1271 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1272 alg->cra_type = &crypto_ablkcipher_type;
1273 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1274 alg->cra_init = n2_cipher_cra_init;
1275 alg->cra_module = THIS_MODULE;
1276
1277 list_add(&p->entry, &cipher_algs);
1278 err = crypto_register_alg(alg);
1279 if (err) {
1280 pr_err("%s alg registration failed\n", alg->cra_name);
1281 list_del(&p->entry);
1282 kfree(p);
1283 } else {
1284 pr_info("%s alg registered\n", alg->cra_name);
1285 }
1286 return err;
1287}
1288
1289static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1290{
1291 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1292 struct hash_alg_common *halg;
1293 struct crypto_alg *base;
1294 struct ahash_alg *ahash;
1295 int err;
1296
1297 if (!p)
1298 return -ENOMEM;
1299
1300 ahash = &p->alg;
1301 ahash->init = n2_hash_async_init;
1302 ahash->update = n2_hash_async_update;
1303 ahash->final = n2_hash_async_final;
1304 ahash->finup = n2_hash_async_finup;
1305 ahash->digest = tmpl->digest;
1306
1307 halg = &ahash->halg;
1308 halg->digestsize = tmpl->digest_size;
1309
1310 base = &halg->base;
1311 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1312 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1313 base->cra_priority = N2_CRA_PRIORITY;
1314 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
1315 base->cra_blocksize = tmpl->block_size;
1316 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1317 base->cra_module = THIS_MODULE;
1318 base->cra_init = n2_hash_cra_init;
1319 base->cra_exit = n2_hash_cra_exit;
1320
1321 list_add(&p->entry, &ahash_algs);
1322 err = crypto_register_ahash(ahash);
1323 if (err) {
1324 pr_err("%s alg registration failed\n", base->cra_name);
1325 list_del(&p->entry);
1326 kfree(p);
1327 } else {
1328 pr_info("%s alg registered\n", base->cra_name);
1329 }
1330 return err;
1331}
1332
1333static int __devinit n2_register_algs(void)
1334{
1335 int i, err = 0;
1336
1337 mutex_lock(&spu_lock);
1338 if (algs_registered++)
1339 goto out;
1340
1341 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1342 err = __n2_register_one_ahash(&hash_tmpls[i]);
1343 if (err) {
1344 __n2_unregister_algs();
1345 goto out;
1346 }
1347 }
1348 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1349 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1350 if (err) {
1351 __n2_unregister_algs();
1352 goto out;
1353 }
1354 }
1355
1356out:
1357 mutex_unlock(&spu_lock);
1358 return err;
1359}
1360
1361static void __exit n2_unregister_algs(void)
1362{
1363 mutex_lock(&spu_lock);
1364 if (!--algs_registered)
1365 __n2_unregister_algs();
1366 mutex_unlock(&spu_lock);
1367}
1368
1369/* To map CWQ queues to interrupt sources, the hypervisor API provides
1370 * a devino. This isn't very useful to us because all of the
1371 * interrupts listed in the of_device node have been translated to
1372 * Linux virtual IRQ cookie numbers.
1373 *
1374 * So we have to back-translate, going through the 'intr' and 'ino'
1375 * property tables of the n2cp MDESC node, matching them with the OF
1376 * 'interrupts' property entries, in order to figure out which
1377 * devino goes to which already-translated IRQ.
1378 */
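/* Worked example (hypothetical values, not from the source): if the MDESC
 * 'ino' table maps devino 0x25 to intr index 1, and the OF 'interrupts'
 * property reads { 1, 2 }, find_devino_index() below returns 0, and
 * spu_map_ino() then uses dev->irqs[0] as the Linux IRQ for that queue.
 */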
1379static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
1380 unsigned long dev_ino)
1381{
1382 const unsigned int *dev_intrs;
1383 unsigned int intr;
1384 int i;
1385
1386 for (i = 0; i < ip->num_intrs; i++) {
1387 if (ip->ino_table[i].ino == dev_ino)
1388 break;
1389 }
1390 if (i == ip->num_intrs)
1391 return -ENODEV;
1392
1393 intr = ip->ino_table[i].intr;
1394
1395 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1396 if (!dev_intrs)
1397 return -ENODEV;
1398
1399 for (i = 0; i < dev->num_irqs; i++) {
1400 if (dev_intrs[i] == intr)
1401 return i;
1402 }
1403
1404 return -ENODEV;
1405}
1406
1407static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
1408 const char *irq_name, struct spu_queue *p,
1409 irq_handler_t handler)
1410{
1411 unsigned long herr;
1412 int index;
1413
1414 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1415 if (herr)
1416 return -EINVAL;
1417
1418 index = find_devino_index(dev, ip, p->devino);
1419 if (index < 0)
1420 return index;
1421
1422 p->irq = dev->irqs[index];
1423
1424 sprintf(p->irq_name, "%s-%d", irq_name, index);
1425
1426 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
1427 p->irq_name, p);
1428}
1429
1430static struct kmem_cache *queue_cache[2];
1431
1432static void *new_queue(unsigned long q_type)
1433{
1434 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1435}
1436
1437static void free_queue(void *p, unsigned long q_type)
1438{
1439 return kmem_cache_free(queue_cache[q_type - 1], p);
1440}
1441
1442static int queue_cache_init(void)
1443{
1444 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1445 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1446 kmem_cache_create("mau_queue",
1447 (MAU_NUM_ENTRIES *
1448 MAU_ENTRY_SIZE),
1449 MAU_ENTRY_SIZE, 0, NULL);
1450 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1451 return -ENOMEM;
1452
1453 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1454 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1455 kmem_cache_create("cwq_queue",
1456 (CWQ_NUM_ENTRIES *
1457 CWQ_ENTRY_SIZE),
1458 CWQ_ENTRY_SIZE, 0, NULL);
1459 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1460 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1461 return -ENOMEM;
1462 }
1463 return 0;
1464}
1465
1466static void queue_cache_destroy(void)
1467{
1468 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1469 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1470}
1471
1472static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1473{
1474 cpumask_var_t old_allowed;
1475 unsigned long hv_ret;
1476
1477 if (cpumask_empty(&p->sharing))
1478 return -EINVAL;
1479
1480 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1481 return -ENOMEM;
1482
1483 cpumask_copy(old_allowed, &current->cpus_allowed);
1484
1485 set_cpus_allowed_ptr(current, &p->sharing);
1486
1487 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1488 CWQ_NUM_ENTRIES, &p->qhandle);
1489 if (!hv_ret)
1490 sun4v_ncs_sethead_marker(p->qhandle, 0);
1491
1492 set_cpus_allowed_ptr(current, old_allowed);
1493
1494 free_cpumask_var(old_allowed);
1495
1496 return (hv_ret ? -EINVAL : 0);
1497}
1498
1499static int spu_queue_setup(struct spu_queue *p)
1500{
1501 int err;
1502
1503 p->q = new_queue(p->q_type);
1504 if (!p->q)
1505 return -ENOMEM;
1506
1507 err = spu_queue_register(p, p->q_type);
1508 if (err) {
1509 free_queue(p->q, p->q_type);
1510 p->q = NULL;
1511 }
1512
1513 return err;
1514}
1515
1516static void spu_queue_destroy(struct spu_queue *p)
1517{
1518 unsigned long hv_ret;
1519
1520 if (!p->q)
1521 return;
1522
1523 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1524
1525 if (!hv_ret)
1526 free_queue(p->q, p->q_type);
1527}
1528
1529static void spu_list_destroy(struct list_head *list)
1530{
1531 struct spu_queue *p, *n;
1532
1533 list_for_each_entry_safe(p, n, list, list) {
1534 int i;
1535
1536 for (i = 0; i < NR_CPUS; i++) {
1537 if (cpu_to_cwq[i] == p)
1538 cpu_to_cwq[i] = NULL;
1539 }
1540
1541 if (p->irq) {
1542 free_irq(p->irq, p);
1543 p->irq = 0;
1544 }
1545 spu_queue_destroy(p);
1546 list_del(&p->list);
1547 kfree(p);
1548 }
1549}
1550
1551/* Walk the backward arcs of a CWQ 'exec-unit' node,
1552 * gathering cpu membership information.
1553 */
1554static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1555 struct of_device *dev,
1556 u64 node, struct spu_queue *p,
1557 struct spu_queue **table)
1558{
1559 u64 arc;
1560
1561 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1562 u64 tgt = mdesc_arc_target(mdesc, arc);
1563 const char *name = mdesc_node_name(mdesc, tgt);
1564 const u64 *id;
1565
1566 if (strcmp(name, "cpu"))
1567 continue;
1568 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1569 if (table[*id] != NULL) {
1570 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1571 dev->dev.of_node->full_name);
1572 return -EINVAL;
1573 }
1574 cpu_set(*id, p->sharing);
1575 table[*id] = p;
1576 }
1577 return 0;
1578}
1579
1580/* Process an 'exec-unit' MDESC node of type 'cwq'. */
1581static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1582 struct of_device *dev, struct mdesc_handle *mdesc,
1583 u64 node, const char *iname, unsigned long q_type,
1584 irq_handler_t handler, struct spu_queue **table)
1585{
1586 struct spu_queue *p;
1587 int err;
1588
1589 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1590 if (!p) {
1591 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1592 dev->dev.of_node->full_name);
1593 return -ENOMEM;
1594 }
1595
1596 cpus_clear(p->sharing);
1597 spin_lock_init(&p->lock);
1598 p->q_type = q_type;
1599 INIT_LIST_HEAD(&p->jobs);
1600 list_add(&p->list, list);
1601
1602 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1603 if (err)
1604 return err;
1605
1606 err = spu_queue_setup(p);
1607 if (err)
1608 return err;
1609
1610 return spu_map_ino(dev, ip, iname, p, handler);
1611}
1612
1613static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
1614 struct spu_mdesc_info *ip, struct list_head *list,
1615 const char *exec_name, unsigned long q_type,
1616 irq_handler_t handler, struct spu_queue **table)
1617{
1618 int err = 0;
1619 u64 node;
1620
1621 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1622 const char *type;
1623
1624 type = mdesc_get_property(mdesc, node, "type", NULL);
1625 if (!type || strcmp(type, exec_name))
1626 continue;
1627
1628 err = handle_exec_unit(ip, list, dev, mdesc, node,
1629 exec_name, q_type, handler, table);
1630 if (err) {
1631 spu_list_destroy(list);
1632 break;
1633 }
1634 }
1635
1636 return err;
1637}
1638
1639static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1640 struct spu_mdesc_info *ip)
1641{
1642 const u64 *intr, *ino;
1643 int intr_len, ino_len;
1644 int i;
1645
1646 intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
1647 if (!intr)
1648 return -ENODEV;
1649
1650 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1651 if (!ino)
1652 return -ENODEV;
1653
1654 if (intr_len != ino_len)
1655 return -EINVAL;
1656
1657 ip->num_intrs = intr_len / sizeof(u64);
1658 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1659 ip->num_intrs),
1660 GFP_KERNEL);
1661 if (!ip->ino_table)
1662 return -ENOMEM;
1663
1664 for (i = 0; i < ip->num_intrs; i++) {
1665 struct ino_blob *b = &ip->ino_table[i];
1666 b->intr = intr[i];
1667 b->ino = ino[i];
1668 }
1669
1670 return 0;
1671}
1672
1673static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1674 struct of_device *dev,
1675 struct spu_mdesc_info *ip,
1676 const char *node_name)
1677{
1678 const unsigned int *reg;
1679 u64 node;
1680
1681 reg = of_get_property(dev->dev.of_node, "reg", NULL);
1682 if (!reg)
1683 return -ENODEV;
1684
1685 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1686 const char *name;
1687 const u64 *chdl;
1688
1689 name = mdesc_get_property(mdesc, node, "name", NULL);
1690 if (!name || strcmp(name, node_name))
1691 continue;
1692 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1693 if (!chdl || (*chdl != *reg))
1694 continue;
1695 ip->cfg_handle = *chdl;
1696 return get_irq_props(mdesc, node, ip);
1697 }
1698
1699 return -ENODEV;
1700}
1701
1702static unsigned long n2_spu_hvapi_major;
1703static unsigned long n2_spu_hvapi_minor;
1704
1705static int __devinit n2_spu_hvapi_register(void)
1706{
1707 int err;
1708
1709 n2_spu_hvapi_major = 2;
1710 n2_spu_hvapi_minor = 0;
1711
1712 err = sun4v_hvapi_register(HV_GRP_NCS,
1713 n2_spu_hvapi_major,
1714 &n2_spu_hvapi_minor);
1715
1716 if (!err)
1717 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1718 n2_spu_hvapi_major,
1719 n2_spu_hvapi_minor);
1720
1721 return err;
1722}
1723
1724static void n2_spu_hvapi_unregister(void)
1725{
1726 sun4v_hvapi_unregister(HV_GRP_NCS);
1727}
1728
1729static int global_ref;
1730
1731static int __devinit grab_global_resources(void)
1732{
1733 int err = 0;
1734
1735 mutex_lock(&spu_lock);
1736
1737 if (global_ref++)
1738 goto out;
1739
1740 err = n2_spu_hvapi_register();
1741 if (err)
1742 goto out;
1743
1744 err = queue_cache_init();
1745 if (err)
1746 goto out_hvapi_release;
1747
1748 err = -ENOMEM;
1749 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1750 GFP_KERNEL);
1751 if (!cpu_to_cwq)
1752 goto out_queue_cache_destroy;
1753
1754 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1755 GFP_KERNEL);
1756 if (!cpu_to_mau)
1757 goto out_free_cwq_table;
1758
1759 err = 0;
1760
1761out:
1762 if (err)
1763 global_ref--;
1764 mutex_unlock(&spu_lock);
1765 return err;
1766
1767out_free_cwq_table:
1768 kfree(cpu_to_cwq);
1769 cpu_to_cwq = NULL;
1770
1771out_queue_cache_destroy:
1772 queue_cache_destroy();
1773
1774out_hvapi_release:
1775 n2_spu_hvapi_unregister();
1776 goto out;
1777}
1778
1779static void release_global_resources(void)
1780{
1781 mutex_lock(&spu_lock);
1782 if (!--global_ref) {
1783 kfree(cpu_to_cwq);
1784 cpu_to_cwq = NULL;
1785
1786 kfree(cpu_to_mau);
1787 cpu_to_mau = NULL;
1788
1789 queue_cache_destroy();
1790 n2_spu_hvapi_unregister();
1791 }
1792 mutex_unlock(&spu_lock);
1793}
1794
1795static struct n2_crypto * __devinit alloc_n2cp(void)
1796{
1797 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1798
1799 if (np)
1800 INIT_LIST_HEAD(&np->cwq_list);
1801
1802 return np;
1803}
1804
1805static void free_n2cp(struct n2_crypto *np)
1806{
1807 if (np->cwq_info.ino_table) {
1808 kfree(np->cwq_info.ino_table);
1809 np->cwq_info.ino_table = NULL;
1810 }
1811
1812 kfree(np);
1813}
1814
1815static void __devinit n2_spu_driver_version(void)
1816{
1817 static int n2_spu_version_printed;
1818
1819 if (n2_spu_version_printed++ == 0)
1820 pr_info("%s", version);
1821}
1822
1823static int __devinit n2_crypto_probe(struct of_device *dev,
1824 const struct of_device_id *match)
1825{
1826 struct mdesc_handle *mdesc;
1827 const char *full_name;
1828 struct n2_crypto *np;
1829 int err;
1830
1831 n2_spu_driver_version();
1832
1833 full_name = dev->dev.of_node->full_name;
1834 pr_info("Found N2CP at %s\n", full_name);
1835
1836 np = alloc_n2cp();
1837 if (!np) {
1838 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
1839 full_name);
1840 return -ENOMEM;
1841 }
1842
1843 err = grab_global_resources();
1844 if (err) {
1845 dev_err(&dev->dev, "%s: Unable to grab "
1846 "global resources.\n", full_name);
1847 goto out_free_n2cp;
1848 }
1849
1850 mdesc = mdesc_grab();
1851
1852 if (!mdesc) {
1853 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1854 full_name);
1855 err = -ENODEV;
1856 goto out_free_global;
1857 }
1858 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1859 if (err) {
1860 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1861 full_name);
1862 mdesc_release(mdesc);
1863 goto out_free_global;
1864 }
1865
1866 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1867 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1868 cpu_to_cwq);
1869 mdesc_release(mdesc);
1870
1871 if (err) {
1872 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
1873 full_name);
1874 goto out_free_global;
1875 }
1876
1877 err = n2_register_algs();
1878 if (err) {
1879 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
1880 full_name);
1881 goto out_free_spu_list;
1882 }
1883
1884 dev_set_drvdata(&dev->dev, np);
1885
1886 return 0;
1887
1888out_free_spu_list:
1889 spu_list_destroy(&np->cwq_list);
1890
1891out_free_global:
1892 release_global_resources();
1893
1894out_free_n2cp:
1895 free_n2cp(np);
1896
1897 return err;
1898}
1899
1900static int __devexit n2_crypto_remove(struct of_device *dev)
1901{
1902 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
1903
1904 n2_unregister_algs();
1905
1906 spu_list_destroy(&np->cwq_list);
1907
1908 release_global_resources();
1909
1910 free_n2cp(np);
1911
1912 return 0;
1913}
1914
1915static struct n2_mau * __devinit alloc_ncp(void)
1916{
1917 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
1918
1919 if (mp)
1920 INIT_LIST_HEAD(&mp->mau_list);
1921
1922 return mp;
1923}
1924
1925static void free_ncp(struct n2_mau *mp)
1926{
1927 if (mp->mau_info.ino_table) {
1928 kfree(mp->mau_info.ino_table);
1929 mp->mau_info.ino_table = NULL;
1930 }
1931
1932 kfree(mp);
1933}
1934
1935static int __devinit n2_mau_probe(struct of_device *dev,
1936 const struct of_device_id *match)
1937{
1938 struct mdesc_handle *mdesc;
1939 const char *full_name;
1940 struct n2_mau *mp;
1941 int err;
1942
1943 n2_spu_driver_version();
1944
1945 full_name = dev->dev.of_node->full_name;
1946 pr_info("Found NCP at %s\n", full_name);
1947
1948 mp = alloc_ncp();
1949 if (!mp) {
1950 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
1951 full_name);
1952 return -ENOMEM;
1953 }
1954
1955 err = grab_global_resources();
1956 if (err) {
1957 dev_err(&dev->dev, "%s: Unable to grab "
1958 "global resources.\n", full_name);
1959 goto out_free_ncp;
1960 }
1961
1962 mdesc = mdesc_grab();
1963
1964 if (!mdesc) {
1965 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
1966 full_name);
1967 err = -ENODEV;
1968 goto out_free_global;
1969 }
1970
1971 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
1972 if (err) {
1973 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
1974 full_name);
1975 mdesc_release(mdesc);
1976 goto out_free_global;
1977 }
1978
1979 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
1980 "mau", HV_NCS_QTYPE_MAU, mau_intr,
1981 cpu_to_mau);
1982 mdesc_release(mdesc);
1983
1984 if (err) {
1985 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
1986 full_name);
1987 goto out_free_global;
1988 }
1989
1990 dev_set_drvdata(&dev->dev, mp);
1991
1992 return 0;
1993
1994out_free_global:
1995 release_global_resources();
1996
1997out_free_ncp:
1998 free_ncp(mp);
1999
2000 return err;
2001}
2002
2003static int __devexit n2_mau_remove(struct of_device *dev)
2004{
2005 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2006
2007 spu_list_destroy(&mp->mau_list);
2008
2009 release_global_resources();
2010
2011 free_ncp(mp);
2012
2013 return 0;
2014}
2015
2016static struct of_device_id n2_crypto_match[] = {
2017 {
2018 .name = "n2cp",
2019 .compatible = "SUNW,n2-cwq",
2020 },
2021 {
2022 .name = "n2cp",
2023 .compatible = "SUNW,vf-cwq",
2024 },
2025 {},
2026};
2027
2028MODULE_DEVICE_TABLE(of, n2_crypto_match);
2029
2030static struct of_platform_driver n2_crypto_driver = {
2031 .driver = {
2032 .name = "n2cp",
2033 .owner = THIS_MODULE,
2034 .of_match_table = n2_crypto_match,
2035 },
2036 .probe = n2_crypto_probe,
2037 .remove = __devexit_p(n2_crypto_remove),
2038};
2039
2040static struct of_device_id n2_mau_match[] = {
2041 {
2042 .name = "ncp",
2043 .compatible = "SUNW,n2-mau",
2044 },
2045 {
2046 .name = "ncp",
2047 .compatible = "SUNW,vf-mau",
2048 },
2049 {},
2050};
2051
2052MODULE_DEVICE_TABLE(of, n2_mau_match);
2053
2054static struct of_platform_driver n2_mau_driver = {
2055 .driver = {
2056 .name = "ncp",
2057 .owner = THIS_MODULE,
2058 .of_match_table = n2_mau_match,
2059 },
2060 .probe = n2_mau_probe,
2061 .remove = __devexit_p(n2_mau_remove),
2062};
2063
2064static int __init n2_init(void)
2065{
2066 int err = of_register_driver(&n2_crypto_driver, &of_bus_type);
2067
2068 if (!err) {
2069 err = of_register_driver(&n2_mau_driver, &of_bus_type);
2070 if (err)
2071 of_unregister_driver(&n2_crypto_driver);
2072 }
2073 return err;
2074}
2075
2076static void __exit n2_exit(void)
2077{
2078 of_unregister_driver(&n2_mau_driver);
2079 of_unregister_driver(&n2_crypto_driver);
2080}
2081
2082module_init(n2_init);
2083module_exit(n2_exit);