/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.1"
#define DRV_MODULE_RELDATE	"April 29, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		300

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}
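
/* Both queue types are simple rings of fixed-size entries: an offset
 * sitting on the last entry advances by one entry size, hits the total
 * queue size exactly, and wraps back to offset 0.
 */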

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test to see if that offset is in
 * the range old_head (exclusive) to new_head (inclusive).
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
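
/* Worked example: with old_head = 0x3c0 and new_head = 0x040 the
 * window has wrapped, so offset 0x3e0 (> old_head) and offset 0x040
 * (<= new_head) both count as finished while 0x080 does not; an
 * unwrapped window takes the first branch instead.
 */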

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
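
/* One entry's worth of space is always held back (the "- 1" above):
 * a fully drained queue (head == tail) reports CWQ_NUM_ENTRIES - 1
 * free slots, and since the tail never advances onto the head,
 * head == tail unambiguously means "empty" rather than "full".
 */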

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
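
/* All length fields in the control word are biased by one -- a stored
 * value of zero means length one -- hence the "- 1" on len,
 * hmac_key_len and hash_len above.  n2_do_async_digest() below builds
 * its first descriptor with sob = true, eob = false and
 * OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC, then ORs CONTROL_END_OF_BLOCK
 * into whichever descriptor ends up last in the chain.
 */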

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const char		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hash_ctx {
	struct crypto_ahash		*fallback_tfm;
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request		fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}
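
/* Note: wait_for_tail() busy-polls the hypervisor until the hardware
 * has consumed everything up to the stored tail.  So although the algs
 * below are registered CRYPTO_ALG_ASYNC, a submitted request is in
 * fact completed synchronously, with the queue lock held and
 * interrupts disabled, before control returns to the caller.
 */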

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, 0, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u);
}

struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
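
/* A minimal sketch of the rule above -- illustrative only and compiled
 * out, not part of the driver.  It additionally assumes the caller
 * wants the overall length block-aligned, as the block-chaining modes
 * do.
 */
#if 0
static bool descriptor_lengths_valid(const unsigned int *len, int n,
				     unsigned int block_size)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* Rejects 0xe --> 0xb --> 0x7: the trailing 0x7 is
		 * smaller than the 8 byte block size.
		 */
		if (len[i] < block_size)
			return false;
		total += len[i];
	}
	/* Accepts 0xe --> 0xa --> 0x8: each >= 8, 0xe + 0xa + 0x8 = 0x20. */
	return (total % block_size) == 0;
}
#endif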

struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = n2alg->enc_type;

	if (keylen != (3 * DES_KEY_SIZE)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
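
/* Example: nbytes = 0x2345 with a 16 byte block size rounds down to
 * 0x2340; anything above 2^16 is clamped to 2^16 so that a single
 * descriptor never overflows the control word's length field.
 */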

static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

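/* Carve the physical-page walk into chunks that one submission can
 * cover.  A new chunk is started whenever the in-place-ness of the
 * operation changes, an out-of-place destination stops being
 * physically contiguous, the descriptor array fills up
 * (N2_CHUNK_ARR_LEN entries), or the running total would exceed the
 * 2^16 byte limit of the control word's length field.
 */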
static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}

static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

	put_cpu();

out:
	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}

static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

	put_cpu();

out:
	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}
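
/* IV handling for the chaining modes above: on encryption each chunk's
 * IV is the last ciphertext block the previous chunk produced
 * (dest_final minus one block), so the chunk list is walked front to
 * back.  On decryption that block lives in the *source* buffer, which
 * an in-place operation overwrites; the list is therefore walked in
 * reverse, and the final ciphertext block is stashed in temp_iv up
 * front so it can still be reported back as the new IV.
 */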

struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{	.name		= "ecb(arc4)",
		.drv_name	= "ecb-arc4",
		.block_size	= 1,
		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 1,
			.max_keysize	= 256,
			.setkey		= n2_arc4_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},

	/* DES: ECB, CBC and CFB are supported */
	{	.name		= "ecb(des)",
		.drv_name	= "ecb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des)",
		.drv_name	= "cbc-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des)",
		.drv_name	= "cfb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* 3DES: ECB, CBC and CFB are supported */
	{	.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des3_ede)",
		.drv_name	= "cfb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	/* AES: ECB, CBC and CTR are supported */
	{	.name		= "ecb(aes)",
		.drv_name	= "ecb-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(aes)",
		.drv_name	= "cbc-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "ctr(aes)",
		.drv_name	= "ctr-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_COUNTER),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_encrypt_chaining,
		},
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	const char	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
};

static const char md5_zero[MD5_DIGEST_SIZE] = {
	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(0x67452301),
	cpu_to_le32(0xefcdab89),
	cpu_to_le32(0x98badcfe),
	cpu_to_le32(0x10325476),
};
static const char sha1_zero[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
	0x07, 0x09
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const char sha256_zero[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
	0x1b, 0x78, 0x52, 0xb8, 0x55
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const char sha224_zero[SHA224_DIGEST_SIZE] = {
	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
	0x2f
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero,
	  .hash_init	= md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero,
	  .hash_init	= sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero,
	  .hash_init	= sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero,
	  .hash_init	= sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
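
/* "sha224" rides on the SHA-256 engine: auth_type is AUTH_TYPE_SHA256
 * and hw_op_hashsz is the full 32-byte SHA256_DIGEST_SIZE, so the
 * hardware computes a complete SHA-256 state seeded with the SHA-224
 * initial constants and only the leading SHA224_DIGEST_SIZE bytes are
 * copied out as the digest.
 */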
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_cipher_alg *cipher, *cipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;

	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&cipher->alg);
		list_del(&cipher->entry);
		kfree(cipher);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}

static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct crypto_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->cra_priority = N2_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_u.ablkcipher = tmpl->ablkcipher;
	alg->cra_init = n2_cipher_cra_init;
	alg->cra_module = THIS_MODULE;

	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->cra_name);
	}
	return err;
}

static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}

static int __devinit n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void __exit n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the of_device node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
			   p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	return kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
}

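/* Queue registration runs with the current task temporarily rebound to
 * the queue's sharing cpumask, presumably because sun4v_ncs_qconf()
 * must be issued from one of the hardware strands attached to that
 * crypto unit; the original affinity is restored afterwards.
 */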
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	cpumask_var_t old_allowed;
	unsigned long hv_ret;

	if (cpumask_empty(&p->sharing))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_allowed, &current->cpus_allowed);

	set_cpus_allowed_ptr(current, &p->sharing);

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	set_cpus_allowed_ptr(current, old_allowed);

	free_cpumask_var(old_allowed);

	return (hv_ret ? -EINVAL : 0);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct of_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
				dev->dev.of_node->full_name);
			return -EINVAL;
		}
		cpu_set(*id, p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct of_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
				   struct spu_mdesc_info *ip)
{
	const u64 *intr, *ino;
	int intr_len, ino_len;
	int i;

	intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
	if (!intr)
		return -ENODEV;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino)
		return -ENODEV;
1635 | ||
1636 | if (intr_len != ino_len) | |
1637 | return -EINVAL; | |
1638 | ||
1639 | ip->num_intrs = intr_len / sizeof(u64); | |
1640 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * | |
1641 | ip->num_intrs), | |
1642 | GFP_KERNEL); | |
1643 | if (!ip->ino_table) | |
1644 | return -ENOMEM; | |
1645 | ||
1646 | for (i = 0; i < ip->num_intrs; i++) { | |
1647 | struct ino_blob *b = &ip->ino_table[i]; | |
1648 | b->intr = intr[i]; | |
1649 | b->ino = ino[i]; | |
1650 | } | |
1651 | ||
1652 | return 0; | |
1653 | } | |
1654 | ||
1655 | static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, | |
1656 | struct of_device *dev, | |
1657 | struct spu_mdesc_info *ip, | |
1658 | const char *node_name) | |
1659 | { | |
1660 | const unsigned int *reg; | |
1661 | u64 node; | |
1662 | ||
ff6c7341 | 1663 | reg = of_get_property(dev->dev.of_node, "reg", NULL); |
0a625fd2 DM |
1664 | if (!reg) |
1665 | return -ENODEV; | |
1666 | ||
1667 | mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { | |
1668 | const char *name; | |
1669 | const u64 *chdl; | |
1670 | ||
1671 | name = mdesc_get_property(mdesc, node, "name", NULL); | |
1672 | if (!name || strcmp(name, node_name)) | |
1673 | continue; | |
1674 | chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); | |
1675 | if (!chdl || (*chdl != *reg)) | |
1676 | continue; | |
1677 | ip->cfg_handle = *chdl; | |
1678 | return get_irq_props(mdesc, node, ip); | |
1679 | } | |
1680 | ||
1681 | return -ENODEV; | |
1682 | } | |
1683 | ||
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int __devinit n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);
	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

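/* Resources shared by the n2cp and ncp device instances: the HVAPI
 * registration, the queue caches, and the cpu_to_cwq/cpu_to_mau
 * tables.  They are reference counted under spu_lock so the first
 * probe sets them up and the last remove tears them down.
 */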
static int global_ref;

static int __devinit grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

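/* Per-device state for the n2cp (CWQ) node.  The embedded cwq_info
 * owns the ino_table allocated by get_irq_props(), so free_n2cp()
 * must release it before freeing the containing structure.
 */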
static struct n2_crypto * __devinit alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void __devinit n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

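/* Probe sequence for an n2cp node: take a reference on the global
 * resources, grab the machine description, resolve the device's IRQ
 * properties, scan for its CWQ exec-units, and only then register the
 * crypto algorithms.  Each failure point unwinds everything acquired
 * before it, in reverse order.
 */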
static int __devinit n2_crypto_probe(struct of_device *dev,
				     const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int __devexit n2_crypto_remove(struct of_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

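/* Per-device state for the ncp (MAU) node, mirroring n2_crypto above:
 * mau_info owns the ino_table from get_irq_props() and mau_list holds
 * the MAU queues.
 */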
static struct n2_mau * __devinit alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

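/* Same shape as n2_crypto_probe(): globals, MDESC, IRQ properties,
 * then a scan for "mau" exec-units with the MAU queue type and
 * interrupt handler.  Note that no crypto algorithms are registered
 * here; the MAU queues are only brought up and bound to CPUs.
 */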
static int __devinit n2_mau_probe(struct of_device *dev,
				  const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int __devexit n2_mau_remove(struct of_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

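/* Each device is matched in two flavors: the Niagara2 "SUNW,n2-*"
 * compatible strings and the "SUNW,vf-*" ones (i.e., the Victoria
 * Falls variants of the same units).
 */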
static struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct of_platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.owner = THIS_MODULE,
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove = __devexit_p(n2_crypto_remove),
};

static struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct of_platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.owner = THIS_MODULE,
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove = __devexit_p(n2_mau_remove),
};

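/* Register the CWQ driver first and the MAU driver second; if the
 * second registration fails, the first is backed out so module load
 * either succeeds completely or leaves nothing behind.
 */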
static int __init n2_init(void)
{
	int err = of_register_driver(&n2_crypto_driver, &of_bus_type);

	if (!err) {
		err = of_register_driver(&n2_mau_driver, &of_bus_type);
		if (err)
			of_unregister_driver(&n2_crypto_driver);
	}
	return err;
}

static void __exit n2_exit(void)
{
	of_unregister_driver(&n2_mau_driver);
	of_unregister_driver(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);