crypto: talitos - Implement AEAD for SEC1 using HMAC_SNOOP_NO_AFEU
drivers/crypto/talitos.c
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

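/*
 * h/w descriptor pointer helpers: SEC2/3 pointers carry a 36-bit bus
 * address (upper 4 bits in eptr), a 16-bit len and a j_extent field;
 * SEC1 pointers are plain 32-bit addresses with a 16-bit len1 and no
 * extent, hence the is_sec1 variants below.
 */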
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

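/*
 * Reset a single channel, then re-enable 36-bit addressing, done
 * writeback and the done IRQ (plus ICCR writeback when the h/w
 * supports hardware auth check).
 */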
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

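/*
 * Per-request hash state: hw_context holds the MDEU running digest,
 * buf accumulates a partial block between updates, and bufnext stashes
 * the bytes held back for the next update/final/finup.
 */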
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

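/*
 * Hash state blob carried across requests (presumably via the ahash
 * export/import callbacks, not shown in this hunk); mirrors the fields
 * of talitos_ahash_req_ctx that must survive.
 */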
struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

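/*
 * Split an authenc() key blob into authentication and encryption keys
 * and store them back to back in ctx->key.
 */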
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

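/*
 * Undo the DMA mappings set up for a request.  On SEC1 a multi-segment
 * destination was bounced through edesc->buf, so sync and copy the
 * result back into the scatterlist before unmapping.
 */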
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}

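/*
 * Point a descriptor pointer at scatterlist data: a single segment is
 * referenced directly, SEC1 uses the contiguous bounce buffer, and
 * SEC2/3 multi-segment data gets a link table with the JUMP extent set.
 */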
int talitos_sg_map(struct device *dev, struct scatterlist *src,
		   unsigned int len, struct talitos_edesc *edesc,
		   struct talitos_ptr *ptr,
		   int sg_count, unsigned int offset, int tbl_off)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

/*
 * fill in and submit ipsec_esp descriptor
 */
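/*
 * Pointer usage below: ptr[0] = HMAC key, ptr[1] = associated data,
 * ptr[4] = cipher in, ptr[5] = cipher out.  For IPSEC_ESP descriptors
 * ptr[2] = cipher iv, ptr[3] = cipher key and ptr[6] = iv out; the
 * non-ESP type (HMAC_SNOOP_NO_AFEU, used for AEAD on SEC1) swaps
 * ptr[2] and ptr[3] and ptr[6] receives the ICV instead.
 */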
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
	} else {
		to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
	}

	/* cipher key */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is the number of HMAC bytes appended to the ciphertext,
	 * typically 12 for ipsec
	 */
	to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);

	sg_link_tbl_len = cryptlen;

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len += authsize;
	}

	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[4], sg_count, areq->assoclen,
				  tbl_off);

	if (sg_count > 1) {
		tbl_off += sg_count;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
				  &desc->ptr[5], sg_count, areq->assoclen,
				  tbl_off);

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	if (sg_count > 1) {
		edesc->icv_ool = true;
		sync_needed = true;

		if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			tbl_ptr += sg_count - 1;
			to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
			tbl_ptr++;
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);
			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       is_sec1);
		}
	} else {
		edesc->icv_ool = false;
	}

	/* ICV data */
	if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
		to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
					      areq->assoclen + cryptlen, is_sec1);
	}

	/* iv out */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

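/*
 * AEAD decrypt: use the h/w ICV check when the device supports it for
 * this request layout; otherwise stash the incoming ICV and compare it
 * in software against the one the h/w generates.
 */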
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

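/*
 * Fill in and submit an ablkcipher descriptor: ptr[1] = iv, ptr[2] =
 * key, ptr[3] = cipher in, ptr[4] = cipher out, ptr[5] = iv out;
 * ptr[0] and ptr[6] stay empty.
 */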
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of zero-sized messages, so we do the
 * padding ourselves and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

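/*
 * Fill in and submit a hash descriptor: ptr[1] = hash context in (when
 * resuming or after a s/w init), ptr[2] = HMAC key, ptr[3] = data in,
 * ptr[5] = digest out on the last op, hash context out otherwise.
 */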
1728 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1729 struct ahash_request *areq, unsigned int length,
1730 void (*callback) (struct device *dev,
1731 struct talitos_desc *desc,
1732 void *context, int error))
1733 {
1734 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1735 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1736 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1737 struct device *dev = ctx->dev;
1738 struct talitos_desc *desc = &edesc->desc;
1739 int ret;
1740 bool sync_needed = false;
1741 struct talitos_private *priv = dev_get_drvdata(dev);
1742 bool is_sec1 = has_ftr_sec1(priv);
1743 int sg_count;
1744
1745 /* first DWORD empty */
1746 desc->ptr[0] = zero_entry;
1747
1748 /* hash context in */
1749 if (!req_ctx->first || req_ctx->swinit) {
1750 map_single_talitos_ptr(dev, &desc->ptr[1],
1751 req_ctx->hw_context_size,
1752 (char *)req_ctx->hw_context,
1753 DMA_TO_DEVICE);
1754 req_ctx->swinit = 0;
1755 } else {
1756 desc->ptr[1] = zero_entry;
1757 /* Indicate next op is not the first. */
1758 req_ctx->first = 0;
1759 }
1760
1761 /* HMAC key */
1762 if (ctx->keylen)
1763 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1764 (char *)&ctx->key, DMA_TO_DEVICE);
1765 else
1766 desc->ptr[2] = zero_entry;
1767
1768 sg_count = edesc->src_nents ?: 1;
1769 if (is_sec1 && sg_count > 1)
1770 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
1771 else
1772 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1773 DMA_TO_DEVICE);
1774 /*
1775 * data in
1776 */
1777 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1778 &desc->ptr[3], sg_count, 0, 0);
1779 if (sg_count > 1)
1780 sync_needed = true;
1781
1782 /* fifth DWORD empty */
1783 desc->ptr[4] = zero_entry;
1784
1785 /* hash/HMAC out -or- hash context out */
1786 if (req_ctx->last)
1787 map_single_talitos_ptr(dev, &desc->ptr[5],
1788 crypto_ahash_digestsize(tfm),
1789 areq->result, DMA_FROM_DEVICE);
1790 else
1791 map_single_talitos_ptr(dev, &desc->ptr[5],
1792 req_ctx->hw_context_size,
1793 req_ctx->hw_context, DMA_FROM_DEVICE);
1794
1795 /* last DWORD empty */
1796 desc->ptr[6] = zero_entry;
1797
1798 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1799 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1800
1801 if (sync_needed)
1802 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1803 edesc->dma_len, DMA_BIDIRECTIONAL);
1804
1805 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1806 if (ret != -EINPROGRESS) {
1807 common_nonsnoop_hash_unmap(dev, edesc, areq);
1808 kfree(edesc);
1809 }
1810 return ret;
1811 }
1812
1813 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1814 unsigned int nbytes)
1815 {
1816 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1817 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1818 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1819
1820 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1821 nbytes, 0, 0, 0, areq->base.flags, false);
1822 }
1823
1824 static int ahash_init(struct ahash_request *areq)
1825 {
1826 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1827 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1828
1829 /* Initialize the context */
1830 req_ctx->nbuf = 0;
1831 req_ctx->first = 1; /* first indicates h/w must init its context */
1832 req_ctx->swinit = 0; /* assume h/w init of context */
1833 req_ctx->hw_context_size =
1834 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1835 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1836 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1837
1838 return 0;
1839 }
1840
1841 /*
1842 * on h/w without explicit sha224 support, we initialize h/w context
1843 * manually with sha224 constants, and tell it to run sha256.
1844 */
1845 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1846 {
1847 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1848
1849 ahash_init(areq);
	/* prevent h/w from initializing context with sha256 values */
	req_ctx->swinit = 1;
1851
1852 req_ctx->hw_context[0] = SHA224_H0;
1853 req_ctx->hw_context[1] = SHA224_H1;
1854 req_ctx->hw_context[2] = SHA224_H2;
1855 req_ctx->hw_context[3] = SHA224_H3;
1856 req_ctx->hw_context[4] = SHA224_H4;
1857 req_ctx->hw_context[5] = SHA224_H5;
1858 req_ctx->hw_context[6] = SHA224_H6;
1859 req_ctx->hw_context[7] = SHA224_H7;
1860
1861 /* init 64-bit count */
1862 req_ctx->hw_context[8] = 0;
1863 req_ctx->hw_context[9] = 0;
1864
1865 return 0;
1866 }
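
/*
 * SHA224_H0..SHA224_H7 are the FIPS 180 initial hash values supplied by
 * <crypto/sha.h>.  SHA-224 is SHA-256 with different IVs and a truncated
 * output, so once the context is seeded above the engine can run ordinary
 * SHA-256 rounds; the 28-byte digest is simply the leading 28 bytes of the
 * final state.
 */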
1867
1868 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1869 {
1870 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1871 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1872 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1873 struct talitos_edesc *edesc;
1874 unsigned int blocksize =
1875 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1876 unsigned int nbytes_to_hash;
1877 unsigned int to_hash_later;
1878 unsigned int nsg;
1879 int nents;
1880
1881 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1882 /* Buffer up to one whole block */
1883 nents = sg_nents_for_len(areq->src, nbytes);
1884 if (nents < 0) {
1885 dev_err(ctx->dev, "Invalid number of src SG.\n");
1886 return nents;
1887 }
1888 sg_copy_to_buffer(areq->src, nents,
1889 req_ctx->buf + req_ctx->nbuf, nbytes);
1890 req_ctx->nbuf += nbytes;
1891 return 0;
1892 }
1893
1894 /* At least (blocksize + 1) bytes are available to hash */
1895 nbytes_to_hash = nbytes + req_ctx->nbuf;
1896 to_hash_later = nbytes_to_hash & (blocksize - 1);
1897
1898 if (req_ctx->last)
1899 to_hash_later = 0;
1900 else if (to_hash_later)
1901 /* There is a partial block. Hash the full block(s) now */
1902 nbytes_to_hash -= to_hash_later;
1903 else {
1904 /* Keep one block buffered */
1905 nbytes_to_hash -= blocksize;
1906 to_hash_later = blocksize;
1907 }
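
	/*
	 * Worked example (illustrative numbers): with a 64-byte block size,
	 * 10 bytes already buffered and a 100-byte update, nbytes_to_hash is
	 * 110 and to_hash_later is 110 % 64 = 46, so 64 bytes are hashed now
	 * and 46 are carried over.  At an exact multiple (say 128 bytes in
	 * total), a full block is still held back so a later final op always
	 * has data to pad.
	 */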
1908
1909 /* Chain in any previously buffered data */
1910 if (req_ctx->nbuf) {
1911 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1912 sg_init_table(req_ctx->bufsl, nsg);
1913 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1914 if (nsg > 1)
1915 sg_chain(req_ctx->bufsl, 2, areq->src);
1916 req_ctx->psrc = req_ctx->bufsl;
	} else {
		req_ctx->psrc = areq->src;
	}
1919
1920 if (to_hash_later) {
1921 nents = sg_nents_for_len(areq->src, nbytes);
1922 if (nents < 0) {
1923 dev_err(ctx->dev, "Invalid number of src SG.\n");
1924 return nents;
1925 }
1926 sg_pcopy_to_buffer(areq->src, nents,
1927 req_ctx->bufnext,
1928 to_hash_later,
1929 nbytes - to_hash_later);
1930 }
1931 req_ctx->to_hash_later = to_hash_later;
1932
1933 /* Allocate extended descriptor */
1934 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1935 if (IS_ERR(edesc))
1936 return PTR_ERR(edesc);
1937
1938 edesc->desc.hdr = ctx->desc_hdr_template;
1939
1940 /* On last one, request SEC to pad; otherwise continue */
1941 if (req_ctx->last)
1942 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1943 else
1944 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1945
1946 /* request SEC to INIT hash. */
1947 if (req_ctx->first && !req_ctx->swinit)
1948 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1949
	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
1953 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1954 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1955
1956 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1957 ahash_done);
1958 }
1959
1960 static int ahash_update(struct ahash_request *areq)
1961 {
1962 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1963
1964 req_ctx->last = 0;
1965
1966 return ahash_process_req(areq, areq->nbytes);
1967 }
1968
1969 static int ahash_final(struct ahash_request *areq)
1970 {
1971 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1972
1973 req_ctx->last = 1;
1974
1975 return ahash_process_req(areq, 0);
1976 }
1977
1978 static int ahash_finup(struct ahash_request *areq)
1979 {
1980 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1981
1982 req_ctx->last = 1;
1983
1984 return ahash_process_req(areq, areq->nbytes);
1985 }
1986
1987 static int ahash_digest(struct ahash_request *areq)
1988 {
1989 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1990 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1991
1992 ahash->init(areq);
1993 req_ctx->last = 1;
1994
1995 return ahash_process_req(areq, areq->nbytes);
1996 }
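
/*
 * Minimal usage sketch (illustrative, not part of the driver): how a kernel
 * client could drive the entry points above through the generic ahash API.
 * Error handling is trimmed, the buffer is assumed to be linearly mapped,
 * and an asynchronous completion would normally be awaited as keyhash()
 * below does.
 */
static int __maybe_unused talitos_ahash_usage_sketch(const u8 *data,
						     unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);
	ret = crypto_ahash_digest(req);	/* may return -EINPROGRESS */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}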
1997
1998 static int ahash_export(struct ahash_request *areq, void *out)
1999 {
2000 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2001 struct talitos_export_state *export = out;
2002
2003 memcpy(export->hw_context, req_ctx->hw_context,
2004 req_ctx->hw_context_size);
2005 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2006 export->swinit = req_ctx->swinit;
2007 export->first = req_ctx->first;
2008 export->last = req_ctx->last;
2009 export->to_hash_later = req_ctx->to_hash_later;
2010 export->nbuf = req_ctx->nbuf;
2011
2012 return 0;
2013 }
2014
2015 static int ahash_import(struct ahash_request *areq, const void *in)
2016 {
2017 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2018 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2019 const struct talitos_export_state *export = in;
2020
2021 memset(req_ctx, 0, sizeof(*req_ctx));
2022 req_ctx->hw_context_size =
2023 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2024 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2025 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2026 memcpy(req_ctx->hw_context, export->hw_context,
2027 req_ctx->hw_context_size);
2028 memcpy(req_ctx->buf, export->buf, export->nbuf);
2029 req_ctx->swinit = export->swinit;
2030 req_ctx->first = export->first;
2031 req_ctx->last = export->last;
2032 req_ctx->to_hash_later = export->to_hash_later;
2033 req_ctx->nbuf = export->nbuf;
2034
2035 return 0;
2036 }
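
/*
 * The export/import pair above lets a partial hash be snapshotted and
 * resumed later, possibly on a different request -- e.g. (sketch):
 *
 *	struct talitos_export_state state;
 *
 *	crypto_ahash_export(req, &state);
 *	...
 *	crypto_ahash_import(req2, &state);
 *
 * which is why talitos_export_state captures everything needed to rebuild
 * req_ctx from scratch.
 */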
2037
2038 struct keyhash_result {
2039 struct completion completion;
2040 int err;
2041 };
2042
2043 static void keyhash_complete(struct crypto_async_request *req, int err)
2044 {
2045 struct keyhash_result *res = req->data;
2046
2047 if (err == -EINPROGRESS)
2048 return;
2049
2050 res->err = err;
2051 complete(&res->completion);
2052 }
2053
2054 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2055 u8 *hash)
2056 {
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct scatterlist sg[1];
2060 struct ahash_request *req;
2061 struct keyhash_result hresult;
2062 int ret;
2063
2064 init_completion(&hresult.completion);
2065
2066 req = ahash_request_alloc(tfm, GFP_KERNEL);
2067 if (!req)
2068 return -ENOMEM;
2069
2070 /* Keep tfm keylen == 0 during hash of the long key */
2071 ctx->keylen = 0;
2072 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2073 keyhash_complete, &hresult);
2074
2075 sg_init_one(&sg[0], key, keylen);
2076
2077 ahash_request_set_crypt(req, sg, hash, keylen);
2078 ret = crypto_ahash_digest(req);
2079 switch (ret) {
2080 case 0:
2081 break;
2082 case -EINPROGRESS:
2083 case -EBUSY:
2084 ret = wait_for_completion_interruptible(
2085 &hresult.completion);
2086 if (!ret)
2087 ret = hresult.err;
2088 break;
2089 default:
2090 break;
2091 }
2092 ahash_request_free(req);
2093
2094 return ret;
2095 }
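
/*
 * Note: because CRYPTO_TFM_REQ_MAY_BACKLOG is set above, -EBUSY from
 * crypto_ahash_digest() means the request was queued on the backlog and
 * will still complete, which is why it is awaited exactly like
 * -EINPROGRESS.
 */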
2096
2097 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2098 unsigned int keylen)
2099 {
2100 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2101 unsigned int blocksize =
2102 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2103 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2104 unsigned int keysize = keylen;
2105 u8 hash[SHA512_DIGEST_SIZE];
2106 int ret;
2107
	if (keylen <= blocksize) {
		memcpy(ctx->key, key, keysize);
	} else {
2111 /* Must get the hash of the long key */
2112 ret = keyhash(tfm, key, keylen, hash);
2113
2114 if (ret) {
2115 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2116 return -EINVAL;
2117 }
2118
2119 keysize = digestsize;
2120 memcpy(ctx->key, hash, digestsize);
2121 }
2122
2123 ctx->keylen = keysize;
2124
2125 return 0;
2126 }
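
/*
 * This follows RFC 2104: an HMAC key longer than the block size is first
 * replaced by its digest.  For example (illustrative), a 100-byte key for
 * hmac(sha256) (64-byte blocks) is reduced to its 32-byte SHA-256 hash, so
 * the hardware is never handed more than one block of key material.
 */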
2127
2128
2129 struct talitos_alg_template {
2130 u32 type;
2131 union {
2132 struct crypto_alg crypto;
2133 struct ahash_alg hash;
2134 struct aead_alg aead;
2135 } alg;
2136 __be32 desc_hdr_template;
2137 };
2138
2139 static struct talitos_alg_template driver_algs[] = {
2140 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2141 { .type = CRYPTO_ALG_TYPE_AEAD,
2142 .alg.aead = {
2143 .base = {
2144 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2145 .cra_driver_name = "authenc-hmac-sha1-"
2146 "cbc-aes-talitos",
2147 .cra_blocksize = AES_BLOCK_SIZE,
2148 .cra_flags = CRYPTO_ALG_ASYNC,
2149 },
2150 .ivsize = AES_BLOCK_SIZE,
2151 .maxauthsize = SHA1_DIGEST_SIZE,
2152 },
2153 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2154 DESC_HDR_SEL0_AESU |
2155 DESC_HDR_MODE0_AESU_CBC |
2156 DESC_HDR_SEL1_MDEUA |
2157 DESC_HDR_MODE1_MDEU_INIT |
2158 DESC_HDR_MODE1_MDEU_PAD |
2159 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2160 },
2161 { .type = CRYPTO_ALG_TYPE_AEAD,
2162 .alg.aead = {
2163 .base = {
2164 .cra_name = "authenc(hmac(sha1),"
2165 "cbc(des3_ede))",
2166 .cra_driver_name = "authenc-hmac-sha1-"
2167 "cbc-3des-talitos",
2168 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2169 .cra_flags = CRYPTO_ALG_ASYNC,
2170 },
2171 .ivsize = DES3_EDE_BLOCK_SIZE,
2172 .maxauthsize = SHA1_DIGEST_SIZE,
2173 },
2174 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2175 DESC_HDR_SEL0_DEU |
2176 DESC_HDR_MODE0_DEU_CBC |
2177 DESC_HDR_MODE0_DEU_3DES |
2178 DESC_HDR_SEL1_MDEUA |
2179 DESC_HDR_MODE1_MDEU_INIT |
2180 DESC_HDR_MODE1_MDEU_PAD |
2181 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2182 },
2183 { .type = CRYPTO_ALG_TYPE_AEAD,
2184 .alg.aead = {
2185 .base = {
2186 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2187 .cra_driver_name = "authenc-hmac-sha224-"
2188 "cbc-aes-talitos",
2189 .cra_blocksize = AES_BLOCK_SIZE,
2190 .cra_flags = CRYPTO_ALG_ASYNC,
2191 },
2192 .ivsize = AES_BLOCK_SIZE,
2193 .maxauthsize = SHA224_DIGEST_SIZE,
2194 },
2195 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2196 DESC_HDR_SEL0_AESU |
2197 DESC_HDR_MODE0_AESU_CBC |
2198 DESC_HDR_SEL1_MDEUA |
2199 DESC_HDR_MODE1_MDEU_INIT |
2200 DESC_HDR_MODE1_MDEU_PAD |
2201 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2202 },
2203 { .type = CRYPTO_ALG_TYPE_AEAD,
2204 .alg.aead = {
2205 .base = {
2206 .cra_name = "authenc(hmac(sha224),"
2207 "cbc(des3_ede))",
2208 .cra_driver_name = "authenc-hmac-sha224-"
2209 "cbc-3des-talitos",
2210 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2211 .cra_flags = CRYPTO_ALG_ASYNC,
2212 },
2213 .ivsize = DES3_EDE_BLOCK_SIZE,
2214 .maxauthsize = SHA224_DIGEST_SIZE,
2215 },
2216 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2217 DESC_HDR_SEL0_DEU |
2218 DESC_HDR_MODE0_DEU_CBC |
2219 DESC_HDR_MODE0_DEU_3DES |
2220 DESC_HDR_SEL1_MDEUA |
2221 DESC_HDR_MODE1_MDEU_INIT |
2222 DESC_HDR_MODE1_MDEU_PAD |
2223 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2224 },
2225 { .type = CRYPTO_ALG_TYPE_AEAD,
2226 .alg.aead = {
2227 .base = {
2228 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2229 .cra_driver_name = "authenc-hmac-sha256-"
2230 "cbc-aes-talitos",
2231 .cra_blocksize = AES_BLOCK_SIZE,
2232 .cra_flags = CRYPTO_ALG_ASYNC,
2233 },
2234 .ivsize = AES_BLOCK_SIZE,
2235 .maxauthsize = SHA256_DIGEST_SIZE,
2236 },
2237 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2238 DESC_HDR_SEL0_AESU |
2239 DESC_HDR_MODE0_AESU_CBC |
2240 DESC_HDR_SEL1_MDEUA |
2241 DESC_HDR_MODE1_MDEU_INIT |
2242 DESC_HDR_MODE1_MDEU_PAD |
2243 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2244 },
2245 { .type = CRYPTO_ALG_TYPE_AEAD,
2246 .alg.aead = {
2247 .base = {
2248 .cra_name = "authenc(hmac(sha256),"
2249 "cbc(des3_ede))",
2250 .cra_driver_name = "authenc-hmac-sha256-"
2251 "cbc-3des-talitos",
2252 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2253 .cra_flags = CRYPTO_ALG_ASYNC,
2254 },
2255 .ivsize = DES3_EDE_BLOCK_SIZE,
2256 .maxauthsize = SHA256_DIGEST_SIZE,
2257 },
2258 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2259 DESC_HDR_SEL0_DEU |
2260 DESC_HDR_MODE0_DEU_CBC |
2261 DESC_HDR_MODE0_DEU_3DES |
2262 DESC_HDR_SEL1_MDEUA |
2263 DESC_HDR_MODE1_MDEU_INIT |
2264 DESC_HDR_MODE1_MDEU_PAD |
2265 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2266 },
2267 { .type = CRYPTO_ALG_TYPE_AEAD,
2268 .alg.aead = {
2269 .base = {
2270 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2271 .cra_driver_name = "authenc-hmac-sha384-"
2272 "cbc-aes-talitos",
2273 .cra_blocksize = AES_BLOCK_SIZE,
2274 .cra_flags = CRYPTO_ALG_ASYNC,
2275 },
2276 .ivsize = AES_BLOCK_SIZE,
2277 .maxauthsize = SHA384_DIGEST_SIZE,
2278 },
2279 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2280 DESC_HDR_SEL0_AESU |
2281 DESC_HDR_MODE0_AESU_CBC |
2282 DESC_HDR_SEL1_MDEUB |
2283 DESC_HDR_MODE1_MDEU_INIT |
2284 DESC_HDR_MODE1_MDEU_PAD |
2285 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2286 },
2287 { .type = CRYPTO_ALG_TYPE_AEAD,
2288 .alg.aead = {
2289 .base = {
2290 .cra_name = "authenc(hmac(sha384),"
2291 "cbc(des3_ede))",
2292 .cra_driver_name = "authenc-hmac-sha384-"
2293 "cbc-3des-talitos",
2294 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2295 .cra_flags = CRYPTO_ALG_ASYNC,
2296 },
2297 .ivsize = DES3_EDE_BLOCK_SIZE,
2298 .maxauthsize = SHA384_DIGEST_SIZE,
2299 },
2300 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2301 DESC_HDR_SEL0_DEU |
2302 DESC_HDR_MODE0_DEU_CBC |
2303 DESC_HDR_MODE0_DEU_3DES |
2304 DESC_HDR_SEL1_MDEUB |
2305 DESC_HDR_MODE1_MDEU_INIT |
2306 DESC_HDR_MODE1_MDEU_PAD |
2307 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2308 },
2309 { .type = CRYPTO_ALG_TYPE_AEAD,
2310 .alg.aead = {
2311 .base = {
2312 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2313 .cra_driver_name = "authenc-hmac-sha512-"
2314 "cbc-aes-talitos",
2315 .cra_blocksize = AES_BLOCK_SIZE,
2316 .cra_flags = CRYPTO_ALG_ASYNC,
2317 },
2318 .ivsize = AES_BLOCK_SIZE,
2319 .maxauthsize = SHA512_DIGEST_SIZE,
2320 },
2321 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2322 DESC_HDR_SEL0_AESU |
2323 DESC_HDR_MODE0_AESU_CBC |
2324 DESC_HDR_SEL1_MDEUB |
2325 DESC_HDR_MODE1_MDEU_INIT |
2326 DESC_HDR_MODE1_MDEU_PAD |
2327 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2328 },
2329 { .type = CRYPTO_ALG_TYPE_AEAD,
2330 .alg.aead = {
2331 .base = {
2332 .cra_name = "authenc(hmac(sha512),"
2333 "cbc(des3_ede))",
2334 .cra_driver_name = "authenc-hmac-sha512-"
2335 "cbc-3des-talitos",
2336 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2337 .cra_flags = CRYPTO_ALG_ASYNC,
2338 },
2339 .ivsize = DES3_EDE_BLOCK_SIZE,
2340 .maxauthsize = SHA512_DIGEST_SIZE,
2341 },
2342 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2343 DESC_HDR_SEL0_DEU |
2344 DESC_HDR_MODE0_DEU_CBC |
2345 DESC_HDR_MODE0_DEU_3DES |
2346 DESC_HDR_SEL1_MDEUB |
2347 DESC_HDR_MODE1_MDEU_INIT |
2348 DESC_HDR_MODE1_MDEU_PAD |
2349 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2350 },
2351 { .type = CRYPTO_ALG_TYPE_AEAD,
2352 .alg.aead = {
2353 .base = {
2354 .cra_name = "authenc(hmac(md5),cbc(aes))",
2355 .cra_driver_name = "authenc-hmac-md5-"
2356 "cbc-aes-talitos",
2357 .cra_blocksize = AES_BLOCK_SIZE,
2358 .cra_flags = CRYPTO_ALG_ASYNC,
2359 },
2360 .ivsize = AES_BLOCK_SIZE,
2361 .maxauthsize = MD5_DIGEST_SIZE,
2362 },
2363 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2364 DESC_HDR_SEL0_AESU |
2365 DESC_HDR_MODE0_AESU_CBC |
2366 DESC_HDR_SEL1_MDEUA |
2367 DESC_HDR_MODE1_MDEU_INIT |
2368 DESC_HDR_MODE1_MDEU_PAD |
2369 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2370 },
2371 { .type = CRYPTO_ALG_TYPE_AEAD,
2372 .alg.aead = {
2373 .base = {
2374 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2375 .cra_driver_name = "authenc-hmac-md5-"
2376 "cbc-3des-talitos",
2377 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2378 .cra_flags = CRYPTO_ALG_ASYNC,
2379 },
2380 .ivsize = DES3_EDE_BLOCK_SIZE,
2381 .maxauthsize = MD5_DIGEST_SIZE,
2382 },
2383 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2384 DESC_HDR_SEL0_DEU |
2385 DESC_HDR_MODE0_DEU_CBC |
2386 DESC_HDR_MODE0_DEU_3DES |
2387 DESC_HDR_SEL1_MDEUA |
2388 DESC_HDR_MODE1_MDEU_INIT |
2389 DESC_HDR_MODE1_MDEU_PAD |
2390 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2391 },
2392 /* ABLKCIPHER algorithms. */
2393 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2394 .alg.crypto = {
2395 .cra_name = "ecb(aes)",
2396 .cra_driver_name = "ecb-aes-talitos",
2397 .cra_blocksize = AES_BLOCK_SIZE,
2398 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2399 CRYPTO_ALG_ASYNC,
2400 .cra_ablkcipher = {
2401 .min_keysize = AES_MIN_KEY_SIZE,
2402 .max_keysize = AES_MAX_KEY_SIZE,
2403 .ivsize = AES_BLOCK_SIZE,
2404 }
2405 },
2406 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2407 DESC_HDR_SEL0_AESU,
2408 },
2409 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2410 .alg.crypto = {
2411 .cra_name = "cbc(aes)",
2412 .cra_driver_name = "cbc-aes-talitos",
2413 .cra_blocksize = AES_BLOCK_SIZE,
2414 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2415 CRYPTO_ALG_ASYNC,
2416 .cra_ablkcipher = {
2417 .min_keysize = AES_MIN_KEY_SIZE,
2418 .max_keysize = AES_MAX_KEY_SIZE,
2419 .ivsize = AES_BLOCK_SIZE,
2420 }
2421 },
2422 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2423 DESC_HDR_SEL0_AESU |
2424 DESC_HDR_MODE0_AESU_CBC,
2425 },
2426 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2427 .alg.crypto = {
2428 .cra_name = "ctr(aes)",
2429 .cra_driver_name = "ctr-aes-talitos",
2430 .cra_blocksize = AES_BLOCK_SIZE,
2431 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2432 CRYPTO_ALG_ASYNC,
2433 .cra_ablkcipher = {
2434 .min_keysize = AES_MIN_KEY_SIZE,
2435 .max_keysize = AES_MAX_KEY_SIZE,
2436 .ivsize = AES_BLOCK_SIZE,
2437 }
2438 },
2439 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2440 DESC_HDR_SEL0_AESU |
2441 DESC_HDR_MODE0_AESU_CTR,
2442 },
2443 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2444 .alg.crypto = {
2445 .cra_name = "ecb(des)",
2446 .cra_driver_name = "ecb-des-talitos",
2447 .cra_blocksize = DES_BLOCK_SIZE,
2448 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2449 CRYPTO_ALG_ASYNC,
2450 .cra_ablkcipher = {
2451 .min_keysize = DES_KEY_SIZE,
2452 .max_keysize = DES_KEY_SIZE,
2453 .ivsize = DES_BLOCK_SIZE,
2454 }
2455 },
2456 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2457 DESC_HDR_SEL0_DEU,
2458 },
2459 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2460 .alg.crypto = {
2461 .cra_name = "cbc(des)",
2462 .cra_driver_name = "cbc-des-talitos",
2463 .cra_blocksize = DES_BLOCK_SIZE,
2464 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2465 CRYPTO_ALG_ASYNC,
2466 .cra_ablkcipher = {
2467 .min_keysize = DES_KEY_SIZE,
2468 .max_keysize = DES_KEY_SIZE,
2469 .ivsize = DES_BLOCK_SIZE,
2470 }
2471 },
2472 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2473 DESC_HDR_SEL0_DEU |
2474 DESC_HDR_MODE0_DEU_CBC,
2475 },
2476 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2477 .alg.crypto = {
2478 .cra_name = "ecb(des3_ede)",
2479 .cra_driver_name = "ecb-3des-talitos",
2480 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2481 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2482 CRYPTO_ALG_ASYNC,
2483 .cra_ablkcipher = {
2484 .min_keysize = DES3_EDE_KEY_SIZE,
2485 .max_keysize = DES3_EDE_KEY_SIZE,
2486 .ivsize = DES3_EDE_BLOCK_SIZE,
2487 }
2488 },
2489 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2490 DESC_HDR_SEL0_DEU |
2491 DESC_HDR_MODE0_DEU_3DES,
2492 },
2493 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2494 .alg.crypto = {
2495 .cra_name = "cbc(des3_ede)",
2496 .cra_driver_name = "cbc-3des-talitos",
2497 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2498 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2499 CRYPTO_ALG_ASYNC,
2500 .cra_ablkcipher = {
2501 .min_keysize = DES3_EDE_KEY_SIZE,
2502 .max_keysize = DES3_EDE_KEY_SIZE,
2503 .ivsize = DES3_EDE_BLOCK_SIZE,
2504 }
2505 },
2506 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2507 DESC_HDR_SEL0_DEU |
2508 DESC_HDR_MODE0_DEU_CBC |
2509 DESC_HDR_MODE0_DEU_3DES,
2510 },
2511 /* AHASH algorithms. */
2512 { .type = CRYPTO_ALG_TYPE_AHASH,
2513 .alg.hash = {
2514 .halg.digestsize = MD5_DIGEST_SIZE,
2515 .halg.statesize = sizeof(struct talitos_export_state),
2516 .halg.base = {
2517 .cra_name = "md5",
2518 .cra_driver_name = "md5-talitos",
2519 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2520 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2521 CRYPTO_ALG_ASYNC,
2522 }
2523 },
2524 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2525 DESC_HDR_SEL0_MDEUA |
2526 DESC_HDR_MODE0_MDEU_MD5,
2527 },
2528 { .type = CRYPTO_ALG_TYPE_AHASH,
2529 .alg.hash = {
2530 .halg.digestsize = SHA1_DIGEST_SIZE,
2531 .halg.statesize = sizeof(struct talitos_export_state),
2532 .halg.base = {
2533 .cra_name = "sha1",
2534 .cra_driver_name = "sha1-talitos",
2535 .cra_blocksize = SHA1_BLOCK_SIZE,
2536 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2537 CRYPTO_ALG_ASYNC,
2538 }
2539 },
2540 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2541 DESC_HDR_SEL0_MDEUA |
2542 DESC_HDR_MODE0_MDEU_SHA1,
2543 },
2544 { .type = CRYPTO_ALG_TYPE_AHASH,
2545 .alg.hash = {
2546 .halg.digestsize = SHA224_DIGEST_SIZE,
2547 .halg.statesize = sizeof(struct talitos_export_state),
2548 .halg.base = {
2549 .cra_name = "sha224",
2550 .cra_driver_name = "sha224-talitos",
2551 .cra_blocksize = SHA224_BLOCK_SIZE,
2552 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2553 CRYPTO_ALG_ASYNC,
2554 }
2555 },
2556 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2557 DESC_HDR_SEL0_MDEUA |
2558 DESC_HDR_MODE0_MDEU_SHA224,
2559 },
2560 { .type = CRYPTO_ALG_TYPE_AHASH,
2561 .alg.hash = {
2562 .halg.digestsize = SHA256_DIGEST_SIZE,
2563 .halg.statesize = sizeof(struct talitos_export_state),
2564 .halg.base = {
2565 .cra_name = "sha256",
2566 .cra_driver_name = "sha256-talitos",
2567 .cra_blocksize = SHA256_BLOCK_SIZE,
2568 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2569 CRYPTO_ALG_ASYNC,
2570 }
2571 },
2572 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2573 DESC_HDR_SEL0_MDEUA |
2574 DESC_HDR_MODE0_MDEU_SHA256,
2575 },
2576 { .type = CRYPTO_ALG_TYPE_AHASH,
2577 .alg.hash = {
2578 .halg.digestsize = SHA384_DIGEST_SIZE,
2579 .halg.statesize = sizeof(struct talitos_export_state),
2580 .halg.base = {
2581 .cra_name = "sha384",
2582 .cra_driver_name = "sha384-talitos",
2583 .cra_blocksize = SHA384_BLOCK_SIZE,
2584 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2585 CRYPTO_ALG_ASYNC,
2586 }
2587 },
2588 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2589 DESC_HDR_SEL0_MDEUB |
2590 DESC_HDR_MODE0_MDEUB_SHA384,
2591 },
2592 { .type = CRYPTO_ALG_TYPE_AHASH,
2593 .alg.hash = {
2594 .halg.digestsize = SHA512_DIGEST_SIZE,
2595 .halg.statesize = sizeof(struct talitos_export_state),
2596 .halg.base = {
2597 .cra_name = "sha512",
2598 .cra_driver_name = "sha512-talitos",
2599 .cra_blocksize = SHA512_BLOCK_SIZE,
2600 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2601 CRYPTO_ALG_ASYNC,
2602 }
2603 },
2604 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2605 DESC_HDR_SEL0_MDEUB |
2606 DESC_HDR_MODE0_MDEUB_SHA512,
2607 },
2608 { .type = CRYPTO_ALG_TYPE_AHASH,
2609 .alg.hash = {
2610 .halg.digestsize = MD5_DIGEST_SIZE,
2611 .halg.statesize = sizeof(struct talitos_export_state),
2612 .halg.base = {
2613 .cra_name = "hmac(md5)",
2614 .cra_driver_name = "hmac-md5-talitos",
2615 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2616 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2617 CRYPTO_ALG_ASYNC,
2618 }
2619 },
2620 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2621 DESC_HDR_SEL0_MDEUA |
2622 DESC_HDR_MODE0_MDEU_MD5,
2623 },
2624 { .type = CRYPTO_ALG_TYPE_AHASH,
2625 .alg.hash = {
2626 .halg.digestsize = SHA1_DIGEST_SIZE,
2627 .halg.statesize = sizeof(struct talitos_export_state),
2628 .halg.base = {
2629 .cra_name = "hmac(sha1)",
2630 .cra_driver_name = "hmac-sha1-talitos",
2631 .cra_blocksize = SHA1_BLOCK_SIZE,
2632 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2633 CRYPTO_ALG_ASYNC,
2634 }
2635 },
2636 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2637 DESC_HDR_SEL0_MDEUA |
2638 DESC_HDR_MODE0_MDEU_SHA1,
2639 },
2640 { .type = CRYPTO_ALG_TYPE_AHASH,
2641 .alg.hash = {
2642 .halg.digestsize = SHA224_DIGEST_SIZE,
2643 .halg.statesize = sizeof(struct talitos_export_state),
2644 .halg.base = {
2645 .cra_name = "hmac(sha224)",
2646 .cra_driver_name = "hmac-sha224-talitos",
2647 .cra_blocksize = SHA224_BLOCK_SIZE,
2648 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2649 CRYPTO_ALG_ASYNC,
2650 }
2651 },
2652 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2653 DESC_HDR_SEL0_MDEUA |
2654 DESC_HDR_MODE0_MDEU_SHA224,
2655 },
2656 { .type = CRYPTO_ALG_TYPE_AHASH,
2657 .alg.hash = {
2658 .halg.digestsize = SHA256_DIGEST_SIZE,
2659 .halg.statesize = sizeof(struct talitos_export_state),
2660 .halg.base = {
2661 .cra_name = "hmac(sha256)",
2662 .cra_driver_name = "hmac-sha256-talitos",
2663 .cra_blocksize = SHA256_BLOCK_SIZE,
2664 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2665 CRYPTO_ALG_ASYNC,
2666 }
2667 },
2668 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2669 DESC_HDR_SEL0_MDEUA |
2670 DESC_HDR_MODE0_MDEU_SHA256,
2671 },
2672 { .type = CRYPTO_ALG_TYPE_AHASH,
2673 .alg.hash = {
2674 .halg.digestsize = SHA384_DIGEST_SIZE,
2675 .halg.statesize = sizeof(struct talitos_export_state),
2676 .halg.base = {
2677 .cra_name = "hmac(sha384)",
2678 .cra_driver_name = "hmac-sha384-talitos",
2679 .cra_blocksize = SHA384_BLOCK_SIZE,
2680 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2681 CRYPTO_ALG_ASYNC,
2682 }
2683 },
2684 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2685 DESC_HDR_SEL0_MDEUB |
2686 DESC_HDR_MODE0_MDEUB_SHA384,
2687 },
2688 { .type = CRYPTO_ALG_TYPE_AHASH,
2689 .alg.hash = {
2690 .halg.digestsize = SHA512_DIGEST_SIZE,
2691 .halg.statesize = sizeof(struct talitos_export_state),
2692 .halg.base = {
2693 .cra_name = "hmac(sha512)",
2694 .cra_driver_name = "hmac-sha512-talitos",
2695 .cra_blocksize = SHA512_BLOCK_SIZE,
2696 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2697 CRYPTO_ALG_ASYNC,
2698 }
2699 },
2700 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2701 DESC_HDR_SEL0_MDEUB |
2702 DESC_HDR_MODE0_MDEUB_SHA512,
2703 }
2704 };
2705
2706 struct talitos_crypto_alg {
2707 struct list_head entry;
2708 struct device *dev;
2709 struct talitos_alg_template algt;
2710 };
2711
2712 static int talitos_init_common(struct talitos_ctx *ctx,
2713 struct talitos_crypto_alg *talitos_alg)
2714 {
2715 struct talitos_private *priv;
2716
2717 /* update context with ptr to dev */
2718 ctx->dev = talitos_alg->dev;
2719
2720 /* assign SEC channel to tfm in round-robin fashion */
2721 priv = dev_get_drvdata(ctx->dev);
2722 ctx->ch = atomic_inc_return(&priv->last_chan) &
2723 (priv->num_channels - 1);
2724
2725 /* copy descriptor header template value */
2726 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2727
2728 /* select done notification */
2729 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2730
2731 return 0;
2732 }
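
/*
 * The channel assignment above depends on num_channels being a power of
 * two (enforced at probe time), so the atomic counter can be masked rather
 * than taken modulo: with 4 channels, successive tfms land on channels
 * 1, 2, 3, 0, 1, ...
 */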
2733
2734 static int talitos_cra_init(struct crypto_tfm *tfm)
2735 {
2736 struct crypto_alg *alg = tfm->__crt_alg;
2737 struct talitos_crypto_alg *talitos_alg;
2738 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2739
2740 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2741 talitos_alg = container_of(__crypto_ahash_alg(alg),
2742 struct talitos_crypto_alg,
2743 algt.alg.hash);
2744 else
2745 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2746 algt.alg.crypto);
2747
2748 return talitos_init_common(ctx, talitos_alg);
2749 }
2750
2751 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2752 {
2753 struct aead_alg *alg = crypto_aead_alg(tfm);
2754 struct talitos_crypto_alg *talitos_alg;
2755 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2756
2757 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2758 algt.alg.aead);
2759
2760 return talitos_init_common(ctx, talitos_alg);
2761 }
2762
2763 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2764 {
2765 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2766
2767 talitos_cra_init(tfm);
2768
2769 ctx->keylen = 0;
2770 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2771 sizeof(struct talitos_ahash_req_ctx));
2772
2773 return 0;
2774 }
2775
2776 /*
2777 * given the alg's descriptor header template, determine whether descriptor
2778 * type and primary/secondary execution units required match the hw
2779 * capabilities description provided in the device tree node.
2780 */
2781 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2782 {
2783 struct talitos_private *priv = dev_get_drvdata(dev);
2784 int ret;
2785
	ret = ((1 << DESC_TYPE(desc_hdr_template)) & priv->desc_types) &&
	      ((1 << PRIMARY_EU(desc_hdr_template)) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && ((1 << SECONDARY_EU(desc_hdr_template)) &
			      priv->exec_units);
2792
2793 return ret;
2794 }
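
/*
 * Illustrative reading of the check above: for the cbc-aes-talitos
 * template, DESC_TYPE() selects the common-nonsnoop descriptor type and
 * PRIMARY_EU() the AESU, so both corresponding bits must be set in the
 * device tree's "fsl,descriptor-types-mask" and "fsl,exec-units-mask"
 * for the algorithm to be considered supported.
 */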
2795
2796 static int talitos_remove(struct platform_device *ofdev)
2797 {
2798 struct device *dev = &ofdev->dev;
2799 struct talitos_private *priv = dev_get_drvdata(dev);
2800 struct talitos_crypto_alg *t_alg, *n;
2801 int i;
2802
2803 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2804 switch (t_alg->algt.type) {
2805 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2806 break;
2807 case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
2810 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2811 break;
2812 }
2813 list_del(&t_alg->entry);
2814 kfree(t_alg);
2815 }
2816
2817 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2818 talitos_unregister_rng(dev);
2819
2820 for (i = 0; priv->chan && i < priv->num_channels; i++)
2821 kfree(priv->chan[i].fifo);
2822
2823 kfree(priv->chan);
2824
2825 for (i = 0; i < 2; i++)
2826 if (priv->irq[i]) {
2827 free_irq(priv->irq[i], dev);
2828 irq_dispose_mapping(priv->irq[i]);
2829 }
2830
2831 tasklet_kill(&priv->done_task[0]);
2832 if (priv->irq[1])
2833 tasklet_kill(&priv->done_task[1]);
2834
2835 iounmap(priv->reg);
2836
2837 kfree(priv);
2838
2839 return 0;
2840 }
2841
2842 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2843 struct talitos_alg_template
2844 *template)
2845 {
2846 struct talitos_private *priv = dev_get_drvdata(dev);
2847 struct talitos_crypto_alg *t_alg;
2848 struct crypto_alg *alg;
2849
2850 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2851 if (!t_alg)
2852 return ERR_PTR(-ENOMEM);
2853
2854 t_alg->algt = *template;
2855
2856 switch (t_alg->algt.type) {
2857 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2858 alg = &t_alg->algt.alg.crypto;
2859 alg->cra_init = talitos_cra_init;
2860 alg->cra_type = &crypto_ablkcipher_type;
2861 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2862 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2863 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2864 alg->cra_ablkcipher.geniv = "eseqiv";
2865 break;
2866 case CRYPTO_ALG_TYPE_AEAD:
2867 alg = &t_alg->algt.alg.aead.base;
2868 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2869 t_alg->algt.alg.aead.setkey = aead_setkey;
2870 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2871 t_alg->algt.alg.aead.decrypt = aead_decrypt;
2872 break;
2873 case CRYPTO_ALG_TYPE_AHASH:
2874 alg = &t_alg->algt.alg.hash.halg.base;
2875 alg->cra_init = talitos_cra_init_ahash;
2876 alg->cra_type = &crypto_ahash_type;
2877 t_alg->algt.alg.hash.init = ahash_init;
2878 t_alg->algt.alg.hash.update = ahash_update;
2879 t_alg->algt.alg.hash.final = ahash_final;
2880 t_alg->algt.alg.hash.finup = ahash_finup;
2881 t_alg->algt.alg.hash.digest = ahash_digest;
2882 t_alg->algt.alg.hash.setkey = ahash_setkey;
2883 t_alg->algt.alg.hash.import = ahash_import;
2884 t_alg->algt.alg.hash.export = ahash_export;
2885
2886 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2887 !strncmp(alg->cra_name, "hmac", 4)) {
2888 kfree(t_alg);
2889 return ERR_PTR(-ENOTSUPP);
2890 }
2891 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2892 (!strcmp(alg->cra_name, "sha224") ||
2893 !strcmp(alg->cra_name, "hmac(sha224)"))) {
2894 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2895 t_alg->algt.desc_hdr_template =
2896 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2897 DESC_HDR_SEL0_MDEUA |
2898 DESC_HDR_MODE0_MDEU_SHA256;
2899 }
2900 break;
2901 default:
2902 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2903 kfree(t_alg);
2904 return ERR_PTR(-EINVAL);
2905 }
2906
2907 alg->cra_module = THIS_MODULE;
2908 alg->cra_priority = TALITOS_CRA_PRIORITY;
2909 alg->cra_alignmask = 0;
2910 alg->cra_ctxsize = sizeof(struct talitos_ctx);
2911 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2912
2913 t_alg->dev = dev;
2914
2915 return t_alg;
2916 }
2917
2918 static int talitos_probe_irq(struct platform_device *ofdev)
2919 {
2920 struct device *dev = &ofdev->dev;
2921 struct device_node *np = ofdev->dev.of_node;
2922 struct talitos_private *priv = dev_get_drvdata(dev);
2923 int err;
2924 bool is_sec1 = has_ftr_sec1(priv);
2925
2926 priv->irq[0] = irq_of_parse_and_map(np, 0);
2927 if (!priv->irq[0]) {
2928 dev_err(dev, "failed to map irq\n");
2929 return -EINVAL;
2930 }
2931 if (is_sec1) {
2932 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2933 dev_driver_string(dev), dev);
2934 goto primary_out;
2935 }
2936
2937 priv->irq[1] = irq_of_parse_and_map(np, 1);
2938
2939 /* get the primary irq line */
2940 if (!priv->irq[1]) {
2941 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2942 dev_driver_string(dev), dev);
2943 goto primary_out;
2944 }
2945
2946 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2947 dev_driver_string(dev), dev);
2948 if (err)
2949 goto primary_out;
2950
2951 /* get the secondary irq line */
2952 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2953 dev_driver_string(dev), dev);
2954 if (err) {
2955 dev_err(dev, "failed to request secondary irq\n");
2956 irq_dispose_mapping(priv->irq[1]);
2957 priv->irq[1] = 0;
2958 }
2959
2960 return err;
2961
2962 primary_out:
2963 if (err) {
2964 dev_err(dev, "failed to request primary irq\n");
2965 irq_dispose_mapping(priv->irq[0]);
2966 priv->irq[0] = 0;
2967 }
2968
2969 return err;
2970 }
2971
2972 static int talitos_probe(struct platform_device *ofdev)
2973 {
2974 struct device *dev = &ofdev->dev;
2975 struct device_node *np = ofdev->dev.of_node;
2976 struct talitos_private *priv;
2977 const unsigned int *prop;
2978 int i, err;
2979 int stride;
2980
2981 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2982 if (!priv)
2983 return -ENOMEM;
2984
2985 INIT_LIST_HEAD(&priv->alg_list);
2986
2987 dev_set_drvdata(dev, priv);
2988
2989 priv->ofdev = ofdev;
2990
2991 spin_lock_init(&priv->reg_lock);
2992
2993 priv->reg = of_iomap(np, 0);
2994 if (!priv->reg) {
2995 dev_err(dev, "failed to of_iomap\n");
2996 err = -ENOMEM;
2997 goto err_out;
2998 }
2999
3000 /* get SEC version capabilities from device tree */
3001 prop = of_get_property(np, "fsl,num-channels", NULL);
3002 if (prop)
3003 priv->num_channels = *prop;
3004
3005 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
3006 if (prop)
3007 priv->chfifo_len = *prop;
3008
3009 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
3010 if (prop)
3011 priv->exec_units = *prop;
3012
3013 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
3014 if (prop)
3015 priv->desc_types = *prop;
3016
3017 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3018 !priv->exec_units || !priv->desc_types) {
3019 dev_err(dev, "invalid property data in device tree node\n");
3020 err = -EINVAL;
3021 goto err_out;
3022 }
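
	/*
	 * Example device tree fragment satisfying the checks above
	 * (illustrative values in the style of typical SEC 2.x nodes, not
	 * taken from any particular board file):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <11 2>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0x7e>;
	 *		fsl,descriptor-types-mask = <0x01010ebf>;
	 *	};
	 */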
3023
3024 if (of_device_is_compatible(np, "fsl,sec3.0"))
3025 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3026
3027 if (of_device_is_compatible(np, "fsl,sec2.1"))
3028 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3029 TALITOS_FTR_SHA224_HWINIT |
3030 TALITOS_FTR_HMAC_OK;
3031
3032 if (of_device_is_compatible(np, "fsl,sec1.0"))
3033 priv->features |= TALITOS_FTR_SEC1;
3034
3035 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3036 priv->reg_deu = priv->reg + TALITOS12_DEU;
3037 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3038 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3039 stride = TALITOS1_CH_STRIDE;
3040 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3041 priv->reg_deu = priv->reg + TALITOS10_DEU;
3042 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3043 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3044 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3045 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3046 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3047 stride = TALITOS1_CH_STRIDE;
3048 } else {
3049 priv->reg_deu = priv->reg + TALITOS2_DEU;
3050 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3051 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3052 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3053 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3054 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3055 priv->reg_keu = priv->reg + TALITOS2_KEU;
3056 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3057 stride = TALITOS2_CH_STRIDE;
3058 }
3059
3060 err = talitos_probe_irq(ofdev);
3061 if (err)
3062 goto err_out;
3063
3064 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3065 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3066 (unsigned long)dev);
3067 } else {
3068 if (!priv->irq[1]) {
3069 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3070 (unsigned long)dev);
3071 } else {
3072 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3073 (unsigned long)dev);
3074 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3075 (unsigned long)dev);
3076 }
3077 }
3078
	priv->chan = kcalloc(priv->num_channels,
			     sizeof(struct talitos_channel), GFP_KERNEL);
3081 if (!priv->chan) {
3082 dev_err(dev, "failed to allocate channel management space\n");
3083 err = -ENOMEM;
3084 goto err_out;
3085 }
3086
3087 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3088
3089 for (i = 0; i < priv->num_channels; i++) {
3090 priv->chan[i].reg = priv->reg + stride * (i + 1);
3091 if (!priv->irq[1] || !(i & 1))
3092 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3093
3094 spin_lock_init(&priv->chan[i].head_lock);
3095 spin_lock_init(&priv->chan[i].tail_lock);
3096
		priv->chan[i].fifo = kcalloc(priv->fifo_len,
					     sizeof(struct talitos_request),
					     GFP_KERNEL);
3099 if (!priv->chan[i].fifo) {
3100 dev_err(dev, "failed to allocate request fifo %d\n", i);
3101 err = -ENOMEM;
3102 goto err_out;
3103 }
3104
3105 atomic_set(&priv->chan[i].submit_count,
3106 -(priv->chfifo_len - 1));
3107 }
3108
3109 dma_set_mask(dev, DMA_BIT_MASK(36));
3110
3111 /* reset and initialize the h/w */
3112 err = init_device(dev);
3113 if (err) {
3114 dev_err(dev, "failed to initialize device\n");
3115 goto err_out;
3116 }
3117
3118 /* register the RNG, if available */
3119 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3120 err = talitos_register_rng(dev);
3121 if (err) {
3122 dev_err(dev, "failed to register hwrng: %d\n", err);
3123 goto err_out;
3124 } else
3125 dev_info(dev, "hwrng\n");
3126 }
3127
3128 /* register crypto algorithms the device supports */
3129 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3130 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3131 struct talitos_crypto_alg *t_alg;
3132 struct crypto_alg *alg = NULL;
3133
3134 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3135 if (IS_ERR(t_alg)) {
3136 err = PTR_ERR(t_alg);
3137 if (err == -ENOTSUPP)
3138 continue;
3139 goto err_out;
3140 }
3141
3142 switch (t_alg->algt.type) {
3143 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3144 err = crypto_register_alg(
3145 &t_alg->algt.alg.crypto);
3146 alg = &t_alg->algt.alg.crypto;
3147 break;
3148
3149 case CRYPTO_ALG_TYPE_AEAD:
3150 err = crypto_register_aead(
3151 &t_alg->algt.alg.aead);
3152 alg = &t_alg->algt.alg.aead.base;
3153 break;
3154
3155 case CRYPTO_ALG_TYPE_AHASH:
3156 err = crypto_register_ahash(
3157 &t_alg->algt.alg.hash);
3158 alg = &t_alg->algt.alg.hash.halg.base;
3159 break;
3160 }
3161 if (err) {
3162 dev_err(dev, "%s alg registration failed\n",
3163 alg->cra_driver_name);
3164 kfree(t_alg);
3165 } else
3166 list_add_tail(&t_alg->entry, &priv->alg_list);
3167 }
3168 }
3169 if (!list_empty(&priv->alg_list))
3170 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3171 (char *)of_get_property(np, "compatible", NULL));
3172
3173 return 0;
3174
3175 err_out:
3176 talitos_remove(ofdev);
3177
3178 return err;
3179 }
3180
3181 static const struct of_device_id talitos_match[] = {
3182 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3183 {
3184 .compatible = "fsl,sec1.0",
3185 },
3186 #endif
3187 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3188 {
3189 .compatible = "fsl,sec2.0",
3190 },
3191 #endif
3192 {},
3193 };
3194 MODULE_DEVICE_TABLE(of, talitos_match);
3195
3196 static struct platform_driver talitos_driver = {
3197 .driver = {
3198 .name = "talitos",
3199 .of_match_table = talitos_match,
3200 },
3201 .probe = talitos_probe,
3202 .remove = talitos_remove,
3203 };
3204
3205 module_platform_driver(talitos_driver);
3206
3207 MODULE_LICENSE("GPL");
3208 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3209 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");