2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
58 static void to_talitos_ptr(struct talitos_ptr
*ptr
, dma_addr_t dma_addr
,
61 ptr
->ptr
= cpu_to_be32(lower_32_bits(dma_addr
));
63 ptr
->eptr
= upper_32_bits(dma_addr
);
66 static void copy_talitos_ptr(struct talitos_ptr
*dst_ptr
,
67 struct talitos_ptr
*src_ptr
, bool is_sec1
)
69 dst_ptr
->ptr
= src_ptr
->ptr
;
71 dst_ptr
->eptr
= src_ptr
->eptr
;
74 static void to_talitos_ptr_len(struct talitos_ptr
*ptr
, unsigned int len
,
79 ptr
->len1
= cpu_to_be16(len
);
81 ptr
->len
= cpu_to_be16(len
);
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr
*ptr
,
89 return be16_to_cpu(ptr
->len1
);
91 return be16_to_cpu(ptr
->len
);
94 static void to_talitos_ptr_extent_clear(struct talitos_ptr
*ptr
, bool is_sec1
)
101 * map virtual single (contiguous) pointer to h/w descriptor pointer
103 static void map_single_talitos_ptr(struct device
*dev
,
104 struct talitos_ptr
*ptr
,
105 unsigned int len
, void *data
,
106 enum dma_data_direction dir
)
108 dma_addr_t dma_addr
= dma_map_single(dev
, data
, len
, dir
);
109 struct talitos_private
*priv
= dev_get_drvdata(dev
);
110 bool is_sec1
= has_ftr_sec1(priv
);
112 to_talitos_ptr_len(ptr
, len
, is_sec1
);
113 to_talitos_ptr(ptr
, dma_addr
, is_sec1
);
114 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
118 * unmap bus single (contiguous) h/w descriptor pointer
120 static void unmap_single_talitos_ptr(struct device
*dev
,
121 struct talitos_ptr
*ptr
,
122 enum dma_data_direction dir
)
124 struct talitos_private
*priv
= dev_get_drvdata(dev
);
125 bool is_sec1
= has_ftr_sec1(priv
);
127 dma_unmap_single(dev
, be32_to_cpu(ptr
->ptr
),
128 from_talitos_ptr_len(ptr
, is_sec1
), dir
);
131 static int reset_channel(struct device
*dev
, int ch
)
133 struct talitos_private
*priv
= dev_get_drvdata(dev
);
134 unsigned int timeout
= TALITOS_TIMEOUT
;
135 bool is_sec1
= has_ftr_sec1(priv
);
138 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
139 TALITOS1_CCCR_LO_RESET
);
141 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
) &
142 TALITOS1_CCCR_LO_RESET
) && --timeout
)
145 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
146 TALITOS2_CCCR_RESET
);
148 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
149 TALITOS2_CCCR_RESET
) && --timeout
)
154 dev_err(dev
, "failed to reset channel %d\n", ch
);
158 /* set 36-bit addressing, done writeback enable and done IRQ enable */
159 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, TALITOS_CCCR_LO_EAE
|
160 TALITOS_CCCR_LO_CDWE
| TALITOS_CCCR_LO_CDIE
);
162 /* and ICCR writeback, if available */
163 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
164 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
165 TALITOS_CCCR_LO_IWSE
);
170 static int reset_device(struct device
*dev
)
172 struct talitos_private
*priv
= dev_get_drvdata(dev
);
173 unsigned int timeout
= TALITOS_TIMEOUT
;
174 bool is_sec1
= has_ftr_sec1(priv
);
175 u32 mcr
= is_sec1
? TALITOS1_MCR_SWR
: TALITOS2_MCR_SWR
;
177 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
179 while ((in_be32(priv
->reg
+ TALITOS_MCR
) & mcr
)
184 mcr
= TALITOS_MCR_RCA1
| TALITOS_MCR_RCA3
;
185 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
189 dev_err(dev
, "failed to reset device\n");
197 * Reset and initialize the device
199 static int init_device(struct device
*dev
)
201 struct talitos_private
*priv
= dev_get_drvdata(dev
);
203 bool is_sec1
= has_ftr_sec1(priv
);
207 * errata documentation: warning: certain SEC interrupts
208 * are not fully cleared by writing the MCR:SWR bit,
209 * set bit twice to completely reset
211 err
= reset_device(dev
);
215 err
= reset_device(dev
);
220 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
221 err
= reset_channel(dev
, ch
);
226 /* enable channel done and error interrupts */
228 clrbits32(priv
->reg
+ TALITOS_IMR
, TALITOS1_IMR_INIT
);
229 clrbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS1_IMR_LO_INIT
);
230 /* disable parity error check in DEU (erroneous? test vect.) */
231 setbits32(priv
->reg_deu
+ TALITOS_EUICR
, TALITOS1_DEUICR_KPE
);
233 setbits32(priv
->reg
+ TALITOS_IMR
, TALITOS2_IMR_INIT
);
234 setbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS2_IMR_LO_INIT
);
237 /* disable integrity check error interrupts (use writeback instead) */
238 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
239 setbits32(priv
->reg_mdeu
+ TALITOS_EUICR_LO
,
240 TALITOS_MDEUICR_LO_ICE
);
246 * talitos_submit - submits a descriptor to the device for processing
247 * @dev: the SEC device to be used
248 * @ch: the SEC device channel to be used
249 * @desc: the descriptor to be processed by the device
250 * @callback: whom to call when processing is complete
251 * @context: a handle for use by caller (optional)
253 * desc must contain valid dma-mapped (bus physical) address pointers.
254 * callback must check err and feedback in descriptor header
255 * for device processing status.
257 int talitos_submit(struct device
*dev
, int ch
, struct talitos_desc
*desc
,
258 void (*callback
)(struct device
*dev
,
259 struct talitos_desc
*desc
,
260 void *context
, int error
),
263 struct talitos_private
*priv
= dev_get_drvdata(dev
);
264 struct talitos_request
*request
;
267 bool is_sec1
= has_ftr_sec1(priv
);
269 spin_lock_irqsave(&priv
->chan
[ch
].head_lock
, flags
);
271 if (!atomic_inc_not_zero(&priv
->chan
[ch
].submit_count
)) {
272 /* h/w fifo is full */
273 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
277 head
= priv
->chan
[ch
].head
;
278 request
= &priv
->chan
[ch
].fifo
[head
];
280 /* map descriptor and save caller data */
282 desc
->hdr1
= desc
->hdr
;
284 request
->dma_desc
= dma_map_single(dev
, &desc
->hdr1
,
288 request
->dma_desc
= dma_map_single(dev
, desc
,
292 request
->callback
= callback
;
293 request
->context
= context
;
295 /* increment fifo head */
296 priv
->chan
[ch
].head
= (priv
->chan
[ch
].head
+ 1) & (priv
->fifo_len
- 1);
299 request
->desc
= desc
;
303 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF
,
304 upper_32_bits(request
->dma_desc
));
305 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF_LO
,
306 lower_32_bits(request
->dma_desc
));
308 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
312 EXPORT_SYMBOL(talitos_submit
);
315 * process what was done, notify callback of error if not
317 static void flush_channel(struct device
*dev
, int ch
, int error
, int reset_ch
)
319 struct talitos_private
*priv
= dev_get_drvdata(dev
);
320 struct talitos_request
*request
, saved_req
;
323 bool is_sec1
= has_ftr_sec1(priv
);
325 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
327 tail
= priv
->chan
[ch
].tail
;
328 while (priv
->chan
[ch
].fifo
[tail
].desc
) {
331 request
= &priv
->chan
[ch
].fifo
[tail
];
333 /* descriptors with their done bits set don't get the error */
335 hdr
= is_sec1
? request
->desc
->hdr1
: request
->desc
->hdr
;
337 if ((hdr
& DESC_HDR_DONE
) == DESC_HDR_DONE
)
345 dma_unmap_single(dev
, request
->dma_desc
,
349 /* copy entries so we can call callback outside lock */
350 saved_req
.desc
= request
->desc
;
351 saved_req
.callback
= request
->callback
;
352 saved_req
.context
= request
->context
;
354 /* release request entry in fifo */
356 request
->desc
= NULL
;
358 /* increment fifo tail */
359 priv
->chan
[ch
].tail
= (tail
+ 1) & (priv
->fifo_len
- 1);
361 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
363 atomic_dec(&priv
->chan
[ch
].submit_count
);
365 saved_req
.callback(dev
, saved_req
.desc
, saved_req
.context
,
367 /* channel may resume processing in single desc error case */
368 if (error
&& !reset_ch
&& status
== error
)
370 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
371 tail
= priv
->chan
[ch
].tail
;
374 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
378 * process completed requests for channels that have done status
380 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
381 static void talitos1_done_##name(unsigned long data) \
383 struct device *dev = (struct device *)data; \
384 struct talitos_private *priv = dev_get_drvdata(dev); \
385 unsigned long flags; \
387 if (ch_done_mask & 0x10000000) \
388 flush_channel(dev, 0, 0, 0); \
389 if (priv->num_channels == 1) \
391 if (ch_done_mask & 0x40000000) \
392 flush_channel(dev, 1, 0, 0); \
393 if (ch_done_mask & 0x00010000) \
394 flush_channel(dev, 2, 0, 0); \
395 if (ch_done_mask & 0x00040000) \
396 flush_channel(dev, 3, 0, 0); \
399 /* At this point, all completed channels have been processed */ \
400 /* Unmask done interrupts for channels completed later on. */ \
401 spin_lock_irqsave(&priv->reg_lock, flags); \
402 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
403 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
404 spin_unlock_irqrestore(&priv->reg_lock, flags); \
407 DEF_TALITOS1_DONE(4ch
, TALITOS1_ISR_4CHDONE
)
409 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
410 static void talitos2_done_##name(unsigned long data) \
412 struct device *dev = (struct device *)data; \
413 struct talitos_private *priv = dev_get_drvdata(dev); \
414 unsigned long flags; \
416 if (ch_done_mask & 1) \
417 flush_channel(dev, 0, 0, 0); \
418 if (priv->num_channels == 1) \
420 if (ch_done_mask & (1 << 2)) \
421 flush_channel(dev, 1, 0, 0); \
422 if (ch_done_mask & (1 << 4)) \
423 flush_channel(dev, 2, 0, 0); \
424 if (ch_done_mask & (1 << 6)) \
425 flush_channel(dev, 3, 0, 0); \
428 /* At this point, all completed channels have been processed */ \
429 /* Unmask done interrupts for channels completed later on. */ \
430 spin_lock_irqsave(&priv->reg_lock, flags); \
431 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
432 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
433 spin_unlock_irqrestore(&priv->reg_lock, flags); \
436 DEF_TALITOS2_DONE(4ch
, TALITOS2_ISR_4CHDONE
)
437 DEF_TALITOS2_DONE(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
)
438 DEF_TALITOS2_DONE(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
)
441 * locate current (offending) descriptor
443 static u32
current_desc_hdr(struct device
*dev
, int ch
)
445 struct talitos_private
*priv
= dev_get_drvdata(dev
);
449 cur_desc
= ((u64
)in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR
)) << 32;
450 cur_desc
|= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR_LO
);
453 dev_err(dev
, "CDPR is NULL, giving up search for offending descriptor\n");
457 tail
= priv
->chan
[ch
].tail
;
460 while (priv
->chan
[ch
].fifo
[iter
].dma_desc
!= cur_desc
) {
461 iter
= (iter
+ 1) & (priv
->fifo_len
- 1);
463 dev_err(dev
, "couldn't locate current descriptor\n");
468 return priv
->chan
[ch
].fifo
[iter
].desc
->hdr
;
472 * user diagnostics; report root cause of error based on execution unit status
474 static void report_eu_error(struct device
*dev
, int ch
, u32 desc_hdr
)
476 struct talitos_private
*priv
= dev_get_drvdata(dev
);
480 desc_hdr
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
);
482 switch (desc_hdr
& DESC_HDR_SEL0_MASK
) {
483 case DESC_HDR_SEL0_AFEU
:
484 dev_err(dev
, "AFEUISR 0x%08x_%08x\n",
485 in_be32(priv
->reg_afeu
+ TALITOS_EUISR
),
486 in_be32(priv
->reg_afeu
+ TALITOS_EUISR_LO
));
488 case DESC_HDR_SEL0_DEU
:
489 dev_err(dev
, "DEUISR 0x%08x_%08x\n",
490 in_be32(priv
->reg_deu
+ TALITOS_EUISR
),
491 in_be32(priv
->reg_deu
+ TALITOS_EUISR_LO
));
493 case DESC_HDR_SEL0_MDEUA
:
494 case DESC_HDR_SEL0_MDEUB
:
495 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
496 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
497 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
499 case DESC_HDR_SEL0_RNG
:
500 dev_err(dev
, "RNGUISR 0x%08x_%08x\n",
501 in_be32(priv
->reg_rngu
+ TALITOS_ISR
),
502 in_be32(priv
->reg_rngu
+ TALITOS_ISR_LO
));
504 case DESC_HDR_SEL0_PKEU
:
505 dev_err(dev
, "PKEUISR 0x%08x_%08x\n",
506 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
507 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
509 case DESC_HDR_SEL0_AESU
:
510 dev_err(dev
, "AESUISR 0x%08x_%08x\n",
511 in_be32(priv
->reg_aesu
+ TALITOS_EUISR
),
512 in_be32(priv
->reg_aesu
+ TALITOS_EUISR_LO
));
514 case DESC_HDR_SEL0_CRCU
:
515 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
516 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
517 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
519 case DESC_HDR_SEL0_KEU
:
520 dev_err(dev
, "KEUISR 0x%08x_%08x\n",
521 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
522 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
526 switch (desc_hdr
& DESC_HDR_SEL1_MASK
) {
527 case DESC_HDR_SEL1_MDEUA
:
528 case DESC_HDR_SEL1_MDEUB
:
529 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
530 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
531 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
533 case DESC_HDR_SEL1_CRCU
:
534 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
535 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
536 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
540 for (i
= 0; i
< 8; i
++)
541 dev_err(dev
, "DESCBUF 0x%08x_%08x\n",
542 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
+ 8*i
),
543 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF_LO
+ 8*i
));
547 * recover from error interrupts
549 static void talitos_error(struct device
*dev
, u32 isr
, u32 isr_lo
)
551 struct talitos_private
*priv
= dev_get_drvdata(dev
);
552 unsigned int timeout
= TALITOS_TIMEOUT
;
553 int ch
, error
, reset_dev
= 0;
555 bool is_sec1
= has_ftr_sec1(priv
);
556 int reset_ch
= is_sec1
? 1 : 0; /* only SEC2 supports continuation */
558 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
559 /* skip channels without errors */
561 /* bits 29, 31, 17, 19 */
562 if (!(isr
& (1 << (29 + (ch
& 1) * 2 - (ch
& 2) * 6))))
565 if (!(isr
& (1 << (ch
* 2 + 1))))
571 v_lo
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCPSR_LO
);
573 if (v_lo
& TALITOS_CCPSR_LO_DOF
) {
574 dev_err(dev
, "double fetch fifo overflow error\n");
578 if (v_lo
& TALITOS_CCPSR_LO_SOF
) {
579 /* h/w dropped descriptor */
580 dev_err(dev
, "single fetch fifo overflow error\n");
583 if (v_lo
& TALITOS_CCPSR_LO_MDTE
)
584 dev_err(dev
, "master data transfer error\n");
585 if (v_lo
& TALITOS_CCPSR_LO_SGDLZ
)
586 dev_err(dev
, is_sec1
? "pointeur not complete error\n"
587 : "s/g data length zero error\n");
588 if (v_lo
& TALITOS_CCPSR_LO_FPZ
)
589 dev_err(dev
, is_sec1
? "parity error\n"
590 : "fetch pointer zero error\n");
591 if (v_lo
& TALITOS_CCPSR_LO_IDH
)
592 dev_err(dev
, "illegal descriptor header error\n");
593 if (v_lo
& TALITOS_CCPSR_LO_IEU
)
594 dev_err(dev
, is_sec1
? "static assignment error\n"
595 : "invalid exec unit error\n");
596 if (v_lo
& TALITOS_CCPSR_LO_EU
)
597 report_eu_error(dev
, ch
, current_desc_hdr(dev
, ch
));
599 if (v_lo
& TALITOS_CCPSR_LO_GB
)
600 dev_err(dev
, "gather boundary error\n");
601 if (v_lo
& TALITOS_CCPSR_LO_GRL
)
602 dev_err(dev
, "gather return/length error\n");
603 if (v_lo
& TALITOS_CCPSR_LO_SB
)
604 dev_err(dev
, "scatter boundary error\n");
605 if (v_lo
& TALITOS_CCPSR_LO_SRL
)
606 dev_err(dev
, "scatter return/length error\n");
609 flush_channel(dev
, ch
, error
, reset_ch
);
612 reset_channel(dev
, ch
);
614 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
616 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, 0);
617 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
618 TALITOS2_CCCR_CONT
) && --timeout
)
621 dev_err(dev
, "failed to restart channel %d\n",
627 if (reset_dev
|| (is_sec1
&& isr
& ~TALITOS1_ISR_4CHERR
) ||
628 (!is_sec1
&& isr
& ~TALITOS2_ISR_4CHERR
) || isr_lo
) {
629 if (is_sec1
&& (isr_lo
& TALITOS1_ISR_TEA_ERR
))
630 dev_err(dev
, "TEA error: ISR 0x%08x_%08x\n",
633 dev_err(dev
, "done overflow, internal time out, or "
634 "rngu error: ISR 0x%08x_%08x\n", isr
, isr_lo
);
636 /* purge request queues */
637 for (ch
= 0; ch
< priv
->num_channels
; ch
++)
638 flush_channel(dev
, ch
, -EIO
, 1);
640 /* reset and reinitialize the device */
645 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
646 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
648 struct device *dev = data; \
649 struct talitos_private *priv = dev_get_drvdata(dev); \
651 unsigned long flags; \
653 spin_lock_irqsave(&priv->reg_lock, flags); \
654 isr = in_be32(priv->reg + TALITOS_ISR); \
655 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
656 /* Acknowledge interrupt */ \
657 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
658 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
660 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
661 spin_unlock_irqrestore(&priv->reg_lock, flags); \
662 talitos_error(dev, isr & ch_err_mask, isr_lo); \
665 if (likely(isr & ch_done_mask)) { \
666 /* mask further done interrupts. */ \
667 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
668 /* done_task will unmask done interrupts at exit */ \
669 tasklet_schedule(&priv->done_task[tlet]); \
671 spin_unlock_irqrestore(&priv->reg_lock, flags); \
674 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
678 DEF_TALITOS1_INTERRUPT(4ch
, TALITOS1_ISR_4CHDONE
, TALITOS1_ISR_4CHERR
, 0)
680 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
681 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
683 struct device *dev = data; \
684 struct talitos_private *priv = dev_get_drvdata(dev); \
686 unsigned long flags; \
688 spin_lock_irqsave(&priv->reg_lock, flags); \
689 isr = in_be32(priv->reg + TALITOS_ISR); \
690 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
691 /* Acknowledge interrupt */ \
692 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
693 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
695 if (unlikely(isr & ch_err_mask || isr_lo)) { \
696 spin_unlock_irqrestore(&priv->reg_lock, flags); \
697 talitos_error(dev, isr & ch_err_mask, isr_lo); \
700 if (likely(isr & ch_done_mask)) { \
701 /* mask further done interrupts. */ \
702 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
703 /* done_task will unmask done interrupts at exit */ \
704 tasklet_schedule(&priv->done_task[tlet]); \
706 spin_unlock_irqrestore(&priv->reg_lock, flags); \
709 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
713 DEF_TALITOS2_INTERRUPT(4ch
, TALITOS2_ISR_4CHDONE
, TALITOS2_ISR_4CHERR
, 0)
714 DEF_TALITOS2_INTERRUPT(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
, TALITOS2_ISR_CH_0_2_ERR
,
716 DEF_TALITOS2_INTERRUPT(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
, TALITOS2_ISR_CH_1_3_ERR
,
722 static int talitos_rng_data_present(struct hwrng
*rng
, int wait
)
724 struct device
*dev
= (struct device
*)rng
->priv
;
725 struct talitos_private
*priv
= dev_get_drvdata(dev
);
729 for (i
= 0; i
< 20; i
++) {
730 ofl
= in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
) &
731 TALITOS_RNGUSR_LO_OFL
;
740 static int talitos_rng_data_read(struct hwrng
*rng
, u32
*data
)
742 struct device
*dev
= (struct device
*)rng
->priv
;
743 struct talitos_private
*priv
= dev_get_drvdata(dev
);
745 /* rng fifo requires 64-bit accesses */
746 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO
);
747 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO_LO
);
752 static int talitos_rng_init(struct hwrng
*rng
)
754 struct device
*dev
= (struct device
*)rng
->priv
;
755 struct talitos_private
*priv
= dev_get_drvdata(dev
);
756 unsigned int timeout
= TALITOS_TIMEOUT
;
758 setbits32(priv
->reg_rngu
+ TALITOS_EURCR_LO
, TALITOS_RNGURCR_LO_SR
);
759 while (!(in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
)
760 & TALITOS_RNGUSR_LO_RD
)
764 dev_err(dev
, "failed to reset rng hw\n");
768 /* start generating */
769 setbits32(priv
->reg_rngu
+ TALITOS_EUDSR_LO
, 0);
774 static int talitos_register_rng(struct device
*dev
)
776 struct talitos_private
*priv
= dev_get_drvdata(dev
);
779 priv
->rng
.name
= dev_driver_string(dev
),
780 priv
->rng
.init
= talitos_rng_init
,
781 priv
->rng
.data_present
= talitos_rng_data_present
,
782 priv
->rng
.data_read
= talitos_rng_data_read
,
783 priv
->rng
.priv
= (unsigned long)dev
;
785 err
= hwrng_register(&priv
->rng
);
787 priv
->rng_registered
= true;
792 static void talitos_unregister_rng(struct device
*dev
)
794 struct talitos_private
*priv
= dev_get_drvdata(dev
);
796 if (!priv
->rng_registered
)
799 hwrng_unregister(&priv
->rng
);
800 priv
->rng_registered
= false;
806 #define TALITOS_CRA_PRIORITY 3000
807 #define TALITOS_MAX_KEY_SIZE 96
808 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
813 __be32 desc_hdr_template
;
814 u8 key
[TALITOS_MAX_KEY_SIZE
];
815 u8 iv
[TALITOS_MAX_IV_LENGTH
];
817 unsigned int enckeylen
;
818 unsigned int authkeylen
;
821 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
822 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
824 struct talitos_ahash_req_ctx
{
825 u32 hw_context
[TALITOS_MDEU_MAX_CONTEXT_SIZE
/ sizeof(u32
)];
826 unsigned int hw_context_size
;
827 u8 buf
[HASH_MAX_BLOCK_SIZE
];
828 u8 bufnext
[HASH_MAX_BLOCK_SIZE
];
832 unsigned int to_hash_later
;
834 struct scatterlist bufsl
[2];
835 struct scatterlist
*psrc
;
838 static int aead_setkey(struct crypto_aead
*authenc
,
839 const u8
*key
, unsigned int keylen
)
841 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
842 struct crypto_authenc_keys keys
;
844 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
847 if (keys
.authkeylen
+ keys
.enckeylen
> TALITOS_MAX_KEY_SIZE
)
850 memcpy(ctx
->key
, keys
.authkey
, keys
.authkeylen
);
851 memcpy(&ctx
->key
[keys
.authkeylen
], keys
.enckey
, keys
.enckeylen
);
853 ctx
->keylen
= keys
.authkeylen
+ keys
.enckeylen
;
854 ctx
->enckeylen
= keys
.enckeylen
;
855 ctx
->authkeylen
= keys
.authkeylen
;
860 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
865 * talitos_edesc - s/w-extended descriptor
866 * @src_nents: number of segments in input scatterlist
867 * @dst_nents: number of segments in output scatterlist
868 * @icv_ool: whether ICV is out-of-line
869 * @iv_dma: dma address of iv for checking continuity and link table
870 * @dma_len: length of dma mapped link_tbl space
871 * @dma_link_tbl: bus physical address of link_tbl/buf
872 * @desc: h/w descriptor
873 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
874 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
876 * if decrypting (with authcheck), or either one of src_nents or dst_nents
877 * is greater than 1, an integrity check value is concatenated to the end
880 struct talitos_edesc
{
886 dma_addr_t dma_link_tbl
;
887 struct talitos_desc desc
;
889 struct talitos_ptr link_tbl
[0];
894 static void talitos_sg_unmap(struct device
*dev
,
895 struct talitos_edesc
*edesc
,
896 struct scatterlist
*src
,
897 struct scatterlist
*dst
)
899 unsigned int src_nents
= edesc
->src_nents
? : 1;
900 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
903 dma_unmap_sg(dev
, src
, src_nents
, DMA_TO_DEVICE
);
906 dma_unmap_sg(dev
, dst
, dst_nents
, DMA_FROM_DEVICE
);
909 dma_unmap_sg(dev
, src
, src_nents
, DMA_BIDIRECTIONAL
);
912 static void ipsec_esp_unmap(struct device
*dev
,
913 struct talitos_edesc
*edesc
,
914 struct aead_request
*areq
)
916 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[6], DMA_FROM_DEVICE
);
917 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[3], DMA_TO_DEVICE
);
918 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
919 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[0], DMA_TO_DEVICE
);
921 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
924 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
929 * ipsec_esp descriptor callbacks
931 static void ipsec_esp_encrypt_done(struct device
*dev
,
932 struct talitos_desc
*desc
, void *context
,
935 struct aead_request
*areq
= context
;
936 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
937 unsigned int authsize
= crypto_aead_authsize(authenc
);
938 struct talitos_edesc
*edesc
;
939 struct scatterlist
*sg
;
942 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
944 ipsec_esp_unmap(dev
, edesc
, areq
);
946 /* copy the generated ICV to dst */
947 if (edesc
->icv_ool
) {
948 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
949 edesc
->dst_nents
+ 2];
950 sg
= sg_last(areq
->dst
, edesc
->dst_nents
);
951 memcpy((char *)sg_virt(sg
) + sg
->length
- authsize
,
957 aead_request_complete(areq
, err
);
960 static void ipsec_esp_decrypt_swauth_done(struct device
*dev
,
961 struct talitos_desc
*desc
,
962 void *context
, int err
)
964 struct aead_request
*req
= context
;
965 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
966 unsigned int authsize
= crypto_aead_authsize(authenc
);
967 struct talitos_edesc
*edesc
;
968 struct scatterlist
*sg
;
971 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
973 ipsec_esp_unmap(dev
, edesc
, req
);
977 sg
= sg_last(req
->dst
, edesc
->dst_nents
? : 1);
978 icv
= (char *)sg_virt(sg
) + sg
->length
- authsize
;
980 if (edesc
->dma_len
) {
981 oicv
= (char *)&edesc
->link_tbl
[edesc
->src_nents
+
982 edesc
->dst_nents
+ 2];
984 icv
= oicv
+ authsize
;
986 oicv
= (char *)&edesc
->link_tbl
[0];
988 err
= crypto_memneq(oicv
, icv
, authsize
) ? -EBADMSG
: 0;
993 aead_request_complete(req
, err
);
996 static void ipsec_esp_decrypt_hwauth_done(struct device
*dev
,
997 struct talitos_desc
*desc
,
998 void *context
, int err
)
1000 struct aead_request
*req
= context
;
1001 struct talitos_edesc
*edesc
;
1003 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1005 ipsec_esp_unmap(dev
, edesc
, req
);
1007 /* check ICV auth status */
1008 if (!err
&& ((desc
->hdr_lo
& DESC_HDR_LO_ICCR1_MASK
) !=
1009 DESC_HDR_LO_ICCR1_PASS
))
1014 aead_request_complete(req
, err
);
1018 * convert scatterlist to SEC h/w link table format
1019 * stop at cryptlen bytes
1021 static int sg_to_link_tbl_offset(struct scatterlist
*sg
, int sg_count
,
1022 unsigned int offset
, int cryptlen
,
1023 struct talitos_ptr
*link_tbl_ptr
)
1025 int n_sg
= sg_count
;
1028 while (cryptlen
&& sg
&& n_sg
--) {
1029 unsigned int len
= sg_dma_len(sg
);
1031 if (offset
>= len
) {
1041 to_talitos_ptr(link_tbl_ptr
+ count
,
1042 sg_dma_address(sg
) + offset
, 0);
1043 link_tbl_ptr
[count
].len
= cpu_to_be16(len
);
1044 link_tbl_ptr
[count
].j_extent
= 0;
1053 /* tag end of link table */
1055 link_tbl_ptr
[count
- 1].j_extent
= DESC_PTR_LNKTBL_RETURN
;
1060 static inline int sg_to_link_tbl(struct scatterlist
*sg
, int sg_count
,
1062 struct talitos_ptr
*link_tbl_ptr
)
1064 return sg_to_link_tbl_offset(sg
, sg_count
, 0, cryptlen
,
1069 * fill in and submit ipsec_esp descriptor
1071 static int ipsec_esp(struct talitos_edesc
*edesc
, struct aead_request
*areq
,
1072 void (*callback
)(struct device
*dev
,
1073 struct talitos_desc
*desc
,
1074 void *context
, int error
))
1076 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
1077 unsigned int authsize
= crypto_aead_authsize(aead
);
1078 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
1079 struct device
*dev
= ctx
->dev
;
1080 struct talitos_desc
*desc
= &edesc
->desc
;
1081 unsigned int cryptlen
= areq
->cryptlen
;
1082 unsigned int ivsize
= crypto_aead_ivsize(aead
);
1085 int sg_link_tbl_len
;
1088 map_single_talitos_ptr(dev
, &desc
->ptr
[0], ctx
->authkeylen
, &ctx
->key
,
1091 sg_count
= dma_map_sg(dev
, areq
->src
, edesc
->src_nents
?: 1,
1092 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1095 desc
->ptr
[1].len
= cpu_to_be16(areq
->assoclen
);
1097 (ret
= sg_to_link_tbl_offset(areq
->src
, sg_count
, 0,
1099 &edesc
->link_tbl
[tbl_off
])) > 1) {
1100 to_talitos_ptr(&desc
->ptr
[1], edesc
->dma_link_tbl
+ tbl_off
*
1101 sizeof(struct talitos_ptr
), 0);
1102 desc
->ptr
[1].j_extent
= DESC_PTR_LNKTBL_JUMP
;
1104 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1105 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1109 to_talitos_ptr(&desc
->ptr
[1], sg_dma_address(areq
->src
), 0);
1110 desc
->ptr
[1].j_extent
= 0;
1114 to_talitos_ptr(&desc
->ptr
[2], edesc
->iv_dma
, 0);
1115 desc
->ptr
[2].len
= cpu_to_be16(ivsize
);
1116 desc
->ptr
[2].j_extent
= 0;
1119 map_single_talitos_ptr(dev
, &desc
->ptr
[3], ctx
->enckeylen
,
1120 (char *)&ctx
->key
+ ctx
->authkeylen
,
1125 * map and adjust cipher len to aead request cryptlen.
1126 * extent is bytes of HMAC postpended to ciphertext,
1127 * typically 12 for ipsec
1129 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1130 desc
->ptr
[4].j_extent
= authsize
;
1132 sg_link_tbl_len
= cryptlen
;
1133 if (edesc
->desc
.hdr
& DESC_HDR_MODE1_MDEU_CICV
)
1134 sg_link_tbl_len
+= authsize
;
1136 if (sg_count
== 1) {
1137 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->src
) +
1139 } else if ((ret
= sg_to_link_tbl_offset(areq
->src
, sg_count
,
1140 areq
->assoclen
, sg_link_tbl_len
,
1141 &edesc
->link_tbl
[tbl_off
])) >
1143 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1144 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
+
1146 sizeof(struct talitos_ptr
), 0);
1147 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1152 copy_talitos_ptr(&desc
->ptr
[4], &edesc
->link_tbl
[tbl_off
], 0);
1156 desc
->ptr
[5].len
= cpu_to_be16(cryptlen
);
1157 desc
->ptr
[5].j_extent
= authsize
;
1159 if (areq
->src
!= areq
->dst
)
1160 sg_count
= dma_map_sg(dev
, areq
->dst
, edesc
->dst_nents
? : 1,
1163 edesc
->icv_ool
= false;
1165 if (sg_count
== 1) {
1166 to_talitos_ptr(&desc
->ptr
[5], sg_dma_address(areq
->dst
) +
1168 } else if ((sg_count
=
1169 sg_to_link_tbl_offset(areq
->dst
, sg_count
,
1170 areq
->assoclen
, cryptlen
,
1171 &edesc
->link_tbl
[tbl_off
])) > 1) {
1172 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1174 to_talitos_ptr(&desc
->ptr
[5], edesc
->dma_link_tbl
+
1175 tbl_off
* sizeof(struct talitos_ptr
), 0);
1177 /* Add an entry to the link table for ICV data */
1178 tbl_ptr
+= sg_count
- 1;
1179 tbl_ptr
->j_extent
= 0;
1181 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1182 tbl_ptr
->len
= cpu_to_be16(authsize
);
1184 /* icv data follows link tables */
1185 to_talitos_ptr(tbl_ptr
, edesc
->dma_link_tbl
+
1186 (edesc
->src_nents
+ edesc
->dst_nents
+
1187 2) * sizeof(struct talitos_ptr
) +
1189 desc
->ptr
[5].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1190 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1191 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1193 edesc
->icv_ool
= true;
1195 copy_talitos_ptr(&desc
->ptr
[5], &edesc
->link_tbl
[tbl_off
], 0);
1199 map_single_talitos_ptr(dev
, &desc
->ptr
[6], ivsize
, ctx
->iv
,
1202 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1203 if (ret
!= -EINPROGRESS
) {
1204 ipsec_esp_unmap(dev
, edesc
, areq
);
1211 * allocate and map the extended descriptor
1213 static struct talitos_edesc
*talitos_edesc_alloc(struct device
*dev
,
1214 struct scatterlist
*src
,
1215 struct scatterlist
*dst
,
1217 unsigned int assoclen
,
1218 unsigned int cryptlen
,
1219 unsigned int authsize
,
1220 unsigned int ivsize
,
1225 struct talitos_edesc
*edesc
;
1226 int src_nents
, dst_nents
, alloc_len
, dma_len
;
1227 dma_addr_t iv_dma
= 0;
1228 gfp_t flags
= cryptoflags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1230 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1231 bool is_sec1
= has_ftr_sec1(priv
);
1232 int max_len
= is_sec1
? TALITOS1_MAX_DATA_LEN
: TALITOS2_MAX_DATA_LEN
;
1235 if (cryptlen
+ authsize
> max_len
) {
1236 dev_err(dev
, "length exceeds h/w max limit\n");
1237 return ERR_PTR(-EINVAL
);
1241 iv_dma
= dma_map_single(dev
, iv
, ivsize
, DMA_TO_DEVICE
);
1243 if (!dst
|| dst
== src
) {
1244 src_nents
= sg_nents_for_len(src
,
1245 assoclen
+ cryptlen
+ authsize
);
1246 if (src_nents
< 0) {
1247 dev_err(dev
, "Invalid number of src SG.\n");
1248 err
= ERR_PTR(-EINVAL
);
1251 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1252 dst_nents
= dst
? src_nents
: 0;
1253 } else { /* dst && dst != src*/
1254 src_nents
= sg_nents_for_len(src
, assoclen
+ cryptlen
+
1255 (encrypt
? 0 : authsize
));
1256 if (src_nents
< 0) {
1257 dev_err(dev
, "Invalid number of src SG.\n");
1258 err
= ERR_PTR(-EINVAL
);
1261 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1262 dst_nents
= sg_nents_for_len(dst
, assoclen
+ cryptlen
+
1263 (encrypt
? authsize
: 0));
1264 if (dst_nents
< 0) {
1265 dev_err(dev
, "Invalid number of dst SG.\n");
1266 err
= ERR_PTR(-EINVAL
);
1269 dst_nents
= (dst_nents
== 1) ? 0 : dst_nents
;
1273 * allocate space for base edesc plus the link tables,
1274 * allowing for two separate entries for AD and generated ICV (+ 2),
1275 * and space for two sets of ICVs (stashed and generated)
1277 alloc_len
= sizeof(struct talitos_edesc
);
1278 if (src_nents
|| dst_nents
) {
1280 dma_len
= (src_nents
? cryptlen
: 0) +
1281 (dst_nents
? cryptlen
: 0);
1283 dma_len
= (src_nents
+ dst_nents
+ 2) *
1284 sizeof(struct talitos_ptr
) + authsize
* 2;
1285 alloc_len
+= dma_len
;
1288 alloc_len
+= icv_stashing
? authsize
: 0;
1291 edesc
= kmalloc(alloc_len
, GFP_DMA
| flags
);
1293 dev_err(dev
, "could not allocate edescriptor\n");
1294 err
= ERR_PTR(-ENOMEM
);
1298 edesc
->src_nents
= src_nents
;
1299 edesc
->dst_nents
= dst_nents
;
1300 edesc
->iv_dma
= iv_dma
;
1301 edesc
->dma_len
= dma_len
;
1303 edesc
->dma_link_tbl
= dma_map_single(dev
, &edesc
->link_tbl
[0],
1310 dma_unmap_single(dev
, iv_dma
, ivsize
, DMA_TO_DEVICE
);
1314 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1315 int icv_stashing
, bool encrypt
)
1317 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1318 unsigned int authsize
= crypto_aead_authsize(authenc
);
1319 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1320 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1322 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1323 iv
, areq
->assoclen
, areq
->cryptlen
,
1324 authsize
, ivsize
, icv_stashing
,
1325 areq
->base
.flags
, encrypt
);
1328 static int aead_encrypt(struct aead_request
*req
)
1330 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1331 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1332 struct talitos_edesc
*edesc
;
1334 /* allocate extended descriptor */
1335 edesc
= aead_edesc_alloc(req
, req
->iv
, 0, true);
1337 return PTR_ERR(edesc
);
1340 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1342 return ipsec_esp(edesc
, req
, ipsec_esp_encrypt_done
);
1345 static int aead_decrypt(struct aead_request
*req
)
1347 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1348 unsigned int authsize
= crypto_aead_authsize(authenc
);
1349 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1350 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1351 struct talitos_edesc
*edesc
;
1352 struct scatterlist
*sg
;
1355 req
->cryptlen
-= authsize
;
1357 /* allocate extended descriptor */
1358 edesc
= aead_edesc_alloc(req
, req
->iv
, 1, false);
1360 return PTR_ERR(edesc
);
1362 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1363 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1364 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1366 /* decrypt and check the ICV */
1367 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1368 DESC_HDR_DIR_INBOUND
|
1369 DESC_HDR_MODE1_MDEU_CICV
;
1371 /* reset integrity check result bits */
1372 edesc
->desc
.hdr_lo
= 0;
1374 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_hwauth_done
);
1377 /* Have to check the ICV with software */
1378 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1380 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1382 icvdata
= (char *)&edesc
->link_tbl
[edesc
->src_nents
+
1383 edesc
->dst_nents
+ 2];
1385 icvdata
= &edesc
->link_tbl
[0];
1387 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1389 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- authsize
, authsize
);
1391 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_swauth_done
);
1394 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1395 const u8
*key
, unsigned int keylen
)
1397 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1399 memcpy(&ctx
->key
, key
, keylen
);
1400 ctx
->keylen
= keylen
;
1405 static void unmap_sg_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1406 struct scatterlist
*dst
, unsigned int len
,
1407 struct talitos_edesc
*edesc
)
1409 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1410 bool is_sec1
= has_ftr_sec1(priv
);
1413 if (!edesc
->src_nents
) {
1414 dma_unmap_sg(dev
, src
, 1,
1415 dst
!= src
? DMA_TO_DEVICE
1416 : DMA_BIDIRECTIONAL
);
1418 if (dst
&& edesc
->dst_nents
) {
1419 dma_sync_single_for_device(dev
,
1420 edesc
->dma_link_tbl
+ len
,
1421 len
, DMA_FROM_DEVICE
);
1422 sg_copy_from_buffer(dst
, edesc
->dst_nents
? : 1,
1423 edesc
->buf
+ len
, len
);
1424 } else if (dst
&& dst
!= src
) {
1425 dma_unmap_sg(dev
, dst
, 1, DMA_FROM_DEVICE
);
1428 talitos_sg_unmap(dev
, edesc
, src
, dst
);
1432 static void common_nonsnoop_unmap(struct device
*dev
,
1433 struct talitos_edesc
*edesc
,
1434 struct ablkcipher_request
*areq
)
1436 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1438 unmap_sg_talitos_ptr(dev
, areq
->src
, areq
->dst
, areq
->nbytes
, edesc
);
1439 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1440 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1443 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1447 static void ablkcipher_done(struct device
*dev
,
1448 struct talitos_desc
*desc
, void *context
,
1451 struct ablkcipher_request
*areq
= context
;
1452 struct talitos_edesc
*edesc
;
1454 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1456 common_nonsnoop_unmap(dev
, edesc
, areq
);
1460 areq
->base
.complete(&areq
->base
, err
);
1463 int map_sg_in_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1464 unsigned int len
, struct talitos_edesc
*edesc
,
1465 enum dma_data_direction dir
, struct talitos_ptr
*ptr
)
1468 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1469 bool is_sec1
= has_ftr_sec1(priv
);
1471 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1474 sg_count
= edesc
->src_nents
? : 1;
1476 if (sg_count
== 1) {
1477 dma_map_sg(dev
, src
, 1, dir
);
1478 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1480 sg_copy_to_buffer(src
, sg_count
, edesc
->buf
, len
);
1481 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, is_sec1
);
1482 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1483 len
, DMA_TO_DEVICE
);
1486 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1488 sg_count
= dma_map_sg(dev
, src
, edesc
->src_nents
? : 1, dir
);
1490 if (sg_count
== 1) {
1491 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1493 sg_count
= sg_to_link_tbl(src
, sg_count
, len
,
1494 &edesc
->link_tbl
[0]);
1496 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, 0);
1497 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1498 dma_sync_single_for_device(dev
,
1499 edesc
->dma_link_tbl
,
1503 /* Only one segment now, so no link tbl needed*/
1504 to_talitos_ptr(ptr
, sg_dma_address(src
),
1512 void map_sg_out_talitos_ptr(struct device
*dev
, struct scatterlist
*dst
,
1513 unsigned int len
, struct talitos_edesc
*edesc
,
1514 enum dma_data_direction dir
,
1515 struct talitos_ptr
*ptr
, int sg_count
)
1517 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1518 bool is_sec1
= has_ftr_sec1(priv
);
1520 if (dir
!= DMA_NONE
)
1521 sg_count
= dma_map_sg(dev
, dst
, edesc
->dst_nents
? : 1, dir
);
1523 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1526 if (sg_count
== 1) {
1527 if (dir
!= DMA_NONE
)
1528 dma_map_sg(dev
, dst
, 1, dir
);
1529 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1531 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+ len
, is_sec1
);
1532 dma_sync_single_for_device(dev
,
1533 edesc
->dma_link_tbl
+ len
,
1534 len
, DMA_FROM_DEVICE
);
1537 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1539 if (sg_count
== 1) {
1540 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1542 struct talitos_ptr
*link_tbl_ptr
=
1543 &edesc
->link_tbl
[edesc
->src_nents
+ 1];
1545 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+
1546 (edesc
->src_nents
+ 1) *
1547 sizeof(struct talitos_ptr
), 0);
1548 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1549 sg_to_link_tbl(dst
, sg_count
, len
, link_tbl_ptr
);
1550 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1557 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1558 struct ablkcipher_request
*areq
,
1559 void (*callback
) (struct device
*dev
,
1560 struct talitos_desc
*desc
,
1561 void *context
, int error
))
1563 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1564 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1565 struct device
*dev
= ctx
->dev
;
1566 struct talitos_desc
*desc
= &edesc
->desc
;
1567 unsigned int cryptlen
= areq
->nbytes
;
1568 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1570 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1571 bool is_sec1
= has_ftr_sec1(priv
);
1573 /* first DWORD empty */
1574 desc
->ptr
[0] = zero_entry
;
1577 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
, is_sec1
);
1578 to_talitos_ptr_len(&desc
->ptr
[1], ivsize
, is_sec1
);
1579 to_talitos_ptr_extent_clear(&desc
->ptr
[1], is_sec1
);
1582 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1583 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1588 sg_count
= map_sg_in_talitos_ptr(dev
, areq
->src
, cryptlen
, edesc
,
1589 (areq
->src
== areq
->dst
) ?
1590 DMA_BIDIRECTIONAL
: DMA_TO_DEVICE
,
1594 map_sg_out_talitos_ptr(dev
, areq
->dst
, cryptlen
, edesc
,
1595 (areq
->src
== areq
->dst
) ? DMA_NONE
1597 &desc
->ptr
[4], sg_count
);
1600 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
,
1603 /* last DWORD empty */
1604 desc
->ptr
[6] = zero_entry
;
1606 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1607 if (ret
!= -EINPROGRESS
) {
1608 common_nonsnoop_unmap(dev
, edesc
, areq
);
1614 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1617 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1618 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1619 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1621 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1622 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1623 areq
->base
.flags
, encrypt
);
1626 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1628 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1629 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1630 struct talitos_edesc
*edesc
;
1632 /* allocate extended descriptor */
1633 edesc
= ablkcipher_edesc_alloc(areq
, true);
1635 return PTR_ERR(edesc
);
1638 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1640 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1643 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1645 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1646 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1647 struct talitos_edesc
*edesc
;
1649 /* allocate extended descriptor */
1650 edesc
= ablkcipher_edesc_alloc(areq
, false);
1652 return PTR_ERR(edesc
);
1654 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1656 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1659 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1660 struct talitos_edesc
*edesc
,
1661 struct ahash_request
*areq
)
1663 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1664 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1665 bool is_sec1
= has_ftr_sec1(priv
);
1667 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1669 unmap_sg_talitos_ptr(dev
, req_ctx
->psrc
, NULL
, 0, edesc
);
1671 /* When using hashctx-in, must unmap it. */
1672 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[1], is_sec1
))
1673 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1676 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[2], is_sec1
))
1677 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1681 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1686 static void ahash_done(struct device
*dev
,
1687 struct talitos_desc
*desc
, void *context
,
1690 struct ahash_request
*areq
= context
;
1691 struct talitos_edesc
*edesc
=
1692 container_of(desc
, struct talitos_edesc
, desc
);
1693 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1695 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1696 /* Position any partial block for next update/final/finup */
1697 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1698 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1700 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1704 areq
->base
.complete(&areq
->base
, err
);
1708 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1709 * ourself and submit a padded block
1711 void talitos_handle_buggy_hash(struct talitos_ctx
*ctx
,
1712 struct talitos_edesc
*edesc
,
1713 struct talitos_ptr
*ptr
)
1715 static u8 padded_hash
[64] = {
1716 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1717 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1718 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1719 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1722 pr_err_once("Bug in SEC1, padding ourself\n");
1723 edesc
->desc
.hdr
&= ~DESC_HDR_MODE0_MDEU_PAD
;
1724 map_single_talitos_ptr(ctx
->dev
, ptr
, sizeof(padded_hash
),
1725 (char *)padded_hash
, DMA_TO_DEVICE
);
1728 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1729 struct ahash_request
*areq
, unsigned int length
,
1730 void (*callback
) (struct device
*dev
,
1731 struct talitos_desc
*desc
,
1732 void *context
, int error
))
1734 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1735 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1736 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1737 struct device
*dev
= ctx
->dev
;
1738 struct talitos_desc
*desc
= &edesc
->desc
;
1740 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1741 bool is_sec1
= has_ftr_sec1(priv
);
1743 /* first DWORD empty */
1744 desc
->ptr
[0] = zero_entry
;
1746 /* hash context in */
1747 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1748 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1749 req_ctx
->hw_context_size
,
1750 (char *)req_ctx
->hw_context
,
1752 req_ctx
->swinit
= 0;
1754 desc
->ptr
[1] = zero_entry
;
1755 /* Indicate next op is not the first. */
1761 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1762 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1764 desc
->ptr
[2] = zero_entry
;
1769 map_sg_in_talitos_ptr(dev
, req_ctx
->psrc
, length
, edesc
,
1770 DMA_TO_DEVICE
, &desc
->ptr
[3]);
1772 /* fifth DWORD empty */
1773 desc
->ptr
[4] = zero_entry
;
1775 /* hash/HMAC out -or- hash context out */
1777 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1778 crypto_ahash_digestsize(tfm
),
1779 areq
->result
, DMA_FROM_DEVICE
);
1781 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1782 req_ctx
->hw_context_size
,
1783 req_ctx
->hw_context
, DMA_FROM_DEVICE
);
1785 /* last DWORD empty */
1786 desc
->ptr
[6] = zero_entry
;
1788 if (is_sec1
&& from_talitos_ptr_len(&desc
->ptr
[3], true) == 0)
1789 talitos_handle_buggy_hash(ctx
, edesc
, &desc
->ptr
[3]);
1791 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1792 if (ret
!= -EINPROGRESS
) {
1793 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1799 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1800 unsigned int nbytes
)
1802 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1803 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1804 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1806 return talitos_edesc_alloc(ctx
->dev
, req_ctx
->psrc
, NULL
, NULL
, 0,
1807 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
1810 static int ahash_init(struct ahash_request
*areq
)
1812 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1813 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1815 /* Initialize the context */
1817 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1818 req_ctx
->swinit
= 0; /* assume h/w init of context */
1819 req_ctx
->hw_context_size
=
1820 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1821 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1822 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1828 * on h/w without explicit sha224 support, we initialize h/w context
1829 * manually with sha224 constants, and tell it to run sha256.
1831 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1833 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1836 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1838 req_ctx
->hw_context
[0] = SHA224_H0
;
1839 req_ctx
->hw_context
[1] = SHA224_H1
;
1840 req_ctx
->hw_context
[2] = SHA224_H2
;
1841 req_ctx
->hw_context
[3] = SHA224_H3
;
1842 req_ctx
->hw_context
[4] = SHA224_H4
;
1843 req_ctx
->hw_context
[5] = SHA224_H5
;
1844 req_ctx
->hw_context
[6] = SHA224_H6
;
1845 req_ctx
->hw_context
[7] = SHA224_H7
;
1847 /* init 64-bit count */
1848 req_ctx
->hw_context
[8] = 0;
1849 req_ctx
->hw_context
[9] = 0;
1854 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1856 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1857 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1858 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1859 struct talitos_edesc
*edesc
;
1860 unsigned int blocksize
=
1861 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1862 unsigned int nbytes_to_hash
;
1863 unsigned int to_hash_later
;
1867 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1868 /* Buffer up to one whole block */
1869 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1871 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1874 sg_copy_to_buffer(areq
->src
, nents
,
1875 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1876 req_ctx
->nbuf
+= nbytes
;
1880 /* At least (blocksize + 1) bytes are available to hash */
1881 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1882 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1886 else if (to_hash_later
)
1887 /* There is a partial block. Hash the full block(s) now */
1888 nbytes_to_hash
-= to_hash_later
;
1890 /* Keep one block buffered */
1891 nbytes_to_hash
-= blocksize
;
1892 to_hash_later
= blocksize
;
1895 /* Chain in any previously buffered data */
1896 if (req_ctx
->nbuf
) {
1897 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1898 sg_init_table(req_ctx
->bufsl
, nsg
);
1899 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1901 sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1902 req_ctx
->psrc
= req_ctx
->bufsl
;
1904 req_ctx
->psrc
= areq
->src
;
1906 if (to_hash_later
) {
1907 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1909 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1912 sg_pcopy_to_buffer(areq
->src
, nents
,
1915 nbytes
- to_hash_later
);
1917 req_ctx
->to_hash_later
= to_hash_later
;
1919 /* Allocate extended descriptor */
1920 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
1922 return PTR_ERR(edesc
);
1924 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
1926 /* On last one, request SEC to pad; otherwise continue */
1928 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
1930 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
1932 /* request SEC to INIT hash. */
1933 if (req_ctx
->first
&& !req_ctx
->swinit
)
1934 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
1936 /* When the tfm context has a keylen, it's an HMAC.
1937 * A first or last (ie. not middle) descriptor must request HMAC.
1939 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
1940 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
1942 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
1946 static int ahash_update(struct ahash_request
*areq
)
1948 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1952 return ahash_process_req(areq
, areq
->nbytes
);
1955 static int ahash_final(struct ahash_request
*areq
)
1957 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1961 return ahash_process_req(areq
, 0);
1964 static int ahash_finup(struct ahash_request
*areq
)
1966 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1970 return ahash_process_req(areq
, areq
->nbytes
);
1973 static int ahash_digest(struct ahash_request
*areq
)
1975 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1976 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
1981 return ahash_process_req(areq
, areq
->nbytes
);
1984 struct keyhash_result
{
1985 struct completion completion
;
1989 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
1991 struct keyhash_result
*res
= req
->data
;
1993 if (err
== -EINPROGRESS
)
1997 complete(&res
->completion
);
2000 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
2003 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2005 struct scatterlist sg
[1];
2006 struct ahash_request
*req
;
2007 struct keyhash_result hresult
;
2010 init_completion(&hresult
.completion
);
2012 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
2016 /* Keep tfm keylen == 0 during hash of the long key */
2018 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
2019 keyhash_complete
, &hresult
);
2021 sg_init_one(&sg
[0], key
, keylen
);
2023 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
2024 ret
= crypto_ahash_digest(req
);
2030 ret
= wait_for_completion_interruptible(
2031 &hresult
.completion
);
2038 ahash_request_free(req
);
2043 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
2044 unsigned int keylen
)
2046 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2047 unsigned int blocksize
=
2048 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2049 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
2050 unsigned int keysize
= keylen
;
2051 u8 hash
[SHA512_DIGEST_SIZE
];
2054 if (keylen
<= blocksize
)
2055 memcpy(ctx
->key
, key
, keysize
);
2057 /* Must get the hash of the long key */
2058 ret
= keyhash(tfm
, key
, keylen
, hash
);
2061 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
2065 keysize
= digestsize
;
2066 memcpy(ctx
->key
, hash
, digestsize
);
2069 ctx
->keylen
= keysize
;
2075 struct talitos_alg_template
{
2078 struct crypto_alg crypto
;
2079 struct ahash_alg hash
;
2080 struct aead_alg aead
;
2082 __be32 desc_hdr_template
;
2085 static struct talitos_alg_template driver_algs
[] = {
2086 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2087 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2090 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
2091 .cra_driver_name
= "authenc-hmac-sha1-"
2093 .cra_blocksize
= AES_BLOCK_SIZE
,
2094 .cra_flags
= CRYPTO_ALG_ASYNC
,
2096 .ivsize
= AES_BLOCK_SIZE
,
2097 .maxauthsize
= SHA1_DIGEST_SIZE
,
2099 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2100 DESC_HDR_SEL0_AESU
|
2101 DESC_HDR_MODE0_AESU_CBC
|
2102 DESC_HDR_SEL1_MDEUA
|
2103 DESC_HDR_MODE1_MDEU_INIT
|
2104 DESC_HDR_MODE1_MDEU_PAD
|
2105 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2107 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2110 .cra_name
= "authenc(hmac(sha1),"
2112 .cra_driver_name
= "authenc-hmac-sha1-"
2114 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2115 .cra_flags
= CRYPTO_ALG_ASYNC
,
2117 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2118 .maxauthsize
= SHA1_DIGEST_SIZE
,
2120 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2122 DESC_HDR_MODE0_DEU_CBC
|
2123 DESC_HDR_MODE0_DEU_3DES
|
2124 DESC_HDR_SEL1_MDEUA
|
2125 DESC_HDR_MODE1_MDEU_INIT
|
2126 DESC_HDR_MODE1_MDEU_PAD
|
2127 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2129 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2132 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
2133 .cra_driver_name
= "authenc-hmac-sha224-"
2135 .cra_blocksize
= AES_BLOCK_SIZE
,
2136 .cra_flags
= CRYPTO_ALG_ASYNC
,
2138 .ivsize
= AES_BLOCK_SIZE
,
2139 .maxauthsize
= SHA224_DIGEST_SIZE
,
2141 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2142 DESC_HDR_SEL0_AESU
|
2143 DESC_HDR_MODE0_AESU_CBC
|
2144 DESC_HDR_SEL1_MDEUA
|
2145 DESC_HDR_MODE1_MDEU_INIT
|
2146 DESC_HDR_MODE1_MDEU_PAD
|
2147 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2149 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2152 .cra_name
= "authenc(hmac(sha224),"
2154 .cra_driver_name
= "authenc-hmac-sha224-"
2156 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2157 .cra_flags
= CRYPTO_ALG_ASYNC
,
2159 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2160 .maxauthsize
= SHA224_DIGEST_SIZE
,
2162 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2164 DESC_HDR_MODE0_DEU_CBC
|
2165 DESC_HDR_MODE0_DEU_3DES
|
2166 DESC_HDR_SEL1_MDEUA
|
2167 DESC_HDR_MODE1_MDEU_INIT
|
2168 DESC_HDR_MODE1_MDEU_PAD
|
2169 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2171 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2174 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
2175 .cra_driver_name
= "authenc-hmac-sha256-"
2177 .cra_blocksize
= AES_BLOCK_SIZE
,
2178 .cra_flags
= CRYPTO_ALG_ASYNC
,
2180 .ivsize
= AES_BLOCK_SIZE
,
2181 .maxauthsize
= SHA256_DIGEST_SIZE
,
2183 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2184 DESC_HDR_SEL0_AESU
|
2185 DESC_HDR_MODE0_AESU_CBC
|
2186 DESC_HDR_SEL1_MDEUA
|
2187 DESC_HDR_MODE1_MDEU_INIT
|
2188 DESC_HDR_MODE1_MDEU_PAD
|
2189 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2191 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2194 .cra_name
= "authenc(hmac(sha256),"
2196 .cra_driver_name
= "authenc-hmac-sha256-"
2198 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2199 .cra_flags
= CRYPTO_ALG_ASYNC
,
2201 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2202 .maxauthsize
= SHA256_DIGEST_SIZE
,
2204 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2206 DESC_HDR_MODE0_DEU_CBC
|
2207 DESC_HDR_MODE0_DEU_3DES
|
2208 DESC_HDR_SEL1_MDEUA
|
2209 DESC_HDR_MODE1_MDEU_INIT
|
2210 DESC_HDR_MODE1_MDEU_PAD
|
2211 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2213 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2216 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
2217 .cra_driver_name
= "authenc-hmac-sha384-"
2219 .cra_blocksize
= AES_BLOCK_SIZE
,
2220 .cra_flags
= CRYPTO_ALG_ASYNC
,
2222 .ivsize
= AES_BLOCK_SIZE
,
2223 .maxauthsize
= SHA384_DIGEST_SIZE
,
2225 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2226 DESC_HDR_SEL0_AESU
|
2227 DESC_HDR_MODE0_AESU_CBC
|
2228 DESC_HDR_SEL1_MDEUB
|
2229 DESC_HDR_MODE1_MDEU_INIT
|
2230 DESC_HDR_MODE1_MDEU_PAD
|
2231 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2233 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2236 .cra_name
= "authenc(hmac(sha384),"
2238 .cra_driver_name
= "authenc-hmac-sha384-"
2240 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2241 .cra_flags
= CRYPTO_ALG_ASYNC
,
2243 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2244 .maxauthsize
= SHA384_DIGEST_SIZE
,
2246 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2248 DESC_HDR_MODE0_DEU_CBC
|
2249 DESC_HDR_MODE0_DEU_3DES
|
2250 DESC_HDR_SEL1_MDEUB
|
2251 DESC_HDR_MODE1_MDEU_INIT
|
2252 DESC_HDR_MODE1_MDEU_PAD
|
2253 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2255 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2258 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
2259 .cra_driver_name
= "authenc-hmac-sha512-"
2261 .cra_blocksize
= AES_BLOCK_SIZE
,
2262 .cra_flags
= CRYPTO_ALG_ASYNC
,
2264 .ivsize
= AES_BLOCK_SIZE
,
2265 .maxauthsize
= SHA512_DIGEST_SIZE
,
2267 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2268 DESC_HDR_SEL0_AESU
|
2269 DESC_HDR_MODE0_AESU_CBC
|
2270 DESC_HDR_SEL1_MDEUB
|
2271 DESC_HDR_MODE1_MDEU_INIT
|
2272 DESC_HDR_MODE1_MDEU_PAD
|
2273 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2275 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2278 .cra_name
= "authenc(hmac(sha512),"
2280 .cra_driver_name
= "authenc-hmac-sha512-"
2282 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2283 .cra_flags
= CRYPTO_ALG_ASYNC
,
2285 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2286 .maxauthsize
= SHA512_DIGEST_SIZE
,
2288 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2290 DESC_HDR_MODE0_DEU_CBC
|
2291 DESC_HDR_MODE0_DEU_3DES
|
2292 DESC_HDR_SEL1_MDEUB
|
2293 DESC_HDR_MODE1_MDEU_INIT
|
2294 DESC_HDR_MODE1_MDEU_PAD
|
2295 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2297 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2300 .cra_name
= "authenc(hmac(md5),cbc(aes))",
2301 .cra_driver_name
= "authenc-hmac-md5-"
2303 .cra_blocksize
= AES_BLOCK_SIZE
,
2304 .cra_flags
= CRYPTO_ALG_ASYNC
,
2306 .ivsize
= AES_BLOCK_SIZE
,
2307 .maxauthsize
= MD5_DIGEST_SIZE
,
2309 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2310 DESC_HDR_SEL0_AESU
|
2311 DESC_HDR_MODE0_AESU_CBC
|
2312 DESC_HDR_SEL1_MDEUA
|
2313 DESC_HDR_MODE1_MDEU_INIT
|
2314 DESC_HDR_MODE1_MDEU_PAD
|
2315 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2317 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2320 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
2321 .cra_driver_name
= "authenc-hmac-md5-"
2323 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2324 .cra_flags
= CRYPTO_ALG_ASYNC
,
2326 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2327 .maxauthsize
= MD5_DIGEST_SIZE
,
2329 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2331 DESC_HDR_MODE0_DEU_CBC
|
2332 DESC_HDR_MODE0_DEU_3DES
|
2333 DESC_HDR_SEL1_MDEUA
|
2334 DESC_HDR_MODE1_MDEU_INIT
|
2335 DESC_HDR_MODE1_MDEU_PAD
|
2336 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2338 /* ABLKCIPHER algorithms. */
2339 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2341 .cra_name
= "ecb(aes)",
2342 .cra_driver_name
= "ecb-aes-talitos",
2343 .cra_blocksize
= AES_BLOCK_SIZE
,
2344 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2347 .min_keysize
= AES_MIN_KEY_SIZE
,
2348 .max_keysize
= AES_MAX_KEY_SIZE
,
2349 .ivsize
= AES_BLOCK_SIZE
,
2352 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2355 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2357 .cra_name
= "cbc(aes)",
2358 .cra_driver_name
= "cbc-aes-talitos",
2359 .cra_blocksize
= AES_BLOCK_SIZE
,
2360 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2363 .min_keysize
= AES_MIN_KEY_SIZE
,
2364 .max_keysize
= AES_MAX_KEY_SIZE
,
2365 .ivsize
= AES_BLOCK_SIZE
,
2368 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2369 DESC_HDR_SEL0_AESU
|
2370 DESC_HDR_MODE0_AESU_CBC
,
2372 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2374 .cra_name
= "ctr(aes)",
2375 .cra_driver_name
= "ctr-aes-talitos",
2376 .cra_blocksize
= AES_BLOCK_SIZE
,
2377 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2380 .min_keysize
= AES_MIN_KEY_SIZE
,
2381 .max_keysize
= AES_MAX_KEY_SIZE
,
2382 .ivsize
= AES_BLOCK_SIZE
,
2385 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2386 DESC_HDR_SEL0_AESU
|
2387 DESC_HDR_MODE0_AESU_CTR
,
2389 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2391 .cra_name
= "ecb(des)",
2392 .cra_driver_name
= "ecb-des-talitos",
2393 .cra_blocksize
= DES_BLOCK_SIZE
,
2394 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2397 .min_keysize
= DES_KEY_SIZE
,
2398 .max_keysize
= DES_KEY_SIZE
,
2399 .ivsize
= DES_BLOCK_SIZE
,
2402 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2405 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2407 .cra_name
= "cbc(des)",
2408 .cra_driver_name
= "cbc-des-talitos",
2409 .cra_blocksize
= DES_BLOCK_SIZE
,
2410 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2413 .min_keysize
= DES_KEY_SIZE
,
2414 .max_keysize
= DES_KEY_SIZE
,
2415 .ivsize
= DES_BLOCK_SIZE
,
2418 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2420 DESC_HDR_MODE0_DEU_CBC
,
2422 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2424 .cra_name
= "ecb(des3_ede)",
2425 .cra_driver_name
= "ecb-3des-talitos",
2426 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2427 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2430 .min_keysize
= DES3_EDE_KEY_SIZE
,
2431 .max_keysize
= DES3_EDE_KEY_SIZE
,
2432 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2435 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2437 DESC_HDR_MODE0_DEU_3DES
,
2439 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2441 .cra_name
= "cbc(des3_ede)",
2442 .cra_driver_name
= "cbc-3des-talitos",
2443 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2444 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2447 .min_keysize
= DES3_EDE_KEY_SIZE
,
2448 .max_keysize
= DES3_EDE_KEY_SIZE
,
2449 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2452 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2454 DESC_HDR_MODE0_DEU_CBC
|
2455 DESC_HDR_MODE0_DEU_3DES
,
2457 /* AHASH algorithms. */
2458 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2460 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2463 .cra_driver_name
= "md5-talitos",
2464 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2465 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2469 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2470 DESC_HDR_SEL0_MDEUA
|
2471 DESC_HDR_MODE0_MDEU_MD5
,
2473 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2475 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2478 .cra_driver_name
= "sha1-talitos",
2479 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2480 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2484 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2485 DESC_HDR_SEL0_MDEUA
|
2486 DESC_HDR_MODE0_MDEU_SHA1
,
2488 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2490 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2492 .cra_name
= "sha224",
2493 .cra_driver_name
= "sha224-talitos",
2494 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2495 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2499 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2500 DESC_HDR_SEL0_MDEUA
|
2501 DESC_HDR_MODE0_MDEU_SHA224
,
2503 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2505 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2507 .cra_name
= "sha256",
2508 .cra_driver_name
= "sha256-talitos",
2509 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2510 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2514 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2515 DESC_HDR_SEL0_MDEUA
|
2516 DESC_HDR_MODE0_MDEU_SHA256
,
2518 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2520 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2522 .cra_name
= "sha384",
2523 .cra_driver_name
= "sha384-talitos",
2524 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2525 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2529 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2530 DESC_HDR_SEL0_MDEUB
|
2531 DESC_HDR_MODE0_MDEUB_SHA384
,
2533 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2535 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2537 .cra_name
= "sha512",
2538 .cra_driver_name
= "sha512-talitos",
2539 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2540 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2544 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2545 DESC_HDR_SEL0_MDEUB
|
2546 DESC_HDR_MODE0_MDEUB_SHA512
,
2548 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2550 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2552 .cra_name
= "hmac(md5)",
2553 .cra_driver_name
= "hmac-md5-talitos",
2554 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2555 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2559 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2560 DESC_HDR_SEL0_MDEUA
|
2561 DESC_HDR_MODE0_MDEU_MD5
,
2563 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2565 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2567 .cra_name
= "hmac(sha1)",
2568 .cra_driver_name
= "hmac-sha1-talitos",
2569 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2570 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2574 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2575 DESC_HDR_SEL0_MDEUA
|
2576 DESC_HDR_MODE0_MDEU_SHA1
,
2578 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2580 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2582 .cra_name
= "hmac(sha224)",
2583 .cra_driver_name
= "hmac-sha224-talitos",
2584 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2585 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2589 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2590 DESC_HDR_SEL0_MDEUA
|
2591 DESC_HDR_MODE0_MDEU_SHA224
,
2593 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2595 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2597 .cra_name
= "hmac(sha256)",
2598 .cra_driver_name
= "hmac-sha256-talitos",
2599 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2600 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2604 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2605 DESC_HDR_SEL0_MDEUA
|
2606 DESC_HDR_MODE0_MDEU_SHA256
,
2608 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2610 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2612 .cra_name
= "hmac(sha384)",
2613 .cra_driver_name
= "hmac-sha384-talitos",
2614 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2615 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2619 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2620 DESC_HDR_SEL0_MDEUB
|
2621 DESC_HDR_MODE0_MDEUB_SHA384
,
2623 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2625 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2627 .cra_name
= "hmac(sha512)",
2628 .cra_driver_name
= "hmac-sha512-talitos",
2629 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2630 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2634 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2635 DESC_HDR_SEL0_MDEUB
|
2636 DESC_HDR_MODE0_MDEUB_SHA512
,
/*
 * Per-registered-algorithm bookkeeping: carries one algorithm template and
 * links it into the driver's private alg_list (see talitos_remove below,
 * which walks this list via the 'entry' member).
 * NOTE(review): the extraction appears to have dropped lines here — at
 * minimum the closing brace, and presumably a 'struct device *dev' member
 * (talitos_init_common below reads talitos_alg->dev). Verify against the
 * complete source.
 */
2640 struct talitos_crypto_alg
{
2641 struct list_head entry
;
2643 struct talitos_alg_template algt
;
/*
 * Common transform-context initialization shared by the cipher/aead/ahash
 * cra_init paths: records the owning device, assigns a SEC channel, and
 * seeds the context's descriptor header template from the algorithm
 * template.
 * NOTE(review): opening/closing braces and the final return appear to have
 * been dropped by the extraction — confirm against the full source.
 */
2646 static int talitos_init_common(struct talitos_ctx
*ctx
,
2647 struct talitos_crypto_alg
*talitos_alg
)
2649 struct talitos_private
*priv
;
2651 /* update context with ptr to dev */
2652 ctx
->dev
= talitos_alg
->dev
;
2654 /* assign SEC channel to tfm in round-robin fashion */
2655 priv
= dev_get_drvdata(ctx
->dev
);
/*
 * Round-robin channel selection: the AND-mask trick below relies on
 * num_channels being a power of two, which talitos_probe validates with
 * is_power_of_2() before registering algorithms.
 */
2656 ctx
->ch
= atomic_inc_return(&priv
->last_chan
) &
2657 (priv
->num_channels
- 1);
2659 /* copy descriptor header template value */
2660 ctx
->desc_hdr_template
= talitos_alg
->algt
.desc_hdr_template
;
2662 /* select done notification */
2663 ctx
->desc_hdr_template
|= DESC_HDR_DONE_NOTIFY
;
/*
 * cra_init hook for (non-AEAD) crypto_alg-based algorithms. Recovers the
 * enclosing talitos_crypto_alg from the generic crypto_alg via
 * container_of — using the ahash-specific cast when the alg type is AHASH
 * — then defers to talitos_init_common.
 * NOTE(review): the container_of member arguments and the else branch's
 * surrounding braces appear to be missing from this extraction.
 */
2668 static int talitos_cra_init(struct crypto_tfm
*tfm
)
2670 struct crypto_alg
*alg
= tfm
->__crt_alg
;
2671 struct talitos_crypto_alg
*talitos_alg
;
2672 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2674 if ((alg
->cra_flags
& CRYPTO_ALG_TYPE_MASK
) == CRYPTO_ALG_TYPE_AHASH
)
2675 talitos_alg
= container_of(__crypto_ahash_alg(alg
),
2676 struct talitos_crypto_alg
,
2679 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2682 return talitos_init_common(ctx
, talitos_alg
);
/*
 * init hook for AEAD transforms: recovers the talitos_crypto_alg wrapper
 * from the aead_alg via container_of and delegates to talitos_init_common.
 * NOTE(review): the container_of third argument (the member name) is not
 * visible in this extraction — verify against the full source.
 */
2685 static int talitos_cra_init_aead(struct crypto_aead
*tfm
)
2687 struct aead_alg
*alg
= crypto_aead_alg(tfm
);
2688 struct talitos_crypto_alg
*talitos_alg
;
2689 struct talitos_ctx
*ctx
= crypto_aead_ctx(tfm
);
2691 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2694 return talitos_init_common(ctx
, talitos_alg
);
/*
 * cra_init hook for ahash transforms: runs the common init path and then
 * sets the per-request context size for the ahash layer.
 * NOTE(review): 'ctx' is declared but not visibly used in this fragment —
 * presumably consumed by lines the extraction dropped; confirm.
 */
2697 static int talitos_cra_init_ahash(struct crypto_tfm
*tfm
)
2699 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2701 talitos_cra_init(tfm
);
/* tell the ahash core how much per-request state this driver needs */
2704 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2705 sizeof(struct talitos_ahash_req_ctx
));
2711 * given the alg's descriptor header template, determine whether descriptor
2712 * type and primary/secondary execution units required match the hw
2713 * capabilities description provided in the device tree node.
/*
 * Returns non-zero when both the descriptor type bit and the primary EU
 * bit (and, if present, the secondary EU bit) are set in the capability
 * masks read from the device tree (priv->desc_types / priv->exec_units).
 * NOTE(review): the declaration of 'ret' and the final return are not
 * visible in this extraction.
 */
2715 static int hw_supports(struct device
*dev
, __be32 desc_hdr_template
)
2717 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2720 ret
= (1 << DESC_TYPE(desc_hdr_template
) & priv
->desc_types
) &&
2721 (1 << PRIMARY_EU(desc_hdr_template
) & priv
->exec_units
);
2723 if (SECONDARY_EU(desc_hdr_template
))
2724 ret
= ret
&& (1 << SECONDARY_EU(desc_hdr_template
)
2725 & priv
->exec_units
);
/*
 * Platform-driver remove: unregisters every algorithm on priv->alg_list,
 * unregisters the RNG if the hardware advertised it, frees the per-channel
 * request FIFOs, releases both IRQ lines, and kills the done tasklets.
 * NOTE(review): several lines are clearly missing from this extraction —
 * the switch cases lack their unregister calls'/breaks' pairing (e.g. the
 * ABLKCIPHER case body), kfree(t_alg), iounmap/kfree of priv, and the
 * final return. Do not infer fall-through semantics from what is visible.
 */
2730 static int talitos_remove(struct platform_device
*ofdev
)
2732 struct device
*dev
= &ofdev
->dev
;
2733 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2734 struct talitos_crypto_alg
*t_alg
, *n
;
/* _safe variant: entries are deleted from the list while iterating */
2737 list_for_each_entry_safe(t_alg
, n
, &priv
->alg_list
, entry
) {
2738 switch (t_alg
->algt
.type
) {
2739 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2741 case CRYPTO_ALG_TYPE_AEAD
:
2742 crypto_unregister_aead(&t_alg
->algt
.alg
.aead
);
2743 case CRYPTO_ALG_TYPE_AHASH
:
2744 crypto_unregister_ahash(&t_alg
->algt
.alg
.hash
);
2747 list_del(&t_alg
->entry
);
2751 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
))
2752 talitos_unregister_rng(dev
);
/* priv->chan may be NULL if probe failed before channel allocation */
2754 for (i
= 0; priv
->chan
&& i
< priv
->num_channels
; i
++)
2755 kfree(priv
->chan
[i
].fifo
);
2759 for (i
= 0; i
< 2; i
++)
2761 free_irq(priv
->irq
[i
], dev
);
2762 irq_dispose_mapping(priv
->irq
[i
]);
2765 tasklet_kill(&priv
->done_task
[0]);
2767 tasklet_kill(&priv
->done_task
[1]);
/*
 * Allocate and populate a talitos_crypto_alg from a driver template:
 * copies the template, wires up the type-specific entry points
 * (ablkcipher / aead / ahash), applies hardware-feature fallbacks for
 * ahash (reject HMAC without TALITOS_FTR_HMAC_OK; software-init sha224
 * without TALITOS_FTR_SHA224_HWINIT), then fills in the common crypto_alg
 * fields. Returns ERR_PTR(-ENOMEM/-ENOTSUPP/-EINVAL) on failure.
 * NOTE(review): the extraction dropped lines throughout — the template
 * parameter's name, NULL-check after kzalloc, 'break' statements between
 * switch cases, kfree(t_alg) on the error paths, the t_alg->dev assignment
 * and final return. Verify against the full source before relying on the
 * visible control flow.
 */
2776 static struct talitos_crypto_alg
*talitos_alg_alloc(struct device
*dev
,
2777 struct talitos_alg_template
2780 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2781 struct talitos_crypto_alg
*t_alg
;
2782 struct crypto_alg
*alg
;
2784 t_alg
= kzalloc(sizeof(struct talitos_crypto_alg
), GFP_KERNEL
);
2786 return ERR_PTR(-ENOMEM
);
/* struct copy of the whole template into the new wrapper */
2788 t_alg
->algt
= *template;
2790 switch (t_alg
->algt
.type
) {
2791 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2792 alg
= &t_alg
->algt
.alg
.crypto
;
2793 alg
->cra_init
= talitos_cra_init
;
2794 alg
->cra_type
= &crypto_ablkcipher_type
;
2795 alg
->cra_ablkcipher
.setkey
= ablkcipher_setkey
;
2796 alg
->cra_ablkcipher
.encrypt
= ablkcipher_encrypt
;
2797 alg
->cra_ablkcipher
.decrypt
= ablkcipher_decrypt
;
2798 alg
->cra_ablkcipher
.geniv
= "eseqiv";
2800 case CRYPTO_ALG_TYPE_AEAD
:
2801 alg
= &t_alg
->algt
.alg
.aead
.base
;
2802 t_alg
->algt
.alg
.aead
.init
= talitos_cra_init_aead
;
2803 t_alg
->algt
.alg
.aead
.setkey
= aead_setkey
;
2804 t_alg
->algt
.alg
.aead
.encrypt
= aead_encrypt
;
2805 t_alg
->algt
.alg
.aead
.decrypt
= aead_decrypt
;
2807 case CRYPTO_ALG_TYPE_AHASH
:
2808 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
2809 alg
->cra_init
= talitos_cra_init_ahash
;
2810 alg
->cra_type
= &crypto_ahash_type
;
2811 t_alg
->algt
.alg
.hash
.init
= ahash_init
;
2812 t_alg
->algt
.alg
.hash
.update
= ahash_update
;
2813 t_alg
->algt
.alg
.hash
.final
= ahash_final
;
2814 t_alg
->algt
.alg
.hash
.finup
= ahash_finup
;
2815 t_alg
->algt
.alg
.hash
.digest
= ahash_digest
;
2816 t_alg
->algt
.alg
.hash
.setkey
= ahash_setkey
;
/* hardware without working HMAC support cannot offer hmac(*) algs */
2818 if (!(priv
->features
& TALITOS_FTR_HMAC_OK
) &&
2819 !strncmp(alg
->cra_name
, "hmac", 4)) {
2821 return ERR_PTR(-ENOTSUPP
);
/*
 * Older SEC revisions cannot hw-init sha224 state: fall back to a
 * software-init entry point and drive the MDEU in SHA256 mode.
 */
2823 if (!(priv
->features
& TALITOS_FTR_SHA224_HWINIT
) &&
2824 (!strcmp(alg
->cra_name
, "sha224") ||
2825 !strcmp(alg
->cra_name
, "hmac(sha224)"))) {
2826 t_alg
->algt
.alg
.hash
.init
= ahash_init_sha224_swinit
;
2827 t_alg
->algt
.desc_hdr_template
=
2828 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2829 DESC_HDR_SEL0_MDEUA
|
2830 DESC_HDR_MODE0_MDEU_SHA256
;
2834 dev_err(dev
, "unknown algorithm type %d\n", t_alg
->algt
.type
);
2836 return ERR_PTR(-EINVAL
);
/* fields common to all three algorithm flavors */
2839 alg
->cra_module
= THIS_MODULE
;
2840 alg
->cra_priority
= TALITOS_CRA_PRIORITY
;
2841 alg
->cra_alignmask
= 0;
2842 alg
->cra_ctxsize
= sizeof(struct talitos_ctx
);
2843 alg
->cra_flags
|= CRYPTO_ALG_KERN_DRIVER_ONLY
;
/*
 * Map and request the device's interrupt line(s) from the device tree.
 * SEC1 parts use a single IRQ (talitos1_interrupt_4ch); SEC2 parts use
 * either one IRQ for all four channels (talitos2_interrupt_4ch) or two
 * IRQs split as channels 0/2 and 1/3.
 * NOTE(review): error-return lines, the is_sec1 branch structure, and the
 * final return were dropped by the extraction; 'is_sec1' appears unused in
 * the visible fragment only because its consuming branch is missing.
 */
2850 static int talitos_probe_irq(struct platform_device
*ofdev
)
2852 struct device
*dev
= &ofdev
->dev
;
2853 struct device_node
*np
= ofdev
->dev
.of_node
;
2854 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2856 bool is_sec1
= has_ftr_sec1(priv
);
2858 priv
->irq
[0] = irq_of_parse_and_map(np
, 0);
2859 if (!priv
->irq
[0]) {
2860 dev_err(dev
, "failed to map irq\n");
/* SEC1: single interrupt services all four channels */
2864 err
= request_irq(priv
->irq
[0], talitos1_interrupt_4ch
, 0,
2865 dev_driver_string(dev
), dev
);
2869 priv
->irq
[1] = irq_of_parse_and_map(np
, 1);
2871 /* get the primary irq line */
2872 if (!priv
->irq
[1]) {
2873 err
= request_irq(priv
->irq
[0], talitos2_interrupt_4ch
, 0,
2874 dev_driver_string(dev
), dev
);
2878 err
= request_irq(priv
->irq
[0], talitos2_interrupt_ch0_2
, 0,
2879 dev_driver_string(dev
), dev
);
2883 /* get the secondary irq line */
2884 err
= request_irq(priv
->irq
[1], talitos2_interrupt_ch1_3
, 0,
2885 dev_driver_string(dev
), dev
);
2887 dev_err(dev
, "failed to request secondary irq\n");
2888 irq_dispose_mapping(priv
->irq
[1]);
2896 dev_err(dev
, "failed to request primary irq\n");
2897 irq_dispose_mapping(priv
->irq
[0]);
/*
 * Platform-driver probe: allocates the driver-private structure, maps the
 * register block, reads capability properties from the device tree,
 * computes per-SEC-revision register offsets and channel stride, requests
 * IRQs, sets up done tasklets and per-channel request FIFOs, resets the
 * hardware, and registers the RNG and every supported crypto algorithm.
 * NOTE(review): many lines were dropped by the extraction — NULL/err
 * checks after allocations and of_iomap, goto-cleanup labels and returns,
 * 'break's between register-switch cases, and the 'int i, err' / 'stride'
 * declarations. The visible sequence shows the happy path only.
 */
2904 static int talitos_probe(struct platform_device
*ofdev
)
2906 struct device
*dev
= &ofdev
->dev
;
2907 struct device_node
*np
= ofdev
->dev
.of_node
;
2908 struct talitos_private
*priv
;
2909 const unsigned int *prop
;
2913 priv
= kzalloc(sizeof(struct talitos_private
), GFP_KERNEL
);
2917 INIT_LIST_HEAD(&priv
->alg_list
);
2919 dev_set_drvdata(dev
, priv
);
2921 priv
->ofdev
= ofdev
;
2923 spin_lock_init(&priv
->reg_lock
);
2925 priv
->reg
= of_iomap(np
, 0);
2927 dev_err(dev
, "failed to of_iomap\n");
2932 /* get SEC version capabilities from device tree */
2933 prop
= of_get_property(np
, "fsl,num-channels", NULL
);
2935 priv
->num_channels
= *prop
;
2937 prop
= of_get_property(np
, "fsl,channel-fifo-len", NULL
);
2939 priv
->chfifo_len
= *prop
;
2941 prop
= of_get_property(np
, "fsl,exec-units-mask", NULL
);
2943 priv
->exec_units
= *prop
;
2945 prop
= of_get_property(np
, "fsl,descriptor-types-mask", NULL
);
2947 priv
->desc_types
= *prop
;
/*
 * num_channels must be a power of two: talitos_init_common masks the
 * round-robin counter with (num_channels - 1).
 */
2949 if (!is_power_of_2(priv
->num_channels
) || !priv
->chfifo_len
||
2950 !priv
->exec_units
|| !priv
->desc_types
) {
2951 dev_err(dev
, "invalid property data in device tree node\n");
2956 if (of_device_is_compatible(np
, "fsl,sec3.0"))
2957 priv
->features
|= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
;
2959 if (of_device_is_compatible(np
, "fsl,sec2.1"))
2960 priv
->features
|= TALITOS_FTR_HW_AUTH_CHECK
|
2961 TALITOS_FTR_SHA224_HWINIT
|
2962 TALITOS_FTR_HMAC_OK
;
2964 if (of_device_is_compatible(np
, "fsl,sec1.0"))
2965 priv
->features
|= TALITOS_FTR_SEC1
;
/* per-revision execution-unit register offsets and channel stride */
2967 if (of_device_is_compatible(np
, "fsl,sec1.2")) {
2968 priv
->reg_deu
= priv
->reg
+ TALITOS12_DEU
;
2969 priv
->reg_aesu
= priv
->reg
+ TALITOS12_AESU
;
2970 priv
->reg_mdeu
= priv
->reg
+ TALITOS12_MDEU
;
2971 stride
= TALITOS1_CH_STRIDE
;
2972 } else if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2973 priv
->reg_deu
= priv
->reg
+ TALITOS10_DEU
;
2974 priv
->reg_aesu
= priv
->reg
+ TALITOS10_AESU
;
2975 priv
->reg_mdeu
= priv
->reg
+ TALITOS10_MDEU
;
2976 priv
->reg_afeu
= priv
->reg
+ TALITOS10_AFEU
;
2977 priv
->reg_rngu
= priv
->reg
+ TALITOS10_RNGU
;
2978 priv
->reg_pkeu
= priv
->reg
+ TALITOS10_PKEU
;
2979 stride
= TALITOS1_CH_STRIDE
;
2981 priv
->reg_deu
= priv
->reg
+ TALITOS2_DEU
;
2982 priv
->reg_aesu
= priv
->reg
+ TALITOS2_AESU
;
2983 priv
->reg_mdeu
= priv
->reg
+ TALITOS2_MDEU
;
2984 priv
->reg_afeu
= priv
->reg
+ TALITOS2_AFEU
;
2985 priv
->reg_rngu
= priv
->reg
+ TALITOS2_RNGU
;
2986 priv
->reg_pkeu
= priv
->reg
+ TALITOS2_PKEU
;
2987 priv
->reg_keu
= priv
->reg
+ TALITOS2_KEU
;
2988 priv
->reg_crcu
= priv
->reg
+ TALITOS2_CRCU
;
2989 stride
= TALITOS2_CH_STRIDE
;
2992 err
= talitos_probe_irq(ofdev
);
/* done tasklets mirror the IRQ topology chosen in talitos_probe_irq */
2996 if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2997 tasklet_init(&priv
->done_task
[0], talitos1_done_4ch
,
2998 (unsigned long)dev
);
3000 if (!priv
->irq
[1]) {
3001 tasklet_init(&priv
->done_task
[0], talitos2_done_4ch
,
3002 (unsigned long)dev
);
3004 tasklet_init(&priv
->done_task
[0], talitos2_done_ch0_2
,
3005 (unsigned long)dev
);
3006 tasklet_init(&priv
->done_task
[1], talitos2_done_ch1_3
,
3007 (unsigned long)dev
);
3011 priv
->chan
= kzalloc(sizeof(struct talitos_channel
) *
3012 priv
->num_channels
, GFP_KERNEL
);
3014 dev_err(dev
, "failed to allocate channel management space\n");
3019 priv
->fifo_len
= roundup_pow_of_two(priv
->chfifo_len
);
3021 for (i
= 0; i
< priv
->num_channels
; i
++) {
3022 priv
->chan
[i
].reg
= priv
->reg
+ stride
* (i
+ 1);
3023 if (!priv
->irq
[1] || !(i
& 1))
3024 priv
->chan
[i
].reg
+= TALITOS_CH_BASE_OFFSET
;
3026 spin_lock_init(&priv
->chan
[i
].head_lock
);
3027 spin_lock_init(&priv
->chan
[i
].tail_lock
);
3029 priv
->chan
[i
].fifo
= kzalloc(sizeof(struct talitos_request
) *
3030 priv
->fifo_len
, GFP_KERNEL
);
3031 if (!priv
->chan
[i
].fifo
) {
3032 dev_err(dev
, "failed to allocate request fifo %d\n", i
);
3037 atomic_set(&priv
->chan
[i
].submit_count
,
3038 -(priv
->chfifo_len
- 1));
3041 dma_set_mask(dev
, DMA_BIT_MASK(36));
3043 /* reset and initialize the h/w */
3044 err
= init_device(dev
);
3046 dev_err(dev
, "failed to initialize device\n");
3050 /* register the RNG, if available */
3051 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
)) {
3052 err
= talitos_register_rng(dev
);
3054 dev_err(dev
, "failed to register hwrng: %d\n", err
);
3057 dev_info(dev
, "hwrng\n");
3060 /* register crypto algorithms the device supports */
3061 for (i
= 0; i
< ARRAY_SIZE(driver_algs
); i
++) {
3062 if (hw_supports(dev
, driver_algs
[i
].desc_hdr_template
)) {
3063 struct talitos_crypto_alg
*t_alg
;
3064 struct crypto_alg
*alg
= NULL
;
3066 t_alg
= talitos_alg_alloc(dev
, &driver_algs
[i
]);
3067 if (IS_ERR(t_alg
)) {
3068 err
= PTR_ERR(t_alg
);
/* -ENOTSUPP means "feature missing", not fatal: skip this alg */
3069 if (err
== -ENOTSUPP
)
3074 switch (t_alg
->algt
.type
) {
3075 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
3076 err
= crypto_register_alg(
3077 &t_alg
->algt
.alg
.crypto
);
3078 alg
= &t_alg
->algt
.alg
.crypto
;
3081 case CRYPTO_ALG_TYPE_AEAD
:
3082 err
= crypto_register_aead(
3083 &t_alg
->algt
.alg
.aead
);
3084 alg
= &t_alg
->algt
.alg
.aead
.base
;
3087 case CRYPTO_ALG_TYPE_AHASH
:
3088 err
= crypto_register_ahash(
3089 &t_alg
->algt
.alg
.hash
);
3090 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
3094 dev_err(dev
, "%s alg registration failed\n",
3095 alg
->cra_driver_name
);
3098 list_add_tail(&t_alg
->entry
, &priv
->alg_list
);
3101 if (!list_empty(&priv
->alg_list
))
3102 dev_info(dev
, "%s algorithms registered in /proc/crypto\n",
3103 (char *)of_get_property(np
, "compatible", NULL
));
/* error path: undo everything done so far (presumably under a label) */
3108 talitos_remove(ofdev
);
/*
 * Device-tree match table: SEC1 and SEC2 compatibles are each gated on
 * their Kconfig option so a kernel built for one family does not bind the
 * other.
 * NOTE(review): the entry braces, #endif lines, and terminating empty
 * entry are not visible in this extraction.
 */
3113 static const struct of_device_id talitos_match
[] = {
3114 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3116 .compatible
= "fsl,sec1.0",
3119 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3121 .compatible
= "fsl,sec2.0",
3126 MODULE_DEVICE_TABLE(of
, talitos_match
);
/*
 * Platform-driver registration glue binding the DT match table to the
 * probe/remove implementations above.
 * NOTE(review): the .driver sub-struct lines (e.g. .name) and closing
 * braces appear to have been dropped by the extraction.
 */
3128 static struct platform_driver talitos_driver
= {
3131 .of_match_table
= talitos_match
,
3133 .probe
= talitos_probe
,
3134 .remove
= talitos_remove
,
/* standard module boilerplate: register the driver and declare metadata */
3137 module_platform_driver(talitos_driver
);
3139 MODULE_LICENSE("GPL");
3140 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3141 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");