/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
58 static void to_talitos_ptr(struct talitos_ptr
*ptr
, dma_addr_t dma_addr
,
61 ptr
->ptr
= cpu_to_be32(lower_32_bits(dma_addr
));
63 ptr
->eptr
= upper_32_bits(dma_addr
);
66 static void to_talitos_ptr_len(struct talitos_ptr
*ptr
, unsigned short len
,
71 ptr
->len1
= cpu_to_be16(len
);
73 ptr
->len
= cpu_to_be16(len
);
77 static unsigned short from_talitos_ptr_len(struct talitos_ptr
*ptr
,
81 return be16_to_cpu(ptr
->len1
);
83 return be16_to_cpu(ptr
->len
);
86 static void to_talitos_ptr_extent_clear(struct talitos_ptr
*ptr
, bool is_sec1
)
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
95 static void map_single_talitos_ptr(struct device
*dev
,
96 struct talitos_ptr
*ptr
,
97 unsigned short len
, void *data
,
98 enum dma_data_direction dir
)
100 dma_addr_t dma_addr
= dma_map_single(dev
, data
, len
, dir
);
101 struct talitos_private
*priv
= dev_get_drvdata(dev
);
102 bool is_sec1
= has_ftr_sec1(priv
);
104 to_talitos_ptr_len(ptr
, len
, is_sec1
);
105 to_talitos_ptr(ptr
, dma_addr
, is_sec1
);
106 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
110 * unmap bus single (contiguous) h/w descriptor pointer
112 static void unmap_single_talitos_ptr(struct device
*dev
,
113 struct talitos_ptr
*ptr
,
114 enum dma_data_direction dir
)
116 struct talitos_private
*priv
= dev_get_drvdata(dev
);
117 bool is_sec1
= has_ftr_sec1(priv
);
119 dma_unmap_single(dev
, be32_to_cpu(ptr
->ptr
),
120 from_talitos_ptr_len(ptr
, is_sec1
), dir
);
123 static int reset_channel(struct device
*dev
, int ch
)
125 struct talitos_private
*priv
= dev_get_drvdata(dev
);
126 unsigned int timeout
= TALITOS_TIMEOUT
;
127 bool is_sec1
= has_ftr_sec1(priv
);
130 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
131 TALITOS1_CCCR_LO_RESET
);
133 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
) &
134 TALITOS1_CCCR_LO_RESET
) && --timeout
)
137 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
138 TALITOS2_CCCR_RESET
);
140 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
141 TALITOS2_CCCR_RESET
) && --timeout
)
146 dev_err(dev
, "failed to reset channel %d\n", ch
);
150 /* set 36-bit addressing, done writeback enable and done IRQ enable */
151 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, TALITOS_CCCR_LO_EAE
|
152 TALITOS_CCCR_LO_CDWE
| TALITOS_CCCR_LO_CDIE
);
154 /* and ICCR writeback, if available */
155 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
156 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
157 TALITOS_CCCR_LO_IWSE
);
162 static int reset_device(struct device
*dev
)
164 struct talitos_private
*priv
= dev_get_drvdata(dev
);
165 unsigned int timeout
= TALITOS_TIMEOUT
;
166 bool is_sec1
= has_ftr_sec1(priv
);
167 u32 mcr
= is_sec1
? TALITOS1_MCR_SWR
: TALITOS2_MCR_SWR
;
169 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
171 while ((in_be32(priv
->reg
+ TALITOS_MCR
) & mcr
)
176 mcr
= TALITOS_MCR_RCA1
| TALITOS_MCR_RCA3
;
177 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
181 dev_err(dev
, "failed to reset device\n");
189 * Reset and initialize the device
191 static int init_device(struct device
*dev
)
193 struct talitos_private
*priv
= dev_get_drvdata(dev
);
195 bool is_sec1
= has_ftr_sec1(priv
);
199 * errata documentation: warning: certain SEC interrupts
200 * are not fully cleared by writing the MCR:SWR bit,
201 * set bit twice to completely reset
203 err
= reset_device(dev
);
207 err
= reset_device(dev
);
212 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
213 err
= reset_channel(dev
, ch
);
218 /* enable channel done and error interrupts */
220 clrbits32(priv
->reg
+ TALITOS_IMR
, TALITOS1_IMR_INIT
);
221 clrbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS1_IMR_LO_INIT
);
222 /* disable parity error check in DEU (erroneous? test vect.) */
223 setbits32(priv
->reg_deu
+ TALITOS_EUICR
, TALITOS1_DEUICR_KPE
);
225 setbits32(priv
->reg
+ TALITOS_IMR
, TALITOS2_IMR_INIT
);
226 setbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS2_IMR_LO_INIT
);
229 /* disable integrity check error interrupts (use writeback instead) */
230 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
231 setbits32(priv
->reg_mdeu
+ TALITOS_EUICR_LO
,
232 TALITOS_MDEUICR_LO_ICE
);
238 * talitos_submit - submits a descriptor to the device for processing
239 * @dev: the SEC device to be used
240 * @ch: the SEC device channel to be used
241 * @desc: the descriptor to be processed by the device
242 * @callback: whom to call when processing is complete
243 * @context: a handle for use by caller (optional)
245 * desc must contain valid dma-mapped (bus physical) address pointers.
246 * callback must check err and feedback in descriptor header
247 * for device processing status.
249 int talitos_submit(struct device
*dev
, int ch
, struct talitos_desc
*desc
,
250 void (*callback
)(struct device
*dev
,
251 struct talitos_desc
*desc
,
252 void *context
, int error
),
255 struct talitos_private
*priv
= dev_get_drvdata(dev
);
256 struct talitos_request
*request
;
259 bool is_sec1
= has_ftr_sec1(priv
);
261 spin_lock_irqsave(&priv
->chan
[ch
].head_lock
, flags
);
263 if (!atomic_inc_not_zero(&priv
->chan
[ch
].submit_count
)) {
264 /* h/w fifo is full */
265 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
269 head
= priv
->chan
[ch
].head
;
270 request
= &priv
->chan
[ch
].fifo
[head
];
272 /* map descriptor and save caller data */
274 desc
->hdr1
= desc
->hdr
;
276 request
->dma_desc
= dma_map_single(dev
, &desc
->hdr1
,
280 request
->dma_desc
= dma_map_single(dev
, desc
,
284 request
->callback
= callback
;
285 request
->context
= context
;
287 /* increment fifo head */
288 priv
->chan
[ch
].head
= (priv
->chan
[ch
].head
+ 1) & (priv
->fifo_len
- 1);
291 request
->desc
= desc
;
295 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF
,
296 upper_32_bits(request
->dma_desc
));
297 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF_LO
,
298 lower_32_bits(request
->dma_desc
));
300 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
304 EXPORT_SYMBOL(talitos_submit
);
307 * process what was done, notify callback of error if not
309 static void flush_channel(struct device
*dev
, int ch
, int error
, int reset_ch
)
311 struct talitos_private
*priv
= dev_get_drvdata(dev
);
312 struct talitos_request
*request
, saved_req
;
315 bool is_sec1
= has_ftr_sec1(priv
);
317 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
319 tail
= priv
->chan
[ch
].tail
;
320 while (priv
->chan
[ch
].fifo
[tail
].desc
) {
323 request
= &priv
->chan
[ch
].fifo
[tail
];
325 /* descriptors with their done bits set don't get the error */
327 hdr
= is_sec1
? request
->desc
->hdr1
: request
->desc
->hdr
;
329 if ((hdr
& DESC_HDR_DONE
) == DESC_HDR_DONE
)
337 dma_unmap_single(dev
, request
->dma_desc
,
341 /* copy entries so we can call callback outside lock */
342 saved_req
.desc
= request
->desc
;
343 saved_req
.callback
= request
->callback
;
344 saved_req
.context
= request
->context
;
346 /* release request entry in fifo */
348 request
->desc
= NULL
;
350 /* increment fifo tail */
351 priv
->chan
[ch
].tail
= (tail
+ 1) & (priv
->fifo_len
- 1);
353 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
355 atomic_dec(&priv
->chan
[ch
].submit_count
);
357 saved_req
.callback(dev
, saved_req
.desc
, saved_req
.context
,
359 /* channel may resume processing in single desc error case */
360 if (error
&& !reset_ch
&& status
== error
)
362 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
363 tail
= priv
->chan
[ch
].tail
;
366 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
370 * process completed requests for channels that have done status
372 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
373 static void talitos1_done_##name(unsigned long data) \
375 struct device *dev = (struct device *)data; \
376 struct talitos_private *priv = dev_get_drvdata(dev); \
377 unsigned long flags; \
379 if (ch_done_mask & 0x10000000) \
380 flush_channel(dev, 0, 0, 0); \
381 if (priv->num_channels == 1) \
383 if (ch_done_mask & 0x40000000) \
384 flush_channel(dev, 1, 0, 0); \
385 if (ch_done_mask & 0x00010000) \
386 flush_channel(dev, 2, 0, 0); \
387 if (ch_done_mask & 0x00040000) \
388 flush_channel(dev, 3, 0, 0); \
391 /* At this point, all completed channels have been processed */ \
392 /* Unmask done interrupts for channels completed later on. */ \
393 spin_lock_irqsave(&priv->reg_lock, flags); \
394 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
395 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
396 spin_unlock_irqrestore(&priv->reg_lock, flags); \
399 DEF_TALITOS1_DONE(4ch
, TALITOS1_ISR_4CHDONE
)
401 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
402 static void talitos2_done_##name(unsigned long data) \
404 struct device *dev = (struct device *)data; \
405 struct talitos_private *priv = dev_get_drvdata(dev); \
406 unsigned long flags; \
408 if (ch_done_mask & 1) \
409 flush_channel(dev, 0, 0, 0); \
410 if (priv->num_channels == 1) \
412 if (ch_done_mask & (1 << 2)) \
413 flush_channel(dev, 1, 0, 0); \
414 if (ch_done_mask & (1 << 4)) \
415 flush_channel(dev, 2, 0, 0); \
416 if (ch_done_mask & (1 << 6)) \
417 flush_channel(dev, 3, 0, 0); \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
422 spin_lock_irqsave(&priv->reg_lock, flags); \
423 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
424 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
428 DEF_TALITOS2_DONE(4ch
, TALITOS2_ISR_4CHDONE
)
429 DEF_TALITOS2_DONE(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
)
430 DEF_TALITOS2_DONE(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
)
433 * locate current (offending) descriptor
435 static u32
current_desc_hdr(struct device
*dev
, int ch
)
437 struct talitos_private
*priv
= dev_get_drvdata(dev
);
441 cur_desc
= ((u64
)in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR
)) << 32;
442 cur_desc
|= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR_LO
);
445 dev_err(dev
, "CDPR is NULL, giving up search for offending descriptor\n");
449 tail
= priv
->chan
[ch
].tail
;
452 while (priv
->chan
[ch
].fifo
[iter
].dma_desc
!= cur_desc
) {
453 iter
= (iter
+ 1) & (priv
->fifo_len
- 1);
455 dev_err(dev
, "couldn't locate current descriptor\n");
460 return priv
->chan
[ch
].fifo
[iter
].desc
->hdr
;
464 * user diagnostics; report root cause of error based on execution unit status
466 static void report_eu_error(struct device
*dev
, int ch
, u32 desc_hdr
)
468 struct talitos_private
*priv
= dev_get_drvdata(dev
);
472 desc_hdr
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
);
474 switch (desc_hdr
& DESC_HDR_SEL0_MASK
) {
475 case DESC_HDR_SEL0_AFEU
:
476 dev_err(dev
, "AFEUISR 0x%08x_%08x\n",
477 in_be32(priv
->reg_afeu
+ TALITOS_EUISR
),
478 in_be32(priv
->reg_afeu
+ TALITOS_EUISR_LO
));
480 case DESC_HDR_SEL0_DEU
:
481 dev_err(dev
, "DEUISR 0x%08x_%08x\n",
482 in_be32(priv
->reg_deu
+ TALITOS_EUISR
),
483 in_be32(priv
->reg_deu
+ TALITOS_EUISR_LO
));
485 case DESC_HDR_SEL0_MDEUA
:
486 case DESC_HDR_SEL0_MDEUB
:
487 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
488 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
489 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
491 case DESC_HDR_SEL0_RNG
:
492 dev_err(dev
, "RNGUISR 0x%08x_%08x\n",
493 in_be32(priv
->reg_rngu
+ TALITOS_ISR
),
494 in_be32(priv
->reg_rngu
+ TALITOS_ISR_LO
));
496 case DESC_HDR_SEL0_PKEU
:
497 dev_err(dev
, "PKEUISR 0x%08x_%08x\n",
498 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
499 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
501 case DESC_HDR_SEL0_AESU
:
502 dev_err(dev
, "AESUISR 0x%08x_%08x\n",
503 in_be32(priv
->reg_aesu
+ TALITOS_EUISR
),
504 in_be32(priv
->reg_aesu
+ TALITOS_EUISR_LO
));
506 case DESC_HDR_SEL0_CRCU
:
507 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
508 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
509 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
511 case DESC_HDR_SEL0_KEU
:
512 dev_err(dev
, "KEUISR 0x%08x_%08x\n",
513 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
514 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
518 switch (desc_hdr
& DESC_HDR_SEL1_MASK
) {
519 case DESC_HDR_SEL1_MDEUA
:
520 case DESC_HDR_SEL1_MDEUB
:
521 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
522 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
523 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
525 case DESC_HDR_SEL1_CRCU
:
526 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
527 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
528 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
532 for (i
= 0; i
< 8; i
++)
533 dev_err(dev
, "DESCBUF 0x%08x_%08x\n",
534 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
+ 8*i
),
535 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF_LO
+ 8*i
));
539 * recover from error interrupts
541 static void talitos_error(struct device
*dev
, u32 isr
, u32 isr_lo
)
543 struct talitos_private
*priv
= dev_get_drvdata(dev
);
544 unsigned int timeout
= TALITOS_TIMEOUT
;
545 int ch
, error
, reset_dev
= 0;
547 bool is_sec1
= has_ftr_sec1(priv
);
548 int reset_ch
= is_sec1
? 1 : 0; /* only SEC2 supports continuation */
550 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
551 /* skip channels without errors */
553 /* bits 29, 31, 17, 19 */
554 if (!(isr
& (1 << (29 + (ch
& 1) * 2 - (ch
& 2) * 6))))
557 if (!(isr
& (1 << (ch
* 2 + 1))))
563 v
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCPSR
);
564 v_lo
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCPSR_LO
);
566 if (v_lo
& TALITOS_CCPSR_LO_DOF
) {
567 dev_err(dev
, "double fetch fifo overflow error\n");
571 if (v_lo
& TALITOS_CCPSR_LO_SOF
) {
572 /* h/w dropped descriptor */
573 dev_err(dev
, "single fetch fifo overflow error\n");
576 if (v_lo
& TALITOS_CCPSR_LO_MDTE
)
577 dev_err(dev
, "master data transfer error\n");
578 if (v_lo
& TALITOS_CCPSR_LO_SGDLZ
)
579 dev_err(dev
, is_sec1
? "pointeur not complete error\n"
580 : "s/g data length zero error\n");
581 if (v_lo
& TALITOS_CCPSR_LO_FPZ
)
582 dev_err(dev
, is_sec1
? "parity error\n"
583 : "fetch pointer zero error\n");
584 if (v_lo
& TALITOS_CCPSR_LO_IDH
)
585 dev_err(dev
, "illegal descriptor header error\n");
586 if (v_lo
& TALITOS_CCPSR_LO_IEU
)
587 dev_err(dev
, is_sec1
? "static assignment error\n"
588 : "invalid exec unit error\n");
589 if (v_lo
& TALITOS_CCPSR_LO_EU
)
590 report_eu_error(dev
, ch
, current_desc_hdr(dev
, ch
));
592 if (v_lo
& TALITOS_CCPSR_LO_GB
)
593 dev_err(dev
, "gather boundary error\n");
594 if (v_lo
& TALITOS_CCPSR_LO_GRL
)
595 dev_err(dev
, "gather return/length error\n");
596 if (v_lo
& TALITOS_CCPSR_LO_SB
)
597 dev_err(dev
, "scatter boundary error\n");
598 if (v_lo
& TALITOS_CCPSR_LO_SRL
)
599 dev_err(dev
, "scatter return/length error\n");
602 flush_channel(dev
, ch
, error
, reset_ch
);
605 reset_channel(dev
, ch
);
607 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
609 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, 0);
610 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
611 TALITOS2_CCCR_CONT
) && --timeout
)
614 dev_err(dev
, "failed to restart channel %d\n",
620 if (reset_dev
|| (is_sec1
&& isr
& ~TALITOS1_ISR_4CHERR
) ||
621 (!is_sec1
&& isr
& ~TALITOS2_ISR_4CHERR
) || isr_lo
) {
622 if (is_sec1
&& (isr_lo
& TALITOS1_ISR_TEA_ERR
))
623 dev_err(dev
, "TEA error: ISR 0x%08x_%08x\n",
626 dev_err(dev
, "done overflow, internal time out, or "
627 "rngu error: ISR 0x%08x_%08x\n", isr
, isr_lo
);
629 /* purge request queues */
630 for (ch
= 0; ch
< priv
->num_channels
; ch
++)
631 flush_channel(dev
, ch
, -EIO
, 1);
633 /* reset and reinitialize the device */
638 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
639 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
641 struct device *dev = data; \
642 struct talitos_private *priv = dev_get_drvdata(dev); \
644 unsigned long flags; \
646 spin_lock_irqsave(&priv->reg_lock, flags); \
647 isr = in_be32(priv->reg + TALITOS_ISR); \
648 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
649 /* Acknowledge interrupt */ \
650 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
651 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
653 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
654 spin_unlock_irqrestore(&priv->reg_lock, flags); \
655 talitos_error(dev, isr & ch_err_mask, isr_lo); \
658 if (likely(isr & ch_done_mask)) { \
659 /* mask further done interrupts. */ \
660 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
661 /* done_task will unmask done interrupts at exit */ \
662 tasklet_schedule(&priv->done_task[tlet]); \
664 spin_unlock_irqrestore(&priv->reg_lock, flags); \
667 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
671 DEF_TALITOS1_INTERRUPT(4ch
, TALITOS1_ISR_4CHDONE
, TALITOS1_ISR_4CHERR
, 0)
673 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
674 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
676 struct device *dev = data; \
677 struct talitos_private *priv = dev_get_drvdata(dev); \
679 unsigned long flags; \
681 spin_lock_irqsave(&priv->reg_lock, flags); \
682 isr = in_be32(priv->reg + TALITOS_ISR); \
683 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
684 /* Acknowledge interrupt */ \
685 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
686 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
688 if (unlikely(isr & ch_err_mask || isr_lo)) { \
689 spin_unlock_irqrestore(&priv->reg_lock, flags); \
690 talitos_error(dev, isr & ch_err_mask, isr_lo); \
693 if (likely(isr & ch_done_mask)) { \
694 /* mask further done interrupts. */ \
695 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
696 /* done_task will unmask done interrupts at exit */ \
697 tasklet_schedule(&priv->done_task[tlet]); \
699 spin_unlock_irqrestore(&priv->reg_lock, flags); \
702 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
706 DEF_TALITOS2_INTERRUPT(4ch
, TALITOS2_ISR_4CHDONE
, TALITOS2_ISR_4CHERR
, 0)
707 DEF_TALITOS2_INTERRUPT(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
, TALITOS2_ISR_CH_0_2_ERR
,
709 DEF_TALITOS2_INTERRUPT(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
, TALITOS2_ISR_CH_1_3_ERR
,
715 static int talitos_rng_data_present(struct hwrng
*rng
, int wait
)
717 struct device
*dev
= (struct device
*)rng
->priv
;
718 struct talitos_private
*priv
= dev_get_drvdata(dev
);
722 for (i
= 0; i
< 20; i
++) {
723 ofl
= in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
) &
724 TALITOS_RNGUSR_LO_OFL
;
733 static int talitos_rng_data_read(struct hwrng
*rng
, u32
*data
)
735 struct device
*dev
= (struct device
*)rng
->priv
;
736 struct talitos_private
*priv
= dev_get_drvdata(dev
);
738 /* rng fifo requires 64-bit accesses */
739 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO
);
740 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO_LO
);
745 static int talitos_rng_init(struct hwrng
*rng
)
747 struct device
*dev
= (struct device
*)rng
->priv
;
748 struct talitos_private
*priv
= dev_get_drvdata(dev
);
749 unsigned int timeout
= TALITOS_TIMEOUT
;
751 setbits32(priv
->reg_rngu
+ TALITOS_EURCR_LO
, TALITOS_RNGURCR_LO_SR
);
752 while (!(in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
)
753 & TALITOS_RNGUSR_LO_RD
)
757 dev_err(dev
, "failed to reset rng hw\n");
761 /* start generating */
762 setbits32(priv
->reg_rngu
+ TALITOS_EUDSR_LO
, 0);
767 static int talitos_register_rng(struct device
*dev
)
769 struct talitos_private
*priv
= dev_get_drvdata(dev
);
771 priv
->rng
.name
= dev_driver_string(dev
),
772 priv
->rng
.init
= talitos_rng_init
,
773 priv
->rng
.data_present
= talitos_rng_data_present
,
774 priv
->rng
.data_read
= talitos_rng_data_read
,
775 priv
->rng
.priv
= (unsigned long)dev
;
777 return hwrng_register(&priv
->rng
);
780 static void talitos_unregister_rng(struct device
*dev
)
782 struct talitos_private
*priv
= dev_get_drvdata(dev
);
784 hwrng_unregister(&priv
->rng
);
790 #define TALITOS_CRA_PRIORITY 3000
791 #define TALITOS_MAX_KEY_SIZE 96
792 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
797 __be32 desc_hdr_template
;
798 u8 key
[TALITOS_MAX_KEY_SIZE
];
799 u8 iv
[TALITOS_MAX_IV_LENGTH
];
801 unsigned int enckeylen
;
802 unsigned int authkeylen
;
803 unsigned int authsize
;
806 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
807 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
809 struct talitos_ahash_req_ctx
{
810 u32 hw_context
[TALITOS_MDEU_MAX_CONTEXT_SIZE
/ sizeof(u32
)];
811 unsigned int hw_context_size
;
812 u8 buf
[HASH_MAX_BLOCK_SIZE
];
813 u8 bufnext
[HASH_MAX_BLOCK_SIZE
];
817 unsigned int to_hash_later
;
819 struct scatterlist bufsl
[2];
820 struct scatterlist
*psrc
;
823 static int aead_setauthsize(struct crypto_aead
*authenc
,
824 unsigned int authsize
)
826 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
828 ctx
->authsize
= authsize
;
833 static int aead_setkey(struct crypto_aead
*authenc
,
834 const u8
*key
, unsigned int keylen
)
836 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
837 struct crypto_authenc_keys keys
;
839 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
842 if (keys
.authkeylen
+ keys
.enckeylen
> TALITOS_MAX_KEY_SIZE
)
845 memcpy(ctx
->key
, keys
.authkey
, keys
.authkeylen
);
846 memcpy(&ctx
->key
[keys
.authkeylen
], keys
.enckey
, keys
.enckeylen
);
848 ctx
->keylen
= keys
.authkeylen
+ keys
.enckeylen
;
849 ctx
->enckeylen
= keys
.enckeylen
;
850 ctx
->authkeylen
= keys
.authkeylen
;
855 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
860 * talitos_edesc - s/w-extended descriptor
861 * @assoc_nents: number of segments in associated data scatterlist
862 * @src_nents: number of segments in input scatterlist
863 * @dst_nents: number of segments in output scatterlist
864 * @assoc_chained: whether assoc is chained or not
865 * @src_chained: whether src is chained or not
866 * @dst_chained: whether dst is chained or not
867 * @iv_dma: dma address of iv for checking continuity and link table
868 * @dma_len: length of dma mapped link_tbl space
869 * @dma_link_tbl: bus physical address of link_tbl/buf
870 * @desc: h/w descriptor
871 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
872 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
874 * if decrypting (with authcheck), or either one of src_nents or dst_nents
875 * is greater than 1, an integrity check value is concatenated to the end
878 struct talitos_edesc
{
887 dma_addr_t dma_link_tbl
;
888 struct talitos_desc desc
;
890 struct talitos_ptr link_tbl
[0];
895 static int talitos_map_sg(struct device
*dev
, struct scatterlist
*sg
,
896 unsigned int nents
, enum dma_data_direction dir
,
899 if (unlikely(chained
))
901 dma_map_sg(dev
, sg
, 1, dir
);
905 dma_map_sg(dev
, sg
, nents
, dir
);
909 static void talitos_unmap_sg_chain(struct device
*dev
, struct scatterlist
*sg
,
910 enum dma_data_direction dir
)
913 dma_unmap_sg(dev
, sg
, 1, dir
);
918 static void talitos_sg_unmap(struct device
*dev
,
919 struct talitos_edesc
*edesc
,
920 struct scatterlist
*src
,
921 struct scatterlist
*dst
)
923 unsigned int src_nents
= edesc
->src_nents
? : 1;
924 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
927 if (edesc
->src_chained
)
928 talitos_unmap_sg_chain(dev
, src
, DMA_TO_DEVICE
);
930 dma_unmap_sg(dev
, src
, src_nents
, DMA_TO_DEVICE
);
933 if (edesc
->dst_chained
)
934 talitos_unmap_sg_chain(dev
, dst
,
937 dma_unmap_sg(dev
, dst
, dst_nents
,
941 if (edesc
->src_chained
)
942 talitos_unmap_sg_chain(dev
, src
, DMA_BIDIRECTIONAL
);
944 dma_unmap_sg(dev
, src
, src_nents
, DMA_BIDIRECTIONAL
);
947 static void ipsec_esp_unmap(struct device
*dev
,
948 struct talitos_edesc
*edesc
,
949 struct aead_request
*areq
)
951 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[6], DMA_FROM_DEVICE
);
952 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[3], DMA_TO_DEVICE
);
953 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
954 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[0], DMA_TO_DEVICE
);
956 if (edesc
->assoc_chained
)
957 talitos_unmap_sg_chain(dev
, areq
->assoc
, DMA_TO_DEVICE
);
958 else if (areq
->assoclen
)
959 /* assoc_nents counts also for IV in non-contiguous cases */
960 dma_unmap_sg(dev
, areq
->assoc
,
961 edesc
->assoc_nents
? edesc
->assoc_nents
- 1 : 1,
964 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
967 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
972 * ipsec_esp descriptor callbacks
974 static void ipsec_esp_encrypt_done(struct device
*dev
,
975 struct talitos_desc
*desc
, void *context
,
978 struct aead_request
*areq
= context
;
979 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
980 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
981 struct talitos_edesc
*edesc
;
982 struct scatterlist
*sg
;
985 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
987 ipsec_esp_unmap(dev
, edesc
, areq
);
989 /* copy the generated ICV to dst */
990 if (edesc
->dst_nents
) {
991 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
992 edesc
->dst_nents
+ 2 +
994 sg
= sg_last(areq
->dst
, edesc
->dst_nents
);
995 memcpy((char *)sg_virt(sg
) + sg
->length
- ctx
->authsize
,
996 icvdata
, ctx
->authsize
);
1001 aead_request_complete(areq
, err
);
1004 static void ipsec_esp_decrypt_swauth_done(struct device
*dev
,
1005 struct talitos_desc
*desc
,
1006 void *context
, int err
)
1008 struct aead_request
*req
= context
;
1009 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1010 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1011 struct talitos_edesc
*edesc
;
1012 struct scatterlist
*sg
;
1015 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1017 ipsec_esp_unmap(dev
, edesc
, req
);
1022 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
1023 edesc
->dst_nents
+ 2 +
1024 edesc
->assoc_nents
];
1026 icvdata
= &edesc
->link_tbl
[0];
1028 sg
= sg_last(req
->dst
, edesc
->dst_nents
? : 1);
1029 err
= memcmp(icvdata
, (char *)sg_virt(sg
) + sg
->length
-
1030 ctx
->authsize
, ctx
->authsize
) ? -EBADMSG
: 0;
1035 aead_request_complete(req
, err
);
1038 static void ipsec_esp_decrypt_hwauth_done(struct device
*dev
,
1039 struct talitos_desc
*desc
,
1040 void *context
, int err
)
1042 struct aead_request
*req
= context
;
1043 struct talitos_edesc
*edesc
;
1045 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1047 ipsec_esp_unmap(dev
, edesc
, req
);
1049 /* check ICV auth status */
1050 if (!err
&& ((desc
->hdr_lo
& DESC_HDR_LO_ICCR1_MASK
) !=
1051 DESC_HDR_LO_ICCR1_PASS
))
1056 aead_request_complete(req
, err
);
1060 * convert scatterlist to SEC h/w link table format
1061 * stop at cryptlen bytes
1063 static int sg_to_link_tbl(struct scatterlist
*sg
, int sg_count
,
1064 int cryptlen
, struct talitos_ptr
*link_tbl_ptr
)
1066 int n_sg
= sg_count
;
1069 to_talitos_ptr(link_tbl_ptr
, sg_dma_address(sg
), 0);
1070 link_tbl_ptr
->len
= cpu_to_be16(sg_dma_len(sg
));
1071 link_tbl_ptr
->j_extent
= 0;
1073 cryptlen
-= sg_dma_len(sg
);
1077 /* adjust (decrease) last one (or two) entry's len to cryptlen */
1079 while (be16_to_cpu(link_tbl_ptr
->len
) <= (-cryptlen
)) {
1080 /* Empty this entry, and move to previous one */
1081 cryptlen
+= be16_to_cpu(link_tbl_ptr
->len
);
1082 link_tbl_ptr
->len
= 0;
1086 be16_add_cpu(&link_tbl_ptr
->len
, cryptlen
);
1088 /* tag end of link table */
1089 link_tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1095 * fill in and submit ipsec_esp descriptor
1097 static int ipsec_esp(struct talitos_edesc
*edesc
, struct aead_request
*areq
,
1098 u64 seq
, void (*callback
) (struct device
*dev
,
1099 struct talitos_desc
*desc
,
1100 void *context
, int error
))
1102 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
1103 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
1104 struct device
*dev
= ctx
->dev
;
1105 struct talitos_desc
*desc
= &edesc
->desc
;
1106 unsigned int cryptlen
= areq
->cryptlen
;
1107 unsigned int authsize
= ctx
->authsize
;
1108 unsigned int ivsize
= crypto_aead_ivsize(aead
);
1110 int sg_link_tbl_len
;
1113 map_single_talitos_ptr(dev
, &desc
->ptr
[0], ctx
->authkeylen
, &ctx
->key
,
1117 desc
->ptr
[1].len
= cpu_to_be16(areq
->assoclen
+ ivsize
);
1118 if (edesc
->assoc_nents
) {
1119 int tbl_off
= edesc
->src_nents
+ edesc
->dst_nents
+ 2;
1120 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1122 to_talitos_ptr(&desc
->ptr
[1], edesc
->dma_link_tbl
+ tbl_off
*
1123 sizeof(struct talitos_ptr
), 0);
1124 desc
->ptr
[1].j_extent
= DESC_PTR_LNKTBL_JUMP
;
1126 /* assoc_nents - 1 entries for assoc, 1 for IV */
1127 sg_count
= sg_to_link_tbl(areq
->assoc
, edesc
->assoc_nents
- 1,
1128 areq
->assoclen
, tbl_ptr
);
1130 /* add IV to link table */
1131 tbl_ptr
+= sg_count
- 1;
1132 tbl_ptr
->j_extent
= 0;
1134 to_talitos_ptr(tbl_ptr
, edesc
->iv_dma
, 0);
1135 tbl_ptr
->len
= cpu_to_be16(ivsize
);
1136 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1138 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1139 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1142 to_talitos_ptr(&desc
->ptr
[1],
1143 sg_dma_address(areq
->assoc
), 0);
1145 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
, 0);
1146 desc
->ptr
[1].j_extent
= 0;
1150 to_talitos_ptr(&desc
->ptr
[2], edesc
->iv_dma
, 0);
1151 desc
->ptr
[2].len
= cpu_to_be16(ivsize
);
1152 desc
->ptr
[2].j_extent
= 0;
1153 /* Sync needed for the aead_givencrypt case */
1154 dma_sync_single_for_device(dev
, edesc
->iv_dma
, ivsize
, DMA_TO_DEVICE
);
1157 map_single_talitos_ptr(dev
, &desc
->ptr
[3], ctx
->enckeylen
,
1158 (char *)&ctx
->key
+ ctx
->authkeylen
,
1163 * map and adjust cipher len to aead request cryptlen.
1164 * extent is bytes of HMAC postpended to ciphertext,
1165 * typically 12 for ipsec
1167 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1168 desc
->ptr
[4].j_extent
= authsize
;
1170 sg_count
= talitos_map_sg(dev
, areq
->src
, edesc
->src_nents
? : 1,
1171 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1173 edesc
->src_chained
);
1175 if (sg_count
== 1) {
1176 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->src
), 0);
1178 sg_link_tbl_len
= cryptlen
;
1180 if (edesc
->desc
.hdr
& DESC_HDR_MODE1_MDEU_CICV
)
1181 sg_link_tbl_len
= cryptlen
+ authsize
;
1183 sg_count
= sg_to_link_tbl(areq
->src
, sg_count
, sg_link_tbl_len
,
1184 &edesc
->link_tbl
[0]);
1186 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1187 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
, 0);
1188 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1192 /* Only one segment now, so no link tbl needed */
1193 to_talitos_ptr(&desc
->ptr
[4],
1194 sg_dma_address(areq
->src
), 0);
1199 desc
->ptr
[5].len
= cpu_to_be16(cryptlen
);
1200 desc
->ptr
[5].j_extent
= authsize
;
1202 if (areq
->src
!= areq
->dst
)
1203 sg_count
= talitos_map_sg(dev
, areq
->dst
,
1204 edesc
->dst_nents
? : 1,
1205 DMA_FROM_DEVICE
, edesc
->dst_chained
);
1207 if (sg_count
== 1) {
1208 to_talitos_ptr(&desc
->ptr
[5], sg_dma_address(areq
->dst
), 0);
1210 int tbl_off
= edesc
->src_nents
+ 1;
1211 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1213 to_talitos_ptr(&desc
->ptr
[5], edesc
->dma_link_tbl
+
1214 tbl_off
* sizeof(struct talitos_ptr
), 0);
1215 sg_count
= sg_to_link_tbl(areq
->dst
, sg_count
, cryptlen
,
1218 /* Add an entry to the link table for ICV data */
1219 tbl_ptr
+= sg_count
- 1;
1220 tbl_ptr
->j_extent
= 0;
1222 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1223 tbl_ptr
->len
= cpu_to_be16(authsize
);
1225 /* icv data follows link tables */
1226 to_talitos_ptr(tbl_ptr
, edesc
->dma_link_tbl
+
1227 (tbl_off
+ edesc
->dst_nents
+ 1 +
1228 edesc
->assoc_nents
) *
1229 sizeof(struct talitos_ptr
), 0);
1230 desc
->ptr
[5].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1231 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1232 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1236 map_single_talitos_ptr(dev
, &desc
->ptr
[6], ivsize
, ctx
->iv
,
1239 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1240 if (ret
!= -EINPROGRESS
) {
1241 ipsec_esp_unmap(dev
, edesc
, areq
);
1248 * derive number of elements in scatterlist
1250 static int sg_count(struct scatterlist
*sg_list
, int nbytes
, bool *chained
)
1252 struct scatterlist
*sg
= sg_list
;
1256 while (nbytes
> 0) {
1258 nbytes
-= sg
->length
;
1259 if (!sg_is_last(sg
) && (sg
+ 1)->length
== 0)
1268 * allocate and map the extended descriptor
1270 static struct talitos_edesc
*talitos_edesc_alloc(struct device
*dev
,
1271 struct scatterlist
*assoc
,
1272 struct scatterlist
*src
,
1273 struct scatterlist
*dst
,
1275 unsigned int assoclen
,
1276 unsigned int cryptlen
,
1277 unsigned int authsize
,
1278 unsigned int ivsize
,
1283 struct talitos_edesc
*edesc
;
1284 int assoc_nents
= 0, src_nents
, dst_nents
, alloc_len
, dma_len
;
1285 bool assoc_chained
= false, src_chained
= false, dst_chained
= false;
1286 dma_addr_t iv_dma
= 0;
1287 gfp_t flags
= cryptoflags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1289 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1290 bool is_sec1
= has_ftr_sec1(priv
);
1291 int max_len
= is_sec1
? TALITOS1_MAX_DATA_LEN
: TALITOS2_MAX_DATA_LEN
;
1293 if (cryptlen
+ authsize
> max_len
) {
1294 dev_err(dev
, "length exceeds h/w max limit\n");
1295 return ERR_PTR(-EINVAL
);
1299 iv_dma
= dma_map_single(dev
, iv
, ivsize
, DMA_TO_DEVICE
);
1303 * Currently it is assumed that iv is provided whenever assoc
1308 assoc_nents
= sg_count(assoc
, assoclen
, &assoc_chained
);
1309 talitos_map_sg(dev
, assoc
, assoc_nents
, DMA_TO_DEVICE
,
1311 assoc_nents
= (assoc_nents
== 1) ? 0 : assoc_nents
;
1313 if (assoc_nents
|| sg_dma_address(assoc
) + assoclen
!= iv_dma
)
1314 assoc_nents
= assoc_nents
? assoc_nents
+ 1 : 2;
1317 if (!dst
|| dst
== src
) {
1318 src_nents
= sg_count(src
, cryptlen
+ authsize
, &src_chained
);
1319 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1320 dst_nents
= dst
? src_nents
: 0;
1321 } else { /* dst && dst != src*/
1322 src_nents
= sg_count(src
, cryptlen
+ (encrypt
? 0 : authsize
),
1324 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1325 dst_nents
= sg_count(dst
, cryptlen
+ (encrypt
? authsize
: 0),
1327 dst_nents
= (dst_nents
== 1) ? 0 : dst_nents
;
1331 * allocate space for base edesc plus the link tables,
1332 * allowing for two separate entries for ICV and generated ICV (+ 2),
1333 * and the ICV data itself
1335 alloc_len
= sizeof(struct talitos_edesc
);
1336 if (assoc_nents
|| src_nents
|| dst_nents
) {
1338 dma_len
= src_nents
? cryptlen
: 0 +
1339 dst_nents
? cryptlen
: 0;
1341 dma_len
= (src_nents
+ dst_nents
+ 2 + assoc_nents
) *
1342 sizeof(struct talitos_ptr
) + authsize
;
1343 alloc_len
+= dma_len
;
1346 alloc_len
+= icv_stashing
? authsize
: 0;
1349 edesc
= kmalloc(alloc_len
, GFP_DMA
| flags
);
1352 talitos_unmap_sg_chain(dev
, assoc
, DMA_TO_DEVICE
);
1354 dma_unmap_sg(dev
, assoc
,
1355 assoc_nents
? assoc_nents
- 1 : 1,
1359 dma_unmap_single(dev
, iv_dma
, ivsize
, DMA_TO_DEVICE
);
1361 dev_err(dev
, "could not allocate edescriptor\n");
1362 return ERR_PTR(-ENOMEM
);
1365 edesc
->assoc_nents
= assoc_nents
;
1366 edesc
->src_nents
= src_nents
;
1367 edesc
->dst_nents
= dst_nents
;
1368 edesc
->assoc_chained
= assoc_chained
;
1369 edesc
->src_chained
= src_chained
;
1370 edesc
->dst_chained
= dst_chained
;
1371 edesc
->iv_dma
= iv_dma
;
1372 edesc
->dma_len
= dma_len
;
1374 edesc
->dma_link_tbl
= dma_map_single(dev
, &edesc
->link_tbl
[0],
1381 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1382 int icv_stashing
, bool encrypt
)
1384 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1385 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1386 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1388 return talitos_edesc_alloc(ctx
->dev
, areq
->assoc
, areq
->src
, areq
->dst
,
1389 iv
, areq
->assoclen
, areq
->cryptlen
,
1390 ctx
->authsize
, ivsize
, icv_stashing
,
1391 areq
->base
.flags
, encrypt
);
1394 static int aead_encrypt(struct aead_request
*req
)
1396 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1397 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1398 struct talitos_edesc
*edesc
;
1400 /* allocate extended descriptor */
1401 edesc
= aead_edesc_alloc(req
, req
->iv
, 0, true);
1403 return PTR_ERR(edesc
);
1406 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1408 return ipsec_esp(edesc
, req
, 0, ipsec_esp_encrypt_done
);
1411 static int aead_decrypt(struct aead_request
*req
)
1413 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1414 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1415 unsigned int authsize
= ctx
->authsize
;
1416 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1417 struct talitos_edesc
*edesc
;
1418 struct scatterlist
*sg
;
1421 req
->cryptlen
-= authsize
;
1423 /* allocate extended descriptor */
1424 edesc
= aead_edesc_alloc(req
, req
->iv
, 1, false);
1426 return PTR_ERR(edesc
);
1428 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1429 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1430 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1432 /* decrypt and check the ICV */
1433 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1434 DESC_HDR_DIR_INBOUND
|
1435 DESC_HDR_MODE1_MDEU_CICV
;
1437 /* reset integrity check result bits */
1438 edesc
->desc
.hdr_lo
= 0;
1440 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_hwauth_done
);
1443 /* Have to check the ICV with software */
1444 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1446 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1448 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
1449 edesc
->dst_nents
+ 2 +
1450 edesc
->assoc_nents
];
1452 icvdata
= &edesc
->link_tbl
[0];
1454 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1456 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- ctx
->authsize
,
1459 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_swauth_done
);
1462 static int aead_givencrypt(struct aead_givcrypt_request
*req
)
1464 struct aead_request
*areq
= &req
->areq
;
1465 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1466 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1467 struct talitos_edesc
*edesc
;
1469 /* allocate extended descriptor */
1470 edesc
= aead_edesc_alloc(areq
, req
->giv
, 0, true);
1472 return PTR_ERR(edesc
);
1475 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1477 memcpy(req
->giv
, ctx
->iv
, crypto_aead_ivsize(authenc
));
1478 /* avoid consecutive packets going out with same IV */
1479 *(__be64
*)req
->giv
^= cpu_to_be64(req
->seq
);
1481 return ipsec_esp(edesc
, areq
, req
->seq
, ipsec_esp_encrypt_done
);
1484 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1485 const u8
*key
, unsigned int keylen
)
1487 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1489 memcpy(&ctx
->key
, key
, keylen
);
1490 ctx
->keylen
= keylen
;
1495 static void unmap_sg_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1496 struct scatterlist
*dst
, unsigned int len
,
1497 struct talitos_edesc
*edesc
)
1499 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1500 bool is_sec1
= has_ftr_sec1(priv
);
1503 if (!edesc
->src_nents
) {
1504 dma_unmap_sg(dev
, src
, 1,
1505 dst
!= src
? DMA_TO_DEVICE
1506 : DMA_BIDIRECTIONAL
);
1508 if (dst
&& edesc
->dst_nents
) {
1509 dma_sync_single_for_device(dev
,
1510 edesc
->dma_link_tbl
+ len
,
1511 len
, DMA_FROM_DEVICE
);
1512 sg_copy_from_buffer(dst
, edesc
->dst_nents
? : 1,
1513 edesc
->buf
+ len
, len
);
1514 } else if (dst
&& dst
!= src
) {
1515 dma_unmap_sg(dev
, dst
, 1, DMA_FROM_DEVICE
);
1518 talitos_sg_unmap(dev
, edesc
, src
, dst
);
1522 static void common_nonsnoop_unmap(struct device
*dev
,
1523 struct talitos_edesc
*edesc
,
1524 struct ablkcipher_request
*areq
)
1526 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1528 unmap_sg_talitos_ptr(dev
, areq
->src
, areq
->dst
, areq
->nbytes
, edesc
);
1529 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1530 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1533 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1537 static void ablkcipher_done(struct device
*dev
,
1538 struct talitos_desc
*desc
, void *context
,
1541 struct ablkcipher_request
*areq
= context
;
1542 struct talitos_edesc
*edesc
;
1544 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1546 common_nonsnoop_unmap(dev
, edesc
, areq
);
1550 areq
->base
.complete(&areq
->base
, err
);
1553 int map_sg_in_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1554 unsigned int len
, struct talitos_edesc
*edesc
,
1555 enum dma_data_direction dir
, struct talitos_ptr
*ptr
)
1558 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1559 bool is_sec1
= has_ftr_sec1(priv
);
1561 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1564 sg_count
= edesc
->src_nents
? : 1;
1566 if (sg_count
== 1) {
1567 dma_map_sg(dev
, src
, 1, dir
);
1568 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1570 sg_copy_to_buffer(src
, sg_count
, edesc
->buf
, len
);
1571 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, is_sec1
);
1572 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1573 len
, DMA_TO_DEVICE
);
1576 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1578 sg_count
= talitos_map_sg(dev
, src
, edesc
->src_nents
? : 1, dir
,
1579 edesc
->src_chained
);
1581 if (sg_count
== 1) {
1582 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1584 sg_count
= sg_to_link_tbl(src
, sg_count
, len
,
1585 &edesc
->link_tbl
[0]);
1587 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, 0);
1588 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1589 dma_sync_single_for_device(dev
,
1590 edesc
->dma_link_tbl
,
1594 /* Only one segment now, so no link tbl needed*/
1595 to_talitos_ptr(ptr
, sg_dma_address(src
),
1603 void map_sg_out_talitos_ptr(struct device
*dev
, struct scatterlist
*dst
,
1604 unsigned int len
, struct talitos_edesc
*edesc
,
1605 enum dma_data_direction dir
,
1606 struct talitos_ptr
*ptr
, int sg_count
)
1608 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1609 bool is_sec1
= has_ftr_sec1(priv
);
1611 if (dir
!= DMA_NONE
)
1612 sg_count
= talitos_map_sg(dev
, dst
, edesc
->dst_nents
? : 1,
1613 dir
, edesc
->dst_chained
);
1615 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1618 if (sg_count
== 1) {
1619 if (dir
!= DMA_NONE
)
1620 dma_map_sg(dev
, dst
, 1, dir
);
1621 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1623 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+ len
, is_sec1
);
1624 dma_sync_single_for_device(dev
,
1625 edesc
->dma_link_tbl
+ len
,
1626 len
, DMA_FROM_DEVICE
);
1629 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1631 if (sg_count
== 1) {
1632 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1634 struct talitos_ptr
*link_tbl_ptr
=
1635 &edesc
->link_tbl
[edesc
->src_nents
+ 1];
1637 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+
1638 (edesc
->src_nents
+ 1) *
1639 sizeof(struct talitos_ptr
), 0);
1640 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1641 sg_count
= sg_to_link_tbl(dst
, sg_count
, len
,
1643 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1650 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1651 struct ablkcipher_request
*areq
,
1652 void (*callback
) (struct device
*dev
,
1653 struct talitos_desc
*desc
,
1654 void *context
, int error
))
1656 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1657 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1658 struct device
*dev
= ctx
->dev
;
1659 struct talitos_desc
*desc
= &edesc
->desc
;
1660 unsigned int cryptlen
= areq
->nbytes
;
1661 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1663 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1664 bool is_sec1
= has_ftr_sec1(priv
);
1666 /* first DWORD empty */
1667 desc
->ptr
[0] = zero_entry
;
1670 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
, is_sec1
);
1671 to_talitos_ptr_len(&desc
->ptr
[1], ivsize
, is_sec1
);
1672 to_talitos_ptr_extent_clear(&desc
->ptr
[1], is_sec1
);
1675 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1676 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1681 sg_count
= map_sg_in_talitos_ptr(dev
, areq
->src
, cryptlen
, edesc
,
1682 (areq
->src
== areq
->dst
) ?
1683 DMA_BIDIRECTIONAL
: DMA_TO_DEVICE
,
1687 map_sg_out_talitos_ptr(dev
, areq
->dst
, cryptlen
, edesc
,
1688 (areq
->src
== areq
->dst
) ? DMA_NONE
1690 &desc
->ptr
[4], sg_count
);
1693 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
,
1696 /* last DWORD empty */
1697 desc
->ptr
[6] = zero_entry
;
1699 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1700 if (ret
!= -EINPROGRESS
) {
1701 common_nonsnoop_unmap(dev
, edesc
, areq
);
1707 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1710 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1711 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1712 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1714 return talitos_edesc_alloc(ctx
->dev
, NULL
, areq
->src
, areq
->dst
,
1715 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1716 areq
->base
.flags
, encrypt
);
1719 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1721 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1722 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1723 struct talitos_edesc
*edesc
;
1725 /* allocate extended descriptor */
1726 edesc
= ablkcipher_edesc_alloc(areq
, true);
1728 return PTR_ERR(edesc
);
1731 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1733 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1736 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1738 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1739 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1740 struct talitos_edesc
*edesc
;
1742 /* allocate extended descriptor */
1743 edesc
= ablkcipher_edesc_alloc(areq
, false);
1745 return PTR_ERR(edesc
);
1747 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1749 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1752 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1753 struct talitos_edesc
*edesc
,
1754 struct ahash_request
*areq
)
1756 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1757 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1758 bool is_sec1
= has_ftr_sec1(priv
);
1760 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1762 unmap_sg_talitos_ptr(dev
, req_ctx
->psrc
, NULL
, 0, edesc
);
1764 /* When using hashctx-in, must unmap it. */
1765 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[1], is_sec1
))
1766 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1769 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[2], is_sec1
))
1770 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1774 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1779 static void ahash_done(struct device
*dev
,
1780 struct talitos_desc
*desc
, void *context
,
1783 struct ahash_request
*areq
= context
;
1784 struct talitos_edesc
*edesc
=
1785 container_of(desc
, struct talitos_edesc
, desc
);
1786 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1788 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1789 /* Position any partial block for next update/final/finup */
1790 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1791 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1793 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1797 areq
->base
.complete(&areq
->base
, err
);
1801 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1802 * ourself and submit a padded block
1804 void talitos_handle_buggy_hash(struct talitos_ctx
*ctx
,
1805 struct talitos_edesc
*edesc
,
1806 struct talitos_ptr
*ptr
)
1808 static u8 padded_hash
[64] = {
1809 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1810 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1811 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1812 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1815 pr_err_once("Bug in SEC1, padding ourself\n");
1816 edesc
->desc
.hdr
&= ~DESC_HDR_MODE0_MDEU_PAD
;
1817 map_single_talitos_ptr(ctx
->dev
, ptr
, sizeof(padded_hash
),
1818 (char *)padded_hash
, DMA_TO_DEVICE
);
1821 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1822 struct ahash_request
*areq
, unsigned int length
,
1823 void (*callback
) (struct device
*dev
,
1824 struct talitos_desc
*desc
,
1825 void *context
, int error
))
1827 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1828 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1829 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1830 struct device
*dev
= ctx
->dev
;
1831 struct talitos_desc
*desc
= &edesc
->desc
;
1833 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1834 bool is_sec1
= has_ftr_sec1(priv
);
1836 /* first DWORD empty */
1837 desc
->ptr
[0] = zero_entry
;
1839 /* hash context in */
1840 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1841 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1842 req_ctx
->hw_context_size
,
1843 (char *)req_ctx
->hw_context
,
1845 req_ctx
->swinit
= 0;
1847 desc
->ptr
[1] = zero_entry
;
1848 /* Indicate next op is not the first. */
1854 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1855 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1857 desc
->ptr
[2] = zero_entry
;
1862 map_sg_in_talitos_ptr(dev
, req_ctx
->psrc
, length
, edesc
,
1863 DMA_TO_DEVICE
, &desc
->ptr
[3]);
1865 /* fifth DWORD empty */
1866 desc
->ptr
[4] = zero_entry
;
1868 /* hash/HMAC out -or- hash context out */
1870 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1871 crypto_ahash_digestsize(tfm
),
1872 areq
->result
, DMA_FROM_DEVICE
);
1874 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1875 req_ctx
->hw_context_size
,
1876 req_ctx
->hw_context
, DMA_FROM_DEVICE
);
1878 /* last DWORD empty */
1879 desc
->ptr
[6] = zero_entry
;
1881 if (is_sec1
&& from_talitos_ptr_len(&desc
->ptr
[3], true) == 0)
1882 talitos_handle_buggy_hash(ctx
, edesc
, &desc
->ptr
[3]);
1884 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1885 if (ret
!= -EINPROGRESS
) {
1886 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1892 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1893 unsigned int nbytes
)
1895 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1896 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1897 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1899 return talitos_edesc_alloc(ctx
->dev
, NULL
, req_ctx
->psrc
, NULL
, NULL
, 0,
1900 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
1903 static int ahash_init(struct ahash_request
*areq
)
1905 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1906 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1908 /* Initialize the context */
1910 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1911 req_ctx
->swinit
= 0; /* assume h/w init of context */
1912 req_ctx
->hw_context_size
=
1913 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1914 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1915 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1921 * on h/w without explicit sha224 support, we initialize h/w context
1922 * manually with sha224 constants, and tell it to run sha256.
1924 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1926 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1929 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1931 req_ctx
->hw_context
[0] = SHA224_H0
;
1932 req_ctx
->hw_context
[1] = SHA224_H1
;
1933 req_ctx
->hw_context
[2] = SHA224_H2
;
1934 req_ctx
->hw_context
[3] = SHA224_H3
;
1935 req_ctx
->hw_context
[4] = SHA224_H4
;
1936 req_ctx
->hw_context
[5] = SHA224_H5
;
1937 req_ctx
->hw_context
[6] = SHA224_H6
;
1938 req_ctx
->hw_context
[7] = SHA224_H7
;
1940 /* init 64-bit count */
1941 req_ctx
->hw_context
[8] = 0;
1942 req_ctx
->hw_context
[9] = 0;
1947 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1949 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1950 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1951 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1952 struct talitos_edesc
*edesc
;
1953 unsigned int blocksize
=
1954 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1955 unsigned int nbytes_to_hash
;
1956 unsigned int to_hash_later
;
1960 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1961 /* Buffer up to one whole block */
1962 sg_copy_to_buffer(areq
->src
,
1963 sg_count(areq
->src
, nbytes
, &chained
),
1964 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1965 req_ctx
->nbuf
+= nbytes
;
1969 /* At least (blocksize + 1) bytes are available to hash */
1970 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1971 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1975 else if (to_hash_later
)
1976 /* There is a partial block. Hash the full block(s) now */
1977 nbytes_to_hash
-= to_hash_later
;
1979 /* Keep one block buffered */
1980 nbytes_to_hash
-= blocksize
;
1981 to_hash_later
= blocksize
;
1984 /* Chain in any previously buffered data */
1985 if (req_ctx
->nbuf
) {
1986 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1987 sg_init_table(req_ctx
->bufsl
, nsg
);
1988 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1990 scatterwalk_sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1991 req_ctx
->psrc
= req_ctx
->bufsl
;
1993 req_ctx
->psrc
= areq
->src
;
1995 if (to_hash_later
) {
1996 int nents
= sg_count(areq
->src
, nbytes
, &chained
);
1997 sg_pcopy_to_buffer(areq
->src
, nents
,
2000 nbytes
- to_hash_later
);
2002 req_ctx
->to_hash_later
= to_hash_later
;
2004 /* Allocate extended descriptor */
2005 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
2007 return PTR_ERR(edesc
);
2009 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
2011 /* On last one, request SEC to pad; otherwise continue */
2013 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
2015 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
2017 /* request SEC to INIT hash. */
2018 if (req_ctx
->first
&& !req_ctx
->swinit
)
2019 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
2021 /* When the tfm context has a keylen, it's an HMAC.
2022 * A first or last (ie. not middle) descriptor must request HMAC.
2024 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
2025 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
2027 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
2031 static int ahash_update(struct ahash_request
*areq
)
2033 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2037 return ahash_process_req(areq
, areq
->nbytes
);
2040 static int ahash_final(struct ahash_request
*areq
)
2042 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2046 return ahash_process_req(areq
, 0);
2049 static int ahash_finup(struct ahash_request
*areq
)
2051 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2055 return ahash_process_req(areq
, areq
->nbytes
);
2058 static int ahash_digest(struct ahash_request
*areq
)
2060 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2061 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
2066 return ahash_process_req(areq
, areq
->nbytes
);
2069 struct keyhash_result
{
2070 struct completion completion
;
2074 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
2076 struct keyhash_result
*res
= req
->data
;
2078 if (err
== -EINPROGRESS
)
2082 complete(&res
->completion
);
2085 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
2088 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2090 struct scatterlist sg
[1];
2091 struct ahash_request
*req
;
2092 struct keyhash_result hresult
;
2095 init_completion(&hresult
.completion
);
2097 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
2101 /* Keep tfm keylen == 0 during hash of the long key */
2103 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
2104 keyhash_complete
, &hresult
);
2106 sg_init_one(&sg
[0], key
, keylen
);
2108 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
2109 ret
= crypto_ahash_digest(req
);
2115 ret
= wait_for_completion_interruptible(
2116 &hresult
.completion
);
2123 ahash_request_free(req
);
2128 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
2129 unsigned int keylen
)
2131 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2132 unsigned int blocksize
=
2133 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2134 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
2135 unsigned int keysize
= keylen
;
2136 u8 hash
[SHA512_DIGEST_SIZE
];
2139 if (keylen
<= blocksize
)
2140 memcpy(ctx
->key
, key
, keysize
);
2142 /* Must get the hash of the long key */
2143 ret
= keyhash(tfm
, key
, keylen
, hash
);
2146 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
2150 keysize
= digestsize
;
2151 memcpy(ctx
->key
, hash
, digestsize
);
2154 ctx
->keylen
= keysize
;
2160 struct talitos_alg_template
{
2163 struct crypto_alg crypto
;
2164 struct ahash_alg hash
;
2166 __be32 desc_hdr_template
;
2169 static struct talitos_alg_template driver_algs
[] = {
2170 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2171 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2173 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
2174 .cra_driver_name
= "authenc-hmac-sha1-cbc-aes-talitos",
2175 .cra_blocksize
= AES_BLOCK_SIZE
,
2176 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2178 .ivsize
= AES_BLOCK_SIZE
,
2179 .maxauthsize
= SHA1_DIGEST_SIZE
,
2182 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2183 DESC_HDR_SEL0_AESU
|
2184 DESC_HDR_MODE0_AESU_CBC
|
2185 DESC_HDR_SEL1_MDEUA
|
2186 DESC_HDR_MODE1_MDEU_INIT
|
2187 DESC_HDR_MODE1_MDEU_PAD
|
2188 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2190 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2192 .cra_name
= "authenc(hmac(sha1),cbc(des3_ede))",
2193 .cra_driver_name
= "authenc-hmac-sha1-cbc-3des-talitos",
2194 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2195 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2197 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2198 .maxauthsize
= SHA1_DIGEST_SIZE
,
2201 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2203 DESC_HDR_MODE0_DEU_CBC
|
2204 DESC_HDR_MODE0_DEU_3DES
|
2205 DESC_HDR_SEL1_MDEUA
|
2206 DESC_HDR_MODE1_MDEU_INIT
|
2207 DESC_HDR_MODE1_MDEU_PAD
|
2208 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2210 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2212 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
2213 .cra_driver_name
= "authenc-hmac-sha224-cbc-aes-talitos",
2214 .cra_blocksize
= AES_BLOCK_SIZE
,
2215 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2217 .ivsize
= AES_BLOCK_SIZE
,
2218 .maxauthsize
= SHA224_DIGEST_SIZE
,
2221 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2222 DESC_HDR_SEL0_AESU
|
2223 DESC_HDR_MODE0_AESU_CBC
|
2224 DESC_HDR_SEL1_MDEUA
|
2225 DESC_HDR_MODE1_MDEU_INIT
|
2226 DESC_HDR_MODE1_MDEU_PAD
|
2227 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2229 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2231 .cra_name
= "authenc(hmac(sha224),cbc(des3_ede))",
2232 .cra_driver_name
= "authenc-hmac-sha224-cbc-3des-talitos",
2233 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2234 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2236 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2237 .maxauthsize
= SHA224_DIGEST_SIZE
,
2240 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2242 DESC_HDR_MODE0_DEU_CBC
|
2243 DESC_HDR_MODE0_DEU_3DES
|
2244 DESC_HDR_SEL1_MDEUA
|
2245 DESC_HDR_MODE1_MDEU_INIT
|
2246 DESC_HDR_MODE1_MDEU_PAD
|
2247 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2249 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2251 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
2252 .cra_driver_name
= "authenc-hmac-sha256-cbc-aes-talitos",
2253 .cra_blocksize
= AES_BLOCK_SIZE
,
2254 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2256 .ivsize
= AES_BLOCK_SIZE
,
2257 .maxauthsize
= SHA256_DIGEST_SIZE
,
2260 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2261 DESC_HDR_SEL0_AESU
|
2262 DESC_HDR_MODE0_AESU_CBC
|
2263 DESC_HDR_SEL1_MDEUA
|
2264 DESC_HDR_MODE1_MDEU_INIT
|
2265 DESC_HDR_MODE1_MDEU_PAD
|
2266 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2268 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2270 .cra_name
= "authenc(hmac(sha256),cbc(des3_ede))",
2271 .cra_driver_name
= "authenc-hmac-sha256-cbc-3des-talitos",
2272 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2273 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2275 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2276 .maxauthsize
= SHA256_DIGEST_SIZE
,
2279 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2281 DESC_HDR_MODE0_DEU_CBC
|
2282 DESC_HDR_MODE0_DEU_3DES
|
2283 DESC_HDR_SEL1_MDEUA
|
2284 DESC_HDR_MODE1_MDEU_INIT
|
2285 DESC_HDR_MODE1_MDEU_PAD
|
2286 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2288 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2290 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
2291 .cra_driver_name
= "authenc-hmac-sha384-cbc-aes-talitos",
2292 .cra_blocksize
= AES_BLOCK_SIZE
,
2293 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2295 .ivsize
= AES_BLOCK_SIZE
,
2296 .maxauthsize
= SHA384_DIGEST_SIZE
,
2299 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2300 DESC_HDR_SEL0_AESU
|
2301 DESC_HDR_MODE0_AESU_CBC
|
2302 DESC_HDR_SEL1_MDEUB
|
2303 DESC_HDR_MODE1_MDEU_INIT
|
2304 DESC_HDR_MODE1_MDEU_PAD
|
2305 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2307 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2309 .cra_name
= "authenc(hmac(sha384),cbc(des3_ede))",
2310 .cra_driver_name
= "authenc-hmac-sha384-cbc-3des-talitos",
2311 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2312 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2314 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2315 .maxauthsize
= SHA384_DIGEST_SIZE
,
2318 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2320 DESC_HDR_MODE0_DEU_CBC
|
2321 DESC_HDR_MODE0_DEU_3DES
|
2322 DESC_HDR_SEL1_MDEUB
|
2323 DESC_HDR_MODE1_MDEU_INIT
|
2324 DESC_HDR_MODE1_MDEU_PAD
|
2325 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2327 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2329 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
2330 .cra_driver_name
= "authenc-hmac-sha512-cbc-aes-talitos",
2331 .cra_blocksize
= AES_BLOCK_SIZE
,
2332 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2334 .ivsize
= AES_BLOCK_SIZE
,
2335 .maxauthsize
= SHA512_DIGEST_SIZE
,
2338 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2339 DESC_HDR_SEL0_AESU
|
2340 DESC_HDR_MODE0_AESU_CBC
|
2341 DESC_HDR_SEL1_MDEUB
|
2342 DESC_HDR_MODE1_MDEU_INIT
|
2343 DESC_HDR_MODE1_MDEU_PAD
|
2344 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2346 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2348 .cra_name
= "authenc(hmac(sha512),cbc(des3_ede))",
2349 .cra_driver_name
= "authenc-hmac-sha512-cbc-3des-talitos",
2350 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2351 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2353 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2354 .maxauthsize
= SHA512_DIGEST_SIZE
,
2357 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2359 DESC_HDR_MODE0_DEU_CBC
|
2360 DESC_HDR_MODE0_DEU_3DES
|
2361 DESC_HDR_SEL1_MDEUB
|
2362 DESC_HDR_MODE1_MDEU_INIT
|
2363 DESC_HDR_MODE1_MDEU_PAD
|
2364 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2366 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2368 .cra_name
= "authenc(hmac(md5),cbc(aes))",
2369 .cra_driver_name
= "authenc-hmac-md5-cbc-aes-talitos",
2370 .cra_blocksize
= AES_BLOCK_SIZE
,
2371 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2373 .ivsize
= AES_BLOCK_SIZE
,
2374 .maxauthsize
= MD5_DIGEST_SIZE
,
2377 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2378 DESC_HDR_SEL0_AESU
|
2379 DESC_HDR_MODE0_AESU_CBC
|
2380 DESC_HDR_SEL1_MDEUA
|
2381 DESC_HDR_MODE1_MDEU_INIT
|
2382 DESC_HDR_MODE1_MDEU_PAD
|
2383 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2385 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2387 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
2388 .cra_driver_name
= "authenc-hmac-md5-cbc-3des-talitos",
2389 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2390 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
2392 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2393 .maxauthsize
= MD5_DIGEST_SIZE
,
2396 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2398 DESC_HDR_MODE0_DEU_CBC
|
2399 DESC_HDR_MODE0_DEU_3DES
|
2400 DESC_HDR_SEL1_MDEUA
|
2401 DESC_HDR_MODE1_MDEU_INIT
|
2402 DESC_HDR_MODE1_MDEU_PAD
|
2403 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2405 /* ABLKCIPHER algorithms. */
2406 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2408 .cra_name
= "cbc(aes)",
2409 .cra_driver_name
= "cbc-aes-talitos",
2410 .cra_blocksize
= AES_BLOCK_SIZE
,
2411 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2414 .min_keysize
= AES_MIN_KEY_SIZE
,
2415 .max_keysize
= AES_MAX_KEY_SIZE
,
2416 .ivsize
= AES_BLOCK_SIZE
,
2419 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2420 DESC_HDR_SEL0_AESU
|
2421 DESC_HDR_MODE0_AESU_CBC
,
2423 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2425 .cra_name
= "cbc(des3_ede)",
2426 .cra_driver_name
= "cbc-3des-talitos",
2427 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2428 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2431 .min_keysize
= DES3_EDE_KEY_SIZE
,
2432 .max_keysize
= DES3_EDE_KEY_SIZE
,
2433 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2436 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2438 DESC_HDR_MODE0_DEU_CBC
|
2439 DESC_HDR_MODE0_DEU_3DES
,
2441 /* AHASH algorithms. */
2442 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2444 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2447 .cra_driver_name
= "md5-talitos",
2448 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2449 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2453 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2454 DESC_HDR_SEL0_MDEUA
|
2455 DESC_HDR_MODE0_MDEU_MD5
,
2457 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2459 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2462 .cra_driver_name
= "sha1-talitos",
2463 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2464 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2468 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2469 DESC_HDR_SEL0_MDEUA
|
2470 DESC_HDR_MODE0_MDEU_SHA1
,
2472 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2474 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2476 .cra_name
= "sha224",
2477 .cra_driver_name
= "sha224-talitos",
2478 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2479 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2483 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2484 DESC_HDR_SEL0_MDEUA
|
2485 DESC_HDR_MODE0_MDEU_SHA224
,
2487 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2489 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2491 .cra_name
= "sha256",
2492 .cra_driver_name
= "sha256-talitos",
2493 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2494 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2498 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2499 DESC_HDR_SEL0_MDEUA
|
2500 DESC_HDR_MODE0_MDEU_SHA256
,
2502 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2504 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2506 .cra_name
= "sha384",
2507 .cra_driver_name
= "sha384-talitos",
2508 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2509 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2513 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2514 DESC_HDR_SEL0_MDEUB
|
2515 DESC_HDR_MODE0_MDEUB_SHA384
,
2517 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2519 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2521 .cra_name
= "sha512",
2522 .cra_driver_name
= "sha512-talitos",
2523 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2524 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2528 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2529 DESC_HDR_SEL0_MDEUB
|
2530 DESC_HDR_MODE0_MDEUB_SHA512
,
2532 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2534 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2536 .cra_name
= "hmac(md5)",
2537 .cra_driver_name
= "hmac-md5-talitos",
2538 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2539 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2543 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2544 DESC_HDR_SEL0_MDEUA
|
2545 DESC_HDR_MODE0_MDEU_MD5
,
2547 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2549 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2551 .cra_name
= "hmac(sha1)",
2552 .cra_driver_name
= "hmac-sha1-talitos",
2553 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2554 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2558 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2559 DESC_HDR_SEL0_MDEUA
|
2560 DESC_HDR_MODE0_MDEU_SHA1
,
2562 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2564 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2566 .cra_name
= "hmac(sha224)",
2567 .cra_driver_name
= "hmac-sha224-talitos",
2568 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2569 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2573 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2574 DESC_HDR_SEL0_MDEUA
|
2575 DESC_HDR_MODE0_MDEU_SHA224
,
2577 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2579 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2581 .cra_name
= "hmac(sha256)",
2582 .cra_driver_name
= "hmac-sha256-talitos",
2583 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2584 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2588 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2589 DESC_HDR_SEL0_MDEUA
|
2590 DESC_HDR_MODE0_MDEU_SHA256
,
2592 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2594 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2596 .cra_name
= "hmac(sha384)",
2597 .cra_driver_name
= "hmac-sha384-talitos",
2598 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2599 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2603 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2604 DESC_HDR_SEL0_MDEUB
|
2605 DESC_HDR_MODE0_MDEUB_SHA384
,
2607 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2609 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2611 .cra_name
= "hmac(sha512)",
2612 .cra_driver_name
= "hmac-sha512-talitos",
2613 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2614 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2618 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2619 DESC_HDR_SEL0_MDEUB
|
2620 DESC_HDR_MODE0_MDEUB_SHA512
,
2624 struct talitos_crypto_alg
{
2625 struct list_head entry
;
2627 struct talitos_alg_template algt
;
2630 static int talitos_cra_init(struct crypto_tfm
*tfm
)
2632 struct crypto_alg
*alg
= tfm
->__crt_alg
;
2633 struct talitos_crypto_alg
*talitos_alg
;
2634 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2635 struct talitos_private
*priv
;
2637 if ((alg
->cra_flags
& CRYPTO_ALG_TYPE_MASK
) == CRYPTO_ALG_TYPE_AHASH
)
2638 talitos_alg
= container_of(__crypto_ahash_alg(alg
),
2639 struct talitos_crypto_alg
,
2642 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2645 /* update context with ptr to dev */
2646 ctx
->dev
= talitos_alg
->dev
;
2648 /* assign SEC channel to tfm in round-robin fashion */
2649 priv
= dev_get_drvdata(ctx
->dev
);
2650 ctx
->ch
= atomic_inc_return(&priv
->last_chan
) &
2651 (priv
->num_channels
- 1);
2653 /* copy descriptor header template value */
2654 ctx
->desc_hdr_template
= talitos_alg
->algt
.desc_hdr_template
;
2656 /* select done notification */
2657 ctx
->desc_hdr_template
|= DESC_HDR_DONE_NOTIFY
;
2662 static int talitos_cra_init_aead(struct crypto_tfm
*tfm
)
2664 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2666 talitos_cra_init(tfm
);
2668 /* random first IV */
2669 get_random_bytes(ctx
->iv
, TALITOS_MAX_IV_LENGTH
);
2674 static int talitos_cra_init_ahash(struct crypto_tfm
*tfm
)
2676 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2678 talitos_cra_init(tfm
);
2681 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2682 sizeof(struct talitos_ahash_req_ctx
));
2688 * given the alg's descriptor header template, determine whether descriptor
2689 * type and primary/secondary execution units required match the hw
2690 * capabilities description provided in the device tree node.
2692 static int hw_supports(struct device
*dev
, __be32 desc_hdr_template
)
2694 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2697 ret
= (1 << DESC_TYPE(desc_hdr_template
) & priv
->desc_types
) &&
2698 (1 << PRIMARY_EU(desc_hdr_template
) & priv
->exec_units
);
2700 if (SECONDARY_EU(desc_hdr_template
))
2701 ret
= ret
&& (1 << SECONDARY_EU(desc_hdr_template
)
2702 & priv
->exec_units
);
2707 static int talitos_remove(struct platform_device
*ofdev
)
2709 struct device
*dev
= &ofdev
->dev
;
2710 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2711 struct talitos_crypto_alg
*t_alg
, *n
;
2714 list_for_each_entry_safe(t_alg
, n
, &priv
->alg_list
, entry
) {
2715 switch (t_alg
->algt
.type
) {
2716 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2717 case CRYPTO_ALG_TYPE_AEAD
:
2718 crypto_unregister_alg(&t_alg
->algt
.alg
.crypto
);
2720 case CRYPTO_ALG_TYPE_AHASH
:
2721 crypto_unregister_ahash(&t_alg
->algt
.alg
.hash
);
2724 list_del(&t_alg
->entry
);
2728 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
))
2729 talitos_unregister_rng(dev
);
2731 for (i
= 0; i
< priv
->num_channels
; i
++)
2732 kfree(priv
->chan
[i
].fifo
);
2736 for (i
= 0; i
< 2; i
++)
2738 free_irq(priv
->irq
[i
], dev
);
2739 irq_dispose_mapping(priv
->irq
[i
]);
2742 tasklet_kill(&priv
->done_task
[0]);
2744 tasklet_kill(&priv
->done_task
[1]);
2753 static struct talitos_crypto_alg
*talitos_alg_alloc(struct device
*dev
,
2754 struct talitos_alg_template
2757 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2758 struct talitos_crypto_alg
*t_alg
;
2759 struct crypto_alg
*alg
;
2761 t_alg
= kzalloc(sizeof(struct talitos_crypto_alg
), GFP_KERNEL
);
2763 return ERR_PTR(-ENOMEM
);
2765 t_alg
->algt
= *template;
2767 switch (t_alg
->algt
.type
) {
2768 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2769 alg
= &t_alg
->algt
.alg
.crypto
;
2770 alg
->cra_init
= talitos_cra_init
;
2771 alg
->cra_type
= &crypto_ablkcipher_type
;
2772 alg
->cra_ablkcipher
.setkey
= ablkcipher_setkey
;
2773 alg
->cra_ablkcipher
.encrypt
= ablkcipher_encrypt
;
2774 alg
->cra_ablkcipher
.decrypt
= ablkcipher_decrypt
;
2775 alg
->cra_ablkcipher
.geniv
= "eseqiv";
2777 case CRYPTO_ALG_TYPE_AEAD
:
2778 alg
= &t_alg
->algt
.alg
.crypto
;
2779 alg
->cra_init
= talitos_cra_init_aead
;
2780 alg
->cra_type
= &crypto_aead_type
;
2781 alg
->cra_aead
.setkey
= aead_setkey
;
2782 alg
->cra_aead
.setauthsize
= aead_setauthsize
;
2783 alg
->cra_aead
.encrypt
= aead_encrypt
;
2784 alg
->cra_aead
.decrypt
= aead_decrypt
;
2785 alg
->cra_aead
.givencrypt
= aead_givencrypt
;
2786 alg
->cra_aead
.geniv
= "<built-in>";
2788 case CRYPTO_ALG_TYPE_AHASH
:
2789 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
2790 alg
->cra_init
= talitos_cra_init_ahash
;
2791 alg
->cra_type
= &crypto_ahash_type
;
2792 t_alg
->algt
.alg
.hash
.init
= ahash_init
;
2793 t_alg
->algt
.alg
.hash
.update
= ahash_update
;
2794 t_alg
->algt
.alg
.hash
.final
= ahash_final
;
2795 t_alg
->algt
.alg
.hash
.finup
= ahash_finup
;
2796 t_alg
->algt
.alg
.hash
.digest
= ahash_digest
;
2797 t_alg
->algt
.alg
.hash
.setkey
= ahash_setkey
;
2799 if (!(priv
->features
& TALITOS_FTR_HMAC_OK
) &&
2800 !strncmp(alg
->cra_name
, "hmac", 4)) {
2802 return ERR_PTR(-ENOTSUPP
);
2804 if (!(priv
->features
& TALITOS_FTR_SHA224_HWINIT
) &&
2805 (!strcmp(alg
->cra_name
, "sha224") ||
2806 !strcmp(alg
->cra_name
, "hmac(sha224)"))) {
2807 t_alg
->algt
.alg
.hash
.init
= ahash_init_sha224_swinit
;
2808 t_alg
->algt
.desc_hdr_template
=
2809 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2810 DESC_HDR_SEL0_MDEUA
|
2811 DESC_HDR_MODE0_MDEU_SHA256
;
2815 dev_err(dev
, "unknown algorithm type %d\n", t_alg
->algt
.type
);
2816 return ERR_PTR(-EINVAL
);
2819 alg
->cra_module
= THIS_MODULE
;
2820 alg
->cra_priority
= TALITOS_CRA_PRIORITY
;
2821 alg
->cra_alignmask
= 0;
2822 alg
->cra_ctxsize
= sizeof(struct talitos_ctx
);
2823 alg
->cra_flags
|= CRYPTO_ALG_KERN_DRIVER_ONLY
;
2830 static int talitos_probe_irq(struct platform_device
*ofdev
)
2832 struct device
*dev
= &ofdev
->dev
;
2833 struct device_node
*np
= ofdev
->dev
.of_node
;
2834 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2836 bool is_sec1
= has_ftr_sec1(priv
);
2838 priv
->irq
[0] = irq_of_parse_and_map(np
, 0);
2839 if (!priv
->irq
[0]) {
2840 dev_err(dev
, "failed to map irq\n");
2844 err
= request_irq(priv
->irq
[0], talitos1_interrupt_4ch
, 0,
2845 dev_driver_string(dev
), dev
);
2849 priv
->irq
[1] = irq_of_parse_and_map(np
, 1);
2851 /* get the primary irq line */
2852 if (!priv
->irq
[1]) {
2853 err
= request_irq(priv
->irq
[0], talitos2_interrupt_4ch
, 0,
2854 dev_driver_string(dev
), dev
);
2858 err
= request_irq(priv
->irq
[0], talitos2_interrupt_ch0_2
, 0,
2859 dev_driver_string(dev
), dev
);
2863 /* get the secondary irq line */
2864 err
= request_irq(priv
->irq
[1], talitos2_interrupt_ch1_3
, 0,
2865 dev_driver_string(dev
), dev
);
2867 dev_err(dev
, "failed to request secondary irq\n");
2868 irq_dispose_mapping(priv
->irq
[1]);
2876 dev_err(dev
, "failed to request primary irq\n");
2877 irq_dispose_mapping(priv
->irq
[0]);
2884 static int talitos_probe(struct platform_device
*ofdev
)
2886 struct device
*dev
= &ofdev
->dev
;
2887 struct device_node
*np
= ofdev
->dev
.of_node
;
2888 struct talitos_private
*priv
;
2889 const unsigned int *prop
;
2893 priv
= kzalloc(sizeof(struct talitos_private
), GFP_KERNEL
);
2897 INIT_LIST_HEAD(&priv
->alg_list
);
2899 dev_set_drvdata(dev
, priv
);
2901 priv
->ofdev
= ofdev
;
2903 spin_lock_init(&priv
->reg_lock
);
2905 priv
->reg
= of_iomap(np
, 0);
2907 dev_err(dev
, "failed to of_iomap\n");
2912 /* get SEC version capabilities from device tree */
2913 prop
= of_get_property(np
, "fsl,num-channels", NULL
);
2915 priv
->num_channels
= *prop
;
2917 prop
= of_get_property(np
, "fsl,channel-fifo-len", NULL
);
2919 priv
->chfifo_len
= *prop
;
2921 prop
= of_get_property(np
, "fsl,exec-units-mask", NULL
);
2923 priv
->exec_units
= *prop
;
2925 prop
= of_get_property(np
, "fsl,descriptor-types-mask", NULL
);
2927 priv
->desc_types
= *prop
;
2929 if (!is_power_of_2(priv
->num_channels
) || !priv
->chfifo_len
||
2930 !priv
->exec_units
|| !priv
->desc_types
) {
2931 dev_err(dev
, "invalid property data in device tree node\n");
2936 if (of_device_is_compatible(np
, "fsl,sec3.0"))
2937 priv
->features
|= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
;
2939 if (of_device_is_compatible(np
, "fsl,sec2.1"))
2940 priv
->features
|= TALITOS_FTR_HW_AUTH_CHECK
|
2941 TALITOS_FTR_SHA224_HWINIT
|
2942 TALITOS_FTR_HMAC_OK
;
2944 if (of_device_is_compatible(np
, "fsl,sec1.0"))
2945 priv
->features
|= TALITOS_FTR_SEC1
;
2947 if (of_device_is_compatible(np
, "fsl,sec1.2")) {
2948 priv
->reg_deu
= priv
->reg
+ TALITOS12_DEU
;
2949 priv
->reg_aesu
= priv
->reg
+ TALITOS12_AESU
;
2950 priv
->reg_mdeu
= priv
->reg
+ TALITOS12_MDEU
;
2951 stride
= TALITOS1_CH_STRIDE
;
2952 } else if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2953 priv
->reg_deu
= priv
->reg
+ TALITOS10_DEU
;
2954 priv
->reg_aesu
= priv
->reg
+ TALITOS10_AESU
;
2955 priv
->reg_mdeu
= priv
->reg
+ TALITOS10_MDEU
;
2956 priv
->reg_afeu
= priv
->reg
+ TALITOS10_AFEU
;
2957 priv
->reg_rngu
= priv
->reg
+ TALITOS10_RNGU
;
2958 priv
->reg_pkeu
= priv
->reg
+ TALITOS10_PKEU
;
2959 stride
= TALITOS1_CH_STRIDE
;
2961 priv
->reg_deu
= priv
->reg
+ TALITOS2_DEU
;
2962 priv
->reg_aesu
= priv
->reg
+ TALITOS2_AESU
;
2963 priv
->reg_mdeu
= priv
->reg
+ TALITOS2_MDEU
;
2964 priv
->reg_afeu
= priv
->reg
+ TALITOS2_AFEU
;
2965 priv
->reg_rngu
= priv
->reg
+ TALITOS2_RNGU
;
2966 priv
->reg_pkeu
= priv
->reg
+ TALITOS2_PKEU
;
2967 priv
->reg_keu
= priv
->reg
+ TALITOS2_KEU
;
2968 priv
->reg_crcu
= priv
->reg
+ TALITOS2_CRCU
;
2969 stride
= TALITOS2_CH_STRIDE
;
2972 err
= talitos_probe_irq(ofdev
);
2976 if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2977 tasklet_init(&priv
->done_task
[0], talitos1_done_4ch
,
2978 (unsigned long)dev
);
2980 if (!priv
->irq
[1]) {
2981 tasklet_init(&priv
->done_task
[0], talitos2_done_4ch
,
2982 (unsigned long)dev
);
2984 tasklet_init(&priv
->done_task
[0], talitos2_done_ch0_2
,
2985 (unsigned long)dev
);
2986 tasklet_init(&priv
->done_task
[1], talitos2_done_ch1_3
,
2987 (unsigned long)dev
);
2991 priv
->chan
= kzalloc(sizeof(struct talitos_channel
) *
2992 priv
->num_channels
, GFP_KERNEL
);
2994 dev_err(dev
, "failed to allocate channel management space\n");
2999 priv
->fifo_len
= roundup_pow_of_two(priv
->chfifo_len
);
3001 for (i
= 0; i
< priv
->num_channels
; i
++) {
3002 priv
->chan
[i
].reg
= priv
->reg
+ stride
* (i
+ 1);
3003 if (!priv
->irq
[1] || !(i
& 1))
3004 priv
->chan
[i
].reg
+= TALITOS_CH_BASE_OFFSET
;
3006 spin_lock_init(&priv
->chan
[i
].head_lock
);
3007 spin_lock_init(&priv
->chan
[i
].tail_lock
);
3009 priv
->chan
[i
].fifo
= kzalloc(sizeof(struct talitos_request
) *
3010 priv
->fifo_len
, GFP_KERNEL
);
3011 if (!priv
->chan
[i
].fifo
) {
3012 dev_err(dev
, "failed to allocate request fifo %d\n", i
);
3017 atomic_set(&priv
->chan
[i
].submit_count
,
3018 -(priv
->chfifo_len
- 1));
3021 dma_set_mask(dev
, DMA_BIT_MASK(36));
3023 /* reset and initialize the h/w */
3024 err
= init_device(dev
);
3026 dev_err(dev
, "failed to initialize device\n");
3030 /* register the RNG, if available */
3031 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
)) {
3032 err
= talitos_register_rng(dev
);
3034 dev_err(dev
, "failed to register hwrng: %d\n", err
);
3037 dev_info(dev
, "hwrng\n");
3040 /* register crypto algorithms the device supports */
3041 for (i
= 0; i
< ARRAY_SIZE(driver_algs
); i
++) {
3042 if (hw_supports(dev
, driver_algs
[i
].desc_hdr_template
)) {
3043 struct talitos_crypto_alg
*t_alg
;
3046 t_alg
= talitos_alg_alloc(dev
, &driver_algs
[i
]);
3047 if (IS_ERR(t_alg
)) {
3048 err
= PTR_ERR(t_alg
);
3049 if (err
== -ENOTSUPP
)
3054 switch (t_alg
->algt
.type
) {
3055 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
3056 case CRYPTO_ALG_TYPE_AEAD
:
3057 err
= crypto_register_alg(
3058 &t_alg
->algt
.alg
.crypto
);
3059 name
= t_alg
->algt
.alg
.crypto
.cra_driver_name
;
3061 case CRYPTO_ALG_TYPE_AHASH
:
3062 err
= crypto_register_ahash(
3063 &t_alg
->algt
.alg
.hash
);
3065 t_alg
->algt
.alg
.hash
.halg
.base
.cra_driver_name
;
3069 dev_err(dev
, "%s alg registration failed\n",
3073 list_add_tail(&t_alg
->entry
, &priv
->alg_list
);
3076 if (!list_empty(&priv
->alg_list
))
3077 dev_info(dev
, "%s algorithms registered in /proc/crypto\n",
3078 (char *)of_get_property(np
, "compatible", NULL
));
3083 talitos_remove(ofdev
);
3088 static const struct of_device_id talitos_match
[] = {
3090 .compatible
= "fsl,sec2.0",
3094 MODULE_DEVICE_TABLE(of
, talitos_match
);
3096 static struct platform_driver talitos_driver
= {
3099 .of_match_table
= talitos_match
,
3101 .probe
= talitos_probe
,
3102 .remove
= talitos_remove
,
3105 module_platform_driver(talitos_driver
);
3107 MODULE_LICENSE("GPL");
3108 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3109 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");